#!/usr/bin/env python
"""
Session library
Copyright 2020-2021 Leboncoin
Licensed under the Apache License, Version 2.0
Written by Nicolas BEGUIER ([email protected])
"""
# Third party library imports
import boto3
PIVOTAL_ROLE = 'arn:aws:iam::xxxxxxxxxxxx:role/AWS-Tower'
def get_session(role_arn, region_name):
"""
Returns a session for the specified accountId
"""
sts_connection = boto3.client('sts')
acct_a = sts_connection.assume_role(
RoleArn=PIVOTAL_ROLE,
RoleSessionName='AWS-Tower'
)
access_key = acct_a['Credentials']['AccessKeyId']
secret_key = acct_a['Credentials']['SecretAccessKey']
session_token = acct_a['Credentials']['SessionToken']
sts_connection_2 = boto3.client(
'sts',
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
)
acct_b = sts_connection_2.assume_role(
RoleArn=role_arn,
RoleSessionName='Readonly'
)
access_key = acct_b['Credentials']['AccessKeyId']
secret_key = acct_b['Credentials']['SecretAccessKey']
session_token = acct_b['Credentials']['SessionToken']
session = boto3.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name=region_name
)
return session
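# Example usage (a minimal sketch; the role ARN and region below are placeholders,
# not values provided by this library):
if __name__ == '__main__':
    session = get_session('arn:aws:iam::123456789012:role/Readonly', 'eu-west-1')
    print(session.client('sts').get_caller_identity()['Arn'])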
|
from types import SimpleNamespace
from pandas import DataFrame
from .league import League
from .player import Player
from .user import User
from ..exceptions import RosterNotFoundException
class Team:
def __init__(self, context, user, league):
self._context = context
        if isinstance(league, str):
            league = League(context, league)
        if isinstance(user, str):
            user = User(context, user, league)
self.user = user
self.league = league
@property
def players(self):
return [Player(self._context, player_id) for player_id in self._get_roster()['players']]
@property
def roster_positions(self):
return self.league.roster_positions
def scoring_dataframe(self, stats_mode='statistics'):
data, columns = self.scoring(stats_mode)
return DataFrame(data, columns=columns)
def scoring(self, stats_mode='statistics'):
first_week = 1
last_week = self._context.current_week
columns = ['name', 'age', 'position']
columns += ['week_{}'.format(week) for week in range(first_week, last_week + 1)]
data = self._scoring_generator(stats_mode, first_week, last_week)
return data, columns
def best_projected_lineup(self, week=None):
if week is None:
week = self._context.current_week
starters = self._best_projected_lineup(week)
lineup = DataFrame(starters)
lineup = lineup[['name', 'position', 'role', f'week_{week}']]
lineup = lineup.rename(columns={f'week_{week}': 'projection'})
return lineup.reset_index(drop=True)
def player_scoring_per_week(self, player, stats_mode='statistics', first_week=1, last_week=None):
if last_week is None:
last_week = self._context.current_week
return list(self._player_scoring_per_week_generator(player, stats_mode, first_week, last_week))
def __str__(self):
return 'Team(User: {}, League: {})'.format(self.user, self.league)
def _scoring_generator(self, stats_mode, first_week, last_week):
for player in self.players:
data = [player.name, player.age, ','.join(player.fantasy_positions)]
scoring = self.player_scoring_per_week(player, stats_mode, first_week, last_week)
yield data + scoring
def _get_roster(self):
for roster in self.league.rosters:
if roster['owner_id'] == self.user.id:
return roster
raise RosterNotFoundException('Roster for {} not found'.format(self.user.id))
def _player_scoring_per_week_generator(self, player, stats_mode, first_week, last_week):
for week in range(first_week, last_week + 1):
week_stats = player.week_statistics(week, stats_mode)
item = self._calculate_week_total_points(week_stats)
yield item
def _calculate_week_total_points(self, week_stats):
total = 0
for k, v in week_stats.items():
if k in self.league.scoring_settings:
total += self.league.scoring_settings[k] * v
return total
def _best_projected_lineup(self, week):
projections = self.scoring_dataframe(stats_mode='projections')
sorted_projections = projections.sort_values(by=f'week_{week}', ascending=False)
sorted_projections['role'] = ''
positions = ['RB', 'WR', 'TE', 'QB', 'LB', 'DB', 'DL', 'FLEX', 'SUPER_FLEX', 'IDP_FLEX', 'DEF', 'K']
roles = {position: self.roster_positions.count(position) for position in positions}
manager = SimpleNamespace(starters=[], roles=roles, jokers=[])
for idx, player in sorted_projections.iterrows():
self._get_available_position(manager, player)
return manager.starters
@staticmethod
def _get_available_position(manager, player):
positions = player.position.split(',')
for position in positions:
if manager.roles.get(position, 0) > 0:
if len(positions) > 1:
joker = {
'index': len(manager.starters),
'current': position,
'destinations': [p for p in positions if p != position]
}
manager.jokers.append(joker)
manager.roles[position] -= 1
player.role = position
manager.starters.append(player)
return
for joker in manager.jokers:
for position in positions:
if joker['current'] == position:
for destination in joker['destinations']:
if manager.roles.get(destination, 0) > 0:
manager.starters[joker['index']].role = destination
if len(joker['destinations']) > 1:
new_joker = {
'index': joker['index'],
'current': destination,
'destinations': [p for p in joker['destinations'] if p != destination]
}
manager.jokers.append(new_joker)
manager.jokers.remove(joker)
manager.roles[destination] -= 1
player.role = position
manager.starters.append(player)
return
flex = ['RB', 'WR', 'TE']
super_flex = ['RB', 'WR', 'TE', 'QB']
idp_flex = ['LB', 'DB', 'DL']
for position in positions:
if manager.roles.get('FLEX', 0) > 0 and position in flex:
manager.roles['FLEX'] -= 1
player.role = 'FLEX'
manager.starters.append(player)
return
if manager.roles.get('SUPER_FLEX', 0) > 0 and position in super_flex:
manager.roles['SUPER_FLEX'] -= 1
player.role = 'SUPER_FLEX'
manager.starters.append(player)
return
if manager.roles.get('IDP_FLEX', 0) > 0 and position in idp_flex:
manager.roles['IDP_FLEX'] -= 1
player.role = 'IDP_FLEX'
manager.starters.append(player)
return
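# Example usage (a minimal sketch; `context`, the user name and the league id are
# hypothetical stand-ins for whatever this package's client normally supplies):
def print_team_summary(context, user_name, league_id):
    """Hypothetical helper showing how Team is typically driven."""
    team = Team(context, user_name, league_id)
    print(team)
    print(team.scoring_dataframe())
    print(team.best_projected_lineup())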
|
# -*- coding: utf-8 -*-
# Created on Fri Jun 15 13:59:04 2012
# @author: Merlijn van Deen <[email protected]>
"""
pyTables helper functions
Most of these are helper functions for `pytables_test.py` and
`pytables_import_shear.py`. The exception to this is `read_packing`,
which loads a packing from the hdf5 file to a dictionary, as used
by most other code.
"""
import tables
import numpy
# `reduce` is a builtin only on Python 2; import it so require_groups also works on Python 3
from functools import reduce
def require_groups(root, name, *args, **kwargs):
names = name.split("/")
return reduce(lambda x,y: require_group(x,y,*args,**kwargs), names, root)
def require_group(root, name, *args, **kwargs):
if '/' in name:
return require_groups(root, name, *args, **kwargs)
h5f = root._v_file
try:
return root._f_getChild(name)
except tables.exceptions.NoSuchNodeError:
return h5f.createGroup(root, name, *args, **kwargs)
def require_table(root, name, dtype, *args, **kwargs):
try:
return root._f_getChild(name)
except tables.exceptions.NoSuchNodeError:
return root._v_file.createTable(root, name, dtype, *args, **kwargs)
def store_table(root, name, data, *args, **kwargs):
data = numpy.array(data)
return root._v_file.createTable(root, name, data, expectedrows=data.shape[0], chunkshape=data.shape)
def add_to_table(table, data={}, **kwargs):
row = table.row
for key in table.colnames:
if key in data:
row[key] = data[key]
elif key in kwargs:
row[key] = kwargs[key]
else:
if table.coldtypes[key] == numpy.string_:
row[key] = "(unknown)"
else:
row[key] = numpy.array(0, dtype=table.coldtypes[key]) * numpy.nan
row.append()
def read_packing(pack):
"""
in: pack -- pytables node containing a packing
(e.g. `/N1024/P3.1620e-03/0090` in N1024~P3162e-3_tables.h5,
or `/N1024/P3.1620e-03/0090/SR/0000` in N1024~P3.162e-3_shear.h5)
>>> import tables
>>> f = tables.File(r"N256~P1e-3_tables.h5")
>>> node = f.root.__getattr__('N256').__getattr__('p1.0000e-03').__getattr__('9000')
>>> read_packing(node)
{ (...),
'L': 37.448300000000003,
'L1': array([ 37.28610578, 0. ]),
'L2': array([ -0.19961572, 37.61113632]),
'N': 256,
...
'particles': array([ (33.51530087911755, 2.706168622523819e-16, 25.13120589237754, -4.2327252813834093e-16, 1.0),
...
]),
(...)
}
"""
packing = dict((x, pack._v_attrs[x]) for x in pack._v_attrs._v_attrnames)
packing['particles'] = pack.particles.read()
return packing
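# Example usage (a minimal sketch; the file name, group path and particle dtype are
# placeholders, and the same legacy PyTables API as the helpers above is assumed):
def _example(path="example.h5"):
    h5f = tables.openFile(path, "w")
    group = require_group(h5f.root, "N1024/P3.1620e-03/0000")
    particles = numpy.zeros(4, dtype=[("x", float), ("y", float), ("r", float)])
    store_table(group, "particles", particles)
    h5f.close()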
|
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.patch_request import PatchRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.query.query_command import QueryCommand
from pyopenproject.model.query import Query
class Star(QueryCommand):
def __init__(self, connection, query):
super().__init__(connection)
self.query = query
def execute(self):
try:
json_obj = PatchRequest(connection=self.connection,
context=f"{self.CONTEXT}/{self.query.id}/star").execute()
return Query(json_obj)
except RequestError as re:
raise BusinessError(f"Error to star: {self.query.id}") from re
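# Example usage (a minimal sketch; `connection` and `query` are created elsewhere by the
# surrounding pyopenproject application and are assumed here):
def star_query_safely(connection, query):
    """Hypothetical helper: star `query`, returning None if the request fails."""
    try:
        return Star(connection, query).execute()
    except BusinessError:
        return None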
|
from ntp.kb import Atom, load_from_file, normalize
from ntp.nkb import kb2nkb, augment_with_templates, embed_symbol, rule2struct
from ntp.prover import prove, representation_match, is_tensor, is_parameter, neural_link_predict
from ntp.tp import rule2string
import tensorflow as tf
from pprint import pprint
from ntp.jtr.train import train
from ntp.jtr.util.hooks import LossHook, ExamplesPerSecHook, ETAHook, TensorHook
from ntp.jtr.preprocess.batch import GeneratorWithRestart
from ntp.jtr.util.util import get_timestamped_dir, load_conf, save_conf, tfprint
import numpy as np
import random
import copy
import os
from tabulate import tabulate
from tensorflow.python import debug as tf_debug
import sklearn
import sys
import logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
tf.set_random_seed(1337)
np.random.seed(1337)
def kb_ids2known_facts(kb_ids):
"""
:param kb_ids: a knowledge base of facts that are already mapped to ids
:return: a set of all known facts (used later for negative sampling)
"""
facts = set()
for struct in kb_ids:
arrays = kb_ids[struct][0]
num_facts = len(arrays[0])
for i in range(num_facts):
fact = [x[i] for x in arrays]
facts.add(tuple(fact))
return facts
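# Illustrative sketch (hypothetical data, not from the original script): kb_ids values are
# indexed as kb_ids[struct][0] -> one id-array per argument position, e.g.
#
#   kb_ids = {"struct": [[np.array([0, 1]), np.array([3, 4]), np.array([5, 6])]]}
#   kb_ids2known_facts(kb_ids)  # -> {(0, 3, 5), (1, 4, 6)}
#
# and the resulting set is what corrupted goals are checked against during negative sampling.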
if __name__ == '__main__':
    if len(sys.argv) > 1:
        conf_path = sys.argv[1]
    else:
        conf_path = "./conf/wordnet.conf"
    conf = load_conf(conf_path)
    # directory of the config file; also used later to restore a model checkpoint
    DIR = "/".join(conf_path.split("/")[:-1])
experiment_prefix = conf["meta"]["experiment_prefix"]
experiment_dir = get_timestamped_dir("./out/" + experiment_prefix,
conf["meta"]["name"],
link_to_latest=True)
save_conf(experiment_dir + conf["meta"]["file_name"], conf)
pprint(conf)
DEBUG = conf["meta"]["debug"]
OUTPUT_PREDICTIONS = conf["meta"]["output_predictions"]
CHECK_NUMERICS = conf["meta"]["check_numerics"]
TFDBG = conf["meta"]["tfdbg"]
TEST_GRAPH_CREATION = conf["meta"]["test_graph_creation"]
TRAIN = False
TEST_TIME_NEURAL_LINK_PREDICTION = \
conf["meta"]["test_time_neural_link_prediction"]
TEST_TIME_BATCHING = conf["meta"]["test_time_batching"]
ENSEMBLE = conf["meta"]["ensemble"]
EXPERIMENT = conf["meta"]["experiment_prefix"]
PERMUTATION = conf["meta"]["permutation"]
print(PERMUTATION)
TEMPLATES_PATH = conf["data"]["templates"]
INPUT_SIZE = conf["model"]["input_size"]
UNIFICATION = conf["model"]["unification"]
L2 = conf["model"]["l2"]
UNIT_NORMALIZE = conf["model"]["unit_normalize"]
K_MAX = conf["model"]["k_max"]
NEURL_LINK_PREDICTOR = conf["model"]["neural_link_predictor"]
TRAIN_0NTP = conf["model"]["train_0ntp"]
KEEP_PROB = conf["model"]["keep_prob"]
MAX_DEPTH = conf["model"]["max_depth"]
TRAIN_NTP = TRAIN_0NTP or TEMPLATES_PATH is not None
if NEURL_LINK_PREDICTOR is None and not TRAIN_0NTP:
raise AttributeError("Can't train non-0NTP without link predictor")
REPORT_INTERVAL = conf["training"]["report_interval"]
NUM_EPOCHS = conf["training"]["num_epochs"]
CLIP = conf["training"]["clip"]
LEARNING_RATE = conf["training"]["learning_rate"]
EPSILON = conf["training"]["epsilon"]
OPTIMIZER = conf["training"]["optimizer"]
POS_PER_BATCH = conf["training"]["pos_per_batch"]
NEG_PER_POS = conf["training"]["neg_per_pos"]
SAMPLING_SCHEME = conf["training"]["sampling_scheme"]
MEAN_LOSS = conf["training"]["mean_loss"]
INIT = conf["training"]["init"]
NUM_CORRUPTIONS = 0
if SAMPLING_SCHEME == "all":
NUM_CORRUPTIONS = 4
else:
NUM_CORRUPTIONS = 2
BATCH_SIZE = POS_PER_BATCH + POS_PER_BATCH * NEG_PER_POS * NUM_CORRUPTIONS
kb = load_from_file(conf["data"]["kb"])
print("Batch size: %d, pos: %d, neg: %d, corrupted: %d" %
(BATCH_SIZE, POS_PER_BATCH, NEG_PER_POS, NUM_CORRUPTIONS))
if TEMPLATES_PATH is not None:
rule_templates = load_from_file(TEMPLATES_PATH, rule_template=True)
kb = augment_with_templates(kb, rule_templates)
kb = normalize(kb)
nkb, kb_ids, vocab, emb, predicate_ids, constant_ids = \
kb2nkb(kb, INPUT_SIZE, unit_normalize=UNIT_NORMALIZE,
keep_prob=KEEP_PROB, permutation=PERMUTATION)
pprint(nkb.keys())
pprint(vocab.sym2id)
known_facts = kb_ids2known_facts(kb_ids)
# using same embedding matrix
# tf.get_variable_scope().reuse_variables()
goal_struct = rule2struct(normalize([[Atom('p1', ["c0", "c1"])]])[0])
if EXPERIMENT == "animals":
goal_struct = rule2struct(normalize([[Atom('p1', ["c0"])]])[0])
def embed(goal, emb, keep_prob=1.0):
return [embed_symbol(x, emb, unit_normalize=UNIT_NORMALIZE,
keep_prob=keep_prob) for x in goal]
def get_mask_id(kb, goal_struct, goal):
if goal_struct in kb:
facts = kb[goal_struct][0]
num_facts = len(facts[0])
mask_id = None
for i in range(num_facts):
exists = True
for j in range(len(goal)):
exists = exists and goal[j] == facts[j][i]
if exists:
mask_id = i
if mask_id is not None:
return mask_id
return None
mask_indices = tf.placeholder("int32", [POS_PER_BATCH, 2], name="mask_indices")
goal_placeholder = [tf.placeholder("int32", [BATCH_SIZE], name="goal_%d" % i)
for i in range(0, len(goal_struct[0]))]
goal_emb = embed(goal_placeholder, emb, KEEP_PROB)
num_facts = len(kb_ids[goal_struct][0][0])
mask = tf.Variable(np.ones([num_facts, BATCH_SIZE], np.float32),
trainable=False, name="fact_mask")
mask_set = tf.scatter_nd_update(mask, mask_indices, [0.0] * POS_PER_BATCH)
mask_unset = tf.scatter_nd_update(mask, mask_indices, [1.0] * POS_PER_BATCH)
target = tf.placeholder("float32", [BATCH_SIZE], name="target")
AGGREGATION_METHOD = conf["model"]["aggregate_fun"]
aggregation_fun = None
if AGGREGATION_METHOD == "Max":
def fun(x):
return tf.reduce_max(x, 1)
aggregation_fun = fun
elif AGGREGATION_METHOD == "Mean":
def fun(x):
return tf.reduce_mean(x, 1)
aggregation_fun = fun
elif AGGREGATION_METHOD == "LogSumExp":
# fixme: problem since in our case this is already applied to sub-paths
def fun(x):
return tf.reduce_logsumexp(x, 1)
aggregation_fun = fun
elif AGGREGATION_METHOD == "MaxMean":
def fun(x):
return (tf.reduce_max(x, 1) + tf.reduce_mean(x, 1)) / 2.0
aggregation_fun = fun
else:
raise AttributeError("Aggregation function %s unknown" %
AGGREGATION_METHOD)
def corrupt_goal(goal, args=[0], tries=100):
if tries == 0:
print("WARNING: Could not corrupt", goal)
return goal
else:
goal_corrupted = copy.deepcopy(goal)
for arg in args:
corrupt = constant_ids[random.randint(0, len(constant_ids) - 1)]
goal_corrupted[arg + 1] = corrupt
if tuple(goal_corrupted) in known_facts:
return corrupt_goal(goal, args, tries - 1)
else:
return goal_corrupted
def get_batches():
facts = kb_ids[goal_struct][0]
num_facts = len(facts[0])
fact_ids = list(range(0, num_facts))
assert num_facts >= POS_PER_BATCH
def generator():
random.shuffle(fact_ids)
feed_dicts = []
mask_indices_init = np.zeros([POS_PER_BATCH, 2], dtype=np.int32)
goals_in_batch = [[] for _ in goal_placeholder]
targets_in_batch = []
j = 0
jj = 0
for i, ix in enumerate(fact_ids):
current_goal = [x[ix] for x in facts]
for k in range(len(current_goal)):
goals_in_batch[k].append(current_goal[k])
targets_in_batch += [1] + [0] * (NEG_PER_POS * NUM_CORRUPTIONS)
mask_indices_init[j] = [ix, jj]
j += 1
jj += 1 + (NEG_PER_POS * NUM_CORRUPTIONS)
for _ in range(NEG_PER_POS):
currupt_goal_1 = corrupt_goal(current_goal, [0])
for k in range(len(currupt_goal_1)):
goals_in_batch[k].append(currupt_goal_1[k])
currupt_goal_2 = corrupt_goal(current_goal, [1])
for k in range(len(currupt_goal_2)):
goals_in_batch[k].append(currupt_goal_2[k])
if SAMPLING_SCHEME == "all":
currupt_goal_3 = corrupt_goal(current_goal, [0, 1])
for k in range(len(currupt_goal_3)):
goals_in_batch[k].append(currupt_goal_3[k])
currupt_goal_4 = corrupt_goal(current_goal, [0, 1])
for k in range(len(currupt_goal_4)):
goals_in_batch[k].append(currupt_goal_4[k])
if j % POS_PER_BATCH == 0:
feed_dict = {
mask_indices: mask_indices_init,
target: targets_in_batch,
}
for k in range(len(goal_placeholder)):
feed_dict[goal_placeholder[k]] = goals_in_batch[k]
feed_dicts.append(feed_dict)
mask_indices_init = np.zeros([POS_PER_BATCH, 2], dtype=np.int32)
goals_in_batch = [[] for _ in goal_placeholder]
targets_in_batch = []
j = 0
jj = 0
for f in feed_dicts:
yield f
return GeneratorWithRestart(generator)
train_feed_dicts = get_batches()
# for _ in range(6):
# for x in train_feed_dicts:
# for key in x:
# val = x[key]
# print(key, val)
# print()
# print("---")
# os._exit(-1)
prove_success = prove(nkb, goal_emb, goal_struct, mask, trace=True,
aggregation_fun=aggregation_fun, k_max=K_MAX,
train_0ntp=TRAIN_0NTP, max_depth=MAX_DEPTH)
print("Graph creation complete.")
print("Variables")
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
print("\t", v)
if TEST_GRAPH_CREATION:
exit(1)
if DEBUG and TRAIN_NTP:
prove_success = tfprint(prove_success, "NTP success:\n")
def caculate_loss(success, target):
if AGGREGATION_METHOD == "LogSumExp":
            return -(target * 2 - 1) * success
else:
x = success
z = target
return -z * tf.log(tf.clip_by_value(x, EPSILON, 1.0)) - \
(1 - z) * tf.log(tf.clip_by_value(1 - x, EPSILON, 1.0))
# using numerical stable implementation from tf.nn.sigmoid_cross_entropy_with_logits
# loss = tf.maximum(x, 0) - x * target + tf.log(1 + tf.exp(-tf.abs(x)))
prover_loss = caculate_loss(prove_success, target)
if DEBUG:
prover_loss = tfprint(prover_loss, "NTP loss:\n")
if NEURL_LINK_PREDICTOR is not None:
neural_link_prediction_success = \
tf.squeeze(neural_link_predict(goal_emb, model=NEURL_LINK_PREDICTOR))
if DEBUG:
neural_link_prediction_success = \
tfprint(neural_link_prediction_success, "link predict:\n")
neural_link_prediction_loss = \
caculate_loss(neural_link_prediction_success, target)
if TRAIN_NTP:
loss = neural_link_prediction_loss + prover_loss
else:
loss = neural_link_prediction_loss
if TEST_TIME_NEURAL_LINK_PREDICTION:
test_time_prediction = \
tf.maximum(neural_link_prediction_success, prove_success)
# fixme: refactor!
if ENSEMBLE:
prove_success = \
tf.maximum(neural_link_prediction_success, prove_success)
loss = caculate_loss(prove_success, target)
else:
loss = prover_loss
test_time_prediction = prove_success
if DEBUG:
loss = tfprint(loss, "loss:\n")
if MEAN_LOSS:
loss = tf.reduce_mean(loss)
else:
loss = tf.reduce_sum(loss)
# loss = tf.reduce_sum(loss)
# loss = tfprint(loss, "loss reduced:\n")
def pre_run(sess, epoch, feed_dict, loss, predict):
results = sess.run(mask_set, {mask_indices: feed_dict[mask_indices]})
if DEBUG:
# for id in vocab.id2sym:
# print(id, vocab.id2sym[id])
# print("mask\n", results)
for k in feed_dict:
print(k, feed_dict[k])
pass
def post_run(sess, epoch, feed_dict, loss, predict):
results = sess.run(mask_unset, {mask_indices: feed_dict[mask_indices]})
# print(results)
if DEBUG:
exit(1)
pass
summary_writer = tf.summary.FileWriter(experiment_dir)
    optim = None
    if OPTIMIZER == "Adam":
        optim = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE,
                                       epsilon=EPSILON)
    elif OPTIMIZER == "AdaGrad":
        optim = tf.train.AdagradOptimizer(learning_rate=LEARNING_RATE)
    elif OPTIMIZER == "SGD":
        optim = tf.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE)
    else:
        raise AttributeError("Optimizer %s unknown" % OPTIMIZER)
gradients = optim.compute_gradients(loss)
variables = [x[1] for x in gradients]
gradients = [x[0] for x in gradients]
hooks = [
LossHook(REPORT_INTERVAL, 1, summary_writer=summary_writer),
ExamplesPerSecHook(REPORT_INTERVAL, BATCH_SIZE,
summary_writer=summary_writer),
ETAHook(REPORT_INTERVAL, NUM_EPOCHS, 10, summary_writer=summary_writer)
]
if DEBUG:
hooks.append(
TensorHook(REPORT_INTERVAL, variables, prefix="variables_",
modes=["mean_abs", "std", "norm", "max", "min"],
global_statistics=True, summary_writer=summary_writer))
hooks.append(
TensorHook(REPORT_INTERVAL, gradients, prefix="gradients_",
modes=["mean_abs", "std", "norm", "max", "min"],
global_statistics=True, summary_writer=summary_writer))
sess = tf.Session()
if TFDBG:
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
if TRAIN:
train(loss, optim, train_feed_dicts, max_epochs=NUM_EPOCHS,
hooks=hooks, pre_run=pre_run, post_run=post_run, sess=sess, l2=L2,
clip=CLIP, check_numerics=CHECK_NUMERICS)
else:
# todo: load model
print("Loading model...")
# tf.saved_model.loader.load(sess, ["foo"], DIR)
tf.train.Saver().restore(sess, DIR + "/model.ckpt-0")
# sess.run(tf.global_variables_initializer())
print(emb.eval(sess))
def decode(x, emb, vocab, valid_ids, sess):
valid_ids = set(valid_ids)
num_rules = int(x.get_shape()[0])
num_symbols = int(emb.get_shape()[0])
mask = np.ones([num_symbols], dtype=np.float32)
for i in range(len(vocab)):
if i not in valid_ids: # or i == vocab.sym2id[vocab.unk]:
mask[i] = 0 # np.zeros([input_size], dtype=np.float32)
# -- num_rules x num_symbols
mask = tf.tile(tf.expand_dims(mask, 0), [num_rules, 1])
# print(sess.run(mask[0], {}))
# -- num_rules x num_symbols
match = representation_match(x, emb)
success_masked = match * mask
success, ix = tf.nn.top_k(success_masked, 1)
success_val, ix_val = sess.run([success, ix], {})
syms = []
for i, row in enumerate(ix_val):
sym_id = row[0]
sym_success = success_val[i][0]
sym = vocab.id2sym[sym_id]
syms.append((sym, sym_success))
return syms
def unstack_rules(rule):
rules = []
num_rules = len(rule[0].predicate)
for i in range(num_rules):
current_rule = []
confidence = 1.0
for atom in rule:
predicate = atom.predicate
if isinstance(predicate, list):
predicate, success = predicate[i]
confidence = min(confidence, success)
arguments = []
for argument in atom.arguments:
if isinstance(argument, list):
argument, success = argument[i]
arguments.append(argument)
confidence = min(confidence, success)
else:
arguments.append(argument)
current_rule.append(Atom(predicate, arguments))
rules.append((current_rule, confidence))
return rules
predicate_ids_with_placholders = copy.deepcopy(predicate_ids)
predicate_ids = []
for id in predicate_ids_with_placholders:
if not is_parameter(vocab.id2sym[id]):
predicate_ids.append(id)
print("Writing induced logic program to", experiment_dir + "rules.nl")
with open(experiment_dir + "rules.nl", "w") as f:
for struct in nkb:
# it's a rule
if len(struct) > 1:
rule = nkb[struct]
rule_sym = []
for atom in rule:
atom_sym = []
for i, sym in enumerate(atom):
if is_tensor(sym):
valid_ids = predicate_ids if i == 0 else constant_ids
syms = decode(sym, emb, vocab, valid_ids, sess)
print(syms)
atom_sym.append(syms)
else:
atom_sym.append(sym[0])
rule_sym.append(Atom(atom_sym[0], atom_sym[1:]))
rules = unstack_rules(rule_sym)
rules.sort(key=lambda x: -x[1])
# filtering for highly confident rules
# rules = [rule for rule in rules if rule[1] > 0.8]
f.write(str(struct) + "\n")
for rule, confidence in rules:
f.write("%s\t%s\n" % (confidence, rule2string(rule)))
f.write("\n")
f.close()
if OUTPUT_PREDICTIONS:
goal_placeholder = [
tf.placeholder("int32", [1], name="goal_%d" % i)
for i in range(0, len(goal_struct[0]))]
goal_emb = embed(goal_placeholder, emb, keep_prob=1.0)
if TEST_TIME_BATCHING:
copies = BATCH_SIZE
for i, x in enumerate(goal_emb):
goal_emb[i] = tf.tile(x, [copies, 1])
prove_success_test_time = \
prove(nkb, goal_emb, goal_struct, mask_var=None, trace=True,
aggregation_fun=aggregation_fun, k_max=K_MAX,
train_0ntp=TRAIN_0NTP, max_depth=MAX_DEPTH)
if NEURL_LINK_PREDICTOR is not None:
neural_link_prediction_success_test_time = \
neural_link_predict(goal_emb, model=NEURL_LINK_PREDICTOR)
if TEST_TIME_NEURAL_LINK_PREDICTION:
prove_success_test_time = \
tf.maximum(prove_success_test_time,
neural_link_prediction_success_test_time)
table = []
for sym in vocab.sym2id:
id = vocab.sym2id[sym]
vec = sess.run(emb[id])
table.append([sym, id, vec])
# print(tabulate(table)) # tablefmt='orgtbl'
def predict(predicate, arg1, arg2):
feed_dict = {}
goal = [vocab(predicate), vocab(arg1), vocab(arg2)]
for k, d in zip(goal_placeholder, goal):
feed_dict[k] = [d]
success = prove_success_test_time
if AGGREGATION_METHOD == "LogSumExp":
success = tf.sigmoid(success)
if TEST_TIME_NEURAL_LINK_PREDICTION:
success = tf.squeeze(success)
success_val = sess.run(success, feed_dict=feed_dict)
if TEST_TIME_BATCHING:
if not all([x == success_val[0] for x in success_val]):
print("WARNING! Numerical instability?", success_val)
return success_val
table = []
headers = [vocab.id2sym[rid] for rid in predicate_ids]
for i, e1id in enumerate(constant_ids):
for j, e2id in enumerate(constant_ids):
# if i <= j:
e1 = vocab.id2sym[e1id]
e2 = vocab.id2sym[e2id]
row = [e1, e2]
for r in headers:
score = predict(r, e1, e2)
if TEST_TIME_BATCHING:
score = score[0]
row.append(score)
table.append(row)
print(tabulate(table, headers=["e1", "e2"] + headers))
# --- Embedding Visualization
from tensorflow.contrib.tensorboard.plugins import projector
saver = tf.train.Saver()
saver.save(sess, os.path.join(experiment_dir, "model.ckpt"), 0)
saver.save(sess, "model.ckpt", 0)
# Use the same LOG_DIR where you stored your checkpoint.
summary_writer = tf.summary.FileWriter(experiment_dir)
# Format:
# tensorflow/contrib/tensorboard/plugins/projector/projector_config.proto
config = projector.ProjectorConfig()
# You can add multiple embeddings. Here we add only one.
embedding = config.embeddings.add()
embedding.tensor_name = emb.name
with open(experiment_dir + "metadata.tsv", "w") as f:
f.write("Symbol\tClass\n")
for i, id in enumerate(vocab.id2sym):
sym = vocab.id2sym[id]
typ = ""
if is_parameter(sym):
typ = "Param"
elif id in predicate_ids:
typ = "Predicate"
else:
typ = "Constant"
f.write("%s\t%s\n" % (sym, typ))
f.close()
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = 'metadata.tsv'
# Saves a configuration file that TensorBoard will read during startup.
projector.visualize_embeddings(summary_writer, config)
# --- Evaluation for Countries
if conf["meta"]["experiment_prefix"] == "countries":
test_set = conf["meta"]["test_set"]
test_countries = []
with open("./data/countries/%s.txt" % test_set, "r") as f:
for line in f.readlines():
test_countries.append(line[:-1])
regions = []
with open("./data/countries/regions.txt", "r") as f:
for line in f.readlines():
regions.append(line[:-1])
regions_set = set(regions)
ground_truth = load_from_file("./data/countries/countries.nl")
country2region = {}
for atom in ground_truth:
atom = atom[0]
if atom.predicate == "locatedIn":
country, region = atom.arguments
if region in regions:
country2region[country] = region
print(test_countries)
print(regions)
goal_placeholder = [
tf.placeholder("int32", [len(regions)], name="goal_%d" % i)
for i in range(0, len(goal_struct[0]))]
goal_emb = embed(goal_placeholder, emb, keep_prob=1.0)
prove_success_test_time = \
prove(nkb, goal_emb, goal_struct, mask_var=None, trace=True,
aggregation_fun=aggregation_fun, k_max=K_MAX,
train_0ntp=TRAIN_0NTP, max_depth=MAX_DEPTH)
if NEURL_LINK_PREDICTOR is not None:
neural_link_prediction_success_test_time = \
tf.squeeze(neural_link_predict(goal_emb, model=NEURL_LINK_PREDICTOR))
if TEST_TIME_NEURAL_LINK_PREDICTION:
prove_success_test_time = \
tf.maximum(prove_success_test_time,
neural_link_prediction_success_test_time)
locatedIn_ids = [vocab("locatedIn")] * len(regions)
regions_ids = [vocab(region) for region in regions]
def predict(country):
feed_dict = {}
country_ids = [vocab(country)] * len(regions)
goal = [locatedIn_ids, country_ids, regions_ids]
for k, d in zip(goal_placeholder, goal):
feed_dict[k] = d
success = prove_success_test_time
if AGGREGATION_METHOD == "LogSumExp":
success = tf.sigmoid(success)
if TEST_TIME_NEURAL_LINK_PREDICTION:
success = tf.squeeze(success)
success_val = sess.run(success, feed_dict=feed_dict)
return success_val
scores_pl = tf.placeholder(tf.float32, [len(regions)])
target_pl = tf.placeholder(tf.int32, [len(regions)])
auc = tf.contrib.metrics.streaming_auc(scores_pl, target_pl, curve='PR',
num_thresholds=1000)
# sess.run(tf.local_variables_initializer())
table = []
auc_val = 0.0
scores_all = list()
target_all = list()
for country in test_countries:
known_kb = country2region[country]
ix = regions.index(known_kb)
print(country, known_kb, ix)
scores = predict(country)
# fixme: just for sanity checking; remove afterwards
# scores = np.random.rand(5)
table.append([country] + list(scores))
target = np.zeros(len(regions), np.int32)
target[ix] = 1
# auc_val, _ = sess.run(auc, feed_dict={
# target_pl: target, scores_pl: scores})
scores_all += list(scores)
target_all += list(target)
print(tabulate(table, headers=["country"] + regions))
auc_val = \
sklearn.metrics.average_precision_score(target_all, scores_all)
print(auc_val)
import time
date = time.strftime("%Y-%m-%d")
time = time.strftime("%H-%M-%S")
config_name = conf["meta"]["name"]
config_str = str(conf)
kb = conf["data"]["kb"]
templates = conf["data"]["templates"]
model = conf["model"]["name"]
corpus = kb.split("_")[1][:2]
with open(conf["meta"]["result_file"], "a") as f:
f.write("%s\t%s\t%s\t%4.3f\t%s\t%s\t%s\t%s\n" %
(model, corpus, templates, auc_val, date, time, config_name,
config_str))
# --- Evaluation for UMLS
if EXPERIMENT in ["umls", "kinship", "nations", "animals"]:
goal_placeholder = [
tf.placeholder("int32", [BATCH_SIZE], name="goal_%d" % i)
for i in range(0, len(goal_struct[0]))]
goal_emb = embed(goal_placeholder, emb, keep_prob=1.0)
prove_success_test_time = \
prove(nkb, goal_emb, goal_struct, mask_var=None, trace=True,
aggregation_fun=aggregation_fun, k_max=K_MAX,
train_0ntp=TRAIN_0NTP, max_depth=MAX_DEPTH)
if NEURL_LINK_PREDICTOR is not None:
neural_link_prediction_success_test_time = \
tf.squeeze(
neural_link_predict(goal_emb, model=NEURL_LINK_PREDICTOR))
if TEST_TIME_NEURAL_LINK_PREDICTION:
prove_success_test_time = \
tf.maximum(prove_success_test_time,
neural_link_prediction_success_test_time)
entities = []
with open("./data/%s/entities.txt" % EXPERIMENT, "r") as f:
for entity in f.readlines():
entities.append(entity[:-1])
# print(entities)
entities = [vocab(entity) for entity in entities]
# print(entities)
test_kb = load_from_file("./data/%s/test.nl" % EXPERIMENT)
known_kb = load_from_file("./data/%s/%s.nl" % (EXPERIMENT, EXPERIMENT))
known = set()
for atom in known_kb:
atom = atom[0]
if len(atom.arguments) > 1:
known.add(tuple([
vocab(atom.predicate),
vocab(atom.arguments[0]),
vocab(atom.arguments[1])
]))
elif EXPERIMENT == "animals":
known.add(tuple([
vocab(atom.predicate),
vocab(atom.arguments[0])
]))
test = []
for atom in test_kb:
atom = atom[0]
if len(atom.arguments) > 1:
test.append(tuple([
vocab(atom.predicate),
vocab(atom.arguments[0]),
vocab(atom.arguments[1])
]))
elif EXPERIMENT == "animals":
test.append(tuple([
vocab(atom.predicate),
vocab(atom.arguments[0])
]))
cached_scores = {}
to_score = set()
for s, i, j in test:
corrupts_i = [(s, x, j) for x in entities
if (s, x, j) not in known and x != i]
corrupts_j = [(s, i, x) for x in entities
if (s, i, x) not in known and x != j]
for atom in corrupts_i:
to_score.add(atom)
for atom in corrupts_j:
to_score.add(atom)
to_score.add((s, i, j))
def chunks(ls, n):
for i in range(0, len(ls), n):
yield ls[i:i + n]
to_score = list(chunks(list(to_score), BATCH_SIZE))
for batch in to_score:
s_np = np.zeros([BATCH_SIZE], dtype=np.int32)
i_np = np.zeros([BATCH_SIZE], dtype=np.int32)
j_np = np.zeros([BATCH_SIZE], dtype=np.int32)
for i, atom in enumerate(batch):
s_np[i] = atom[0]
i_np[i] = atom[1]
j_np[i] = atom[2]
feed_dict = {}
for k, d in zip(goal_placeholder, [s_np, i_np, j_np]):
feed_dict[k] = d
scores = sess.run(prove_success_test_time, feed_dict)
for i, atom in enumerate(batch):
cached_scores[atom] = scores[i]
MRR = 0.0
HITS1 = 0.0
HITS3 = 0.0
HITS5 = 0.0
HITS10 = 0.0
counter = 0.0
for s, i, j in test:
corrupts_i = [(s, x, j) for x in entities
if (s, x, j) not in known and x != i]
corrupts_j = [(s, i, x) for x in entities
if (s, i, x) not in known and x != j]
for to_score in [corrupts_i, corrupts_j]:
to_score.append((s, i, j))
scores = [cached_scores[(s, a, b)] for s, a, b in to_score]
predict = scores[-1]
scores = sorted(scores)[::-1]
# Note that predictions for corrupted triples might by chance have exactly the same score,
# but we calculate the rank from only those corrupted triples that have a higher score akin to
# https://github.com/ttrouill/complex/blob/19f54a0efddaa8ad9f7476cd5032f8d6370e603d/efe/evaluation.py#L284
rank = scores.index(predict) + 1
counter += 1.0
if rank <= 1:
HITS1 += 1.0
if rank <= 3:
HITS3 += 1.0
if rank <= 5:
HITS5 += 1.0
if rank <= 10:
HITS10 += 1.0
MRR += 1.0 / rank
MRR /= counter
HITS1 /= counter
HITS3 /= counter
HITS5 /= counter
HITS10 /= counter
metrics = "%4.2f|%4.2f|%4.2f|%4.2f|%4.2f" % \
(MRR, HITS1, HITS3, HITS5, HITS10)
import time
date = time.strftime("%Y-%m-%d")
time = time.strftime("%H-%M-%S")
config_name = conf["meta"]["name"]
config_str = str(conf)
kb = conf["data"]["kb"]
templates = conf["data"]["templates"]
model = conf["model"]["name"]
corpus = conf["meta"]["experiment_prefix"]
with open(conf["meta"]["result_file"], "a") as f:
f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" %
(model, corpus, templates, metrics, date, time, config_name,
config_str))
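# Standalone sketch (not part of the original script) of the filtered ranking metrics
# computed above: the rank of the true triple counts only corruptions that score strictly
# higher, so score ties do not hurt the ranking.
def ranking_metrics(true_score, corrupted_scores, ks=(1, 3, 5, 10)):
    rank = 1 + sum(1 for s in corrupted_scores if s > true_score)
    hits = {k: float(rank <= k) for k in ks}
    return 1.0 / rank, hits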
|
'''
====================================================================
Copyright (c) 2003-2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_background_thread.py
'''
import threading
import queue
import types
from PyQt5 import QtCore
#
# Decorator used to set the requires_thread_switcher property on a function
#
_requires_thread_switcher_attr = 'requires_thread_switcher'
def thread_switcher( fn ):
setattr( fn, _requires_thread_switcher_attr, True )
return fn
# predicate to detect function that requires a ThreadSwitchScheduler
def requiresThreadSwitcher( fn ):
#print( 'qqq requiresThreadSwitcher( %r ) -> %r' % (fn, getattr( fn, _requires_thread_switcher_attr, False )) )
return getattr( fn, _requires_thread_switcher_attr, False )
#------------------------------------------------------------
class MarshalledCall:
def __init__( self, function, args ):
self.function = function
self.args = args
def __call__( self ):
self.function( *self.args )
def __repr__( self ):
return '<MarshalledCall: fn=%s nargs=%d>' % (self.function.__name__, len(self.args))
class BackgroundThread(threading.Thread):
def __init__( self, app ):
threading.Thread.__init__( self )
self.app = app
self.setDaemon( 1 )
self.running = 1
self.work_queue = queue.Queue( maxsize=0 )
def run( self ):
while self.running:
function = self.work_queue.get( block=True, timeout=None )
self.app.debug_options.debugLogThreading( 'BackgroundThread.run dispatching %r' % (function,) )
try:
function()
except:
self.app.log.exception( 'function failed on background thread' )
def addWork( self, function, args ):
self.app.debug_options.debugLogThreading( 'BackgroundThread.addWork( %r, %r )' % (function, args) )
assert self.running
self.work_queue.put( MarshalledCall( function, args ), block=False, timeout=None )
    def shutdown( self ):
        self.addWork( self.__shutdown, () )
def __shutdown( self ):
self.running = 0
#
# BackgroundWorkMixin
#
# Add features that allow processing to switch
# easily from foreground to background threads
#
# runInBackground - call function on the background thread
# runInForeground - call function on the foreground thread
#
# deferRunInForeground
# - used to move a callback made in the background
# into the foreground with the args provided in the
# background call back.
#
# threadSwitcher - call function that is allowed to yield to move between threads.
# - function starts in the foreground
# - switch to the background by yield switchToBackground
# - switch to the foreground by yield switchToForeground
#
# assumes that self is app
class BackgroundWorkMixin:
foregroundProcessSignal = QtCore.pyqtSignal( [MarshalledCall] )
def __init__( self ):
self.foreground_thread = threading.currentThread()
self.background_thread = BackgroundThread( self )
def startBackgoundThread( self ):
self.foregroundProcessSignal.connect( self.__runInForeground, type=QtCore.Qt.QueuedConnection )
self.background_thread.start()
def isForegroundThread( self ):
# return true if the caller is running on the main thread
return self.foreground_thread is threading.currentThread()
def deferRunInForeground( self, function ):
return DeferRunInForeground( self, function )
def runInBackground( self, function, args ):
self.debug_options.debugLogThreading( 'runInBackground( %r, %r )' % (function, args) )
self.background_thread.addWork( function, args )
def runInForeground( self, function, args ):
        # cannot call logging from here as this will cause the log call to be marshalled
self.foregroundProcessSignal.emit( MarshalledCall( function, args ) )
def wrapWithThreadSwitcher( self, function, reason='' ):
if requiresThreadSwitcher( function ):
return ThreadSwitchScheduler( self, function, reason )
else:
return function
    # aliases that are better names when used from the threadSwitcher function
switchToForeground = runInForeground
switchToBackground = runInBackground
def __runInForeground( self, function ):
self.debug_options.debugLogThreading( '__runInForeground( %r )' % (function,) )
try:
function()
except:
self.log.exception( 'foregroundProcess function failed' )
class DeferRunInForeground:
def __init__( self, app, function ):
self.app = app
self.function = function
def __call__( self, *args ):
self.app.runInForeground( self.function, args )
class ThreadSwitchScheduler:
next_instance_id = 0
def __init__( self, app, function, reason ):
self.app = app
self.function = function
self.reason = reason
self.debugLogThreading = self.app.debug_options.debugLogThreading
ThreadSwitchScheduler.next_instance_id += 1
self.instance_id = self.next_instance_id
def __call__( self, *args, **kwds ):
self.debugLogThreading( 'ThreadSwitchScheduler(%d:%s): start %r( %r, %r )' % (self.instance_id, self.reason, self.function, args, kwds) )
        # pylint: disable=bare-except
try:
# call the function
result = self.function( *args, **kwds )
# did the function run or make a generator?
if type(result) != types.GeneratorType:
self.debugLogThreading( 'ThreadSwitchScheduler(%d:%s): done (not GeneratorType)' % (self.instance_id, self.reason) )
# it ran - we are all done
return
# step the generator
self.queueNextSwitch( result )
except:
self.app.log.exception( 'ThreadSwitchScheduler(%d:%s)' % (self.instance_id, self.reason) )
def queueNextSwitch( self, generator ):
self.debugLogThreading( 'ThreadSwitchScheduler(%d:%s): generator %r' % (self.instance_id, self.reason, generator) )
# result tells where to schedule the generator to next
try:
where_to_go_next = next( generator )
except StopIteration:
# no problem all done
self.debugLogThreading( 'ThreadSwitchScheduler(%d:%s): done (StopIteration)' % (self.instance_id, self.reason) )
return
        # will be one of app.runInForeground or app.runInBackground
self.debugLogThreading( 'ThreadSwitchScheduler(%d:%s): next %r' % (self.instance_id, self.reason, where_to_go_next) )
where_to_go_next( self.queueNextSwitch, (generator,) )
#------------------------------------------------------------
#
# Used to allow a call to function on the background thread
# to block until the result return on the main thread is available
#
#------------------------------------------------------------
class GetReturnFromCallingFunctionOnMainThread:
def __init__( self, app, function ):
self.app = app
self.function = function
self.cv = threading.Condition()
self.result = None
def __call__( self, *args ):
self.app.log.debug( 'CallFunctionOnMainThread.__call__ calling %r' % self.function )
self.cv.acquire()
self.app.runInForeground( self.__onMainThread, args )
self.cv.wait()
self.cv.release()
self.app.log.debug( 'CallFunctionOnMainThread.__call__ returning %r' % self.function )
return self.result
def __onMainThread( self, *args ):
self.app.log.debug( 'CallFunctionOnMainThread._onMainThread calling %r' % self.function )
        try:
            self.result = self.function( *args )
        finally:
            # always wake the waiting background thread, even if the function raised
            self.cv.acquire()
            self.cv.notify()
            self.cv.release()
self.app.log.debug( 'CallFunctionOnMainThread._onMainThread returning %r' % self.function )
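# Example (a minimal sketch, not part of the original module) of a thread-switching
# function as described above: it is decorated with @thread_switcher and wrapped with
# app.wrapWithThreadSwitcher() before being called. The fetch/update helpers named here
# are hypothetical.
#
#   @thread_switcher
#   def app_fetchStatus( app, url ):
#       yield app.switchToBackground
#       data = fetch_slow_network_data( url )   # runs on the background thread
#       yield app.switchToForeground
#       app.updateStatusUi( data )              # runs back on the GUI thread
#
#   app.wrapWithThreadSwitcher( app_fetchStatus, reason='fetch status' )( app, url )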
|
#Example copied from the RxD tutorial
#http://www.neuron.yale.edu/neuron/static/docs/rxd/index.html
from neuron import crxd as rxd, h, gui
from matplotlib import pyplot
import numpy
pyplot.ion()
sec = h.Section()
sec.L=100
sec.diam=1
sec.nseg=100
h.CVode().active(1)
caDiff = 0.016
ip3Diff = 0.283
cac_init = 1.0e-4
ip3_init = 0.1
gip3r = 12040
gserca = 0.3913
gleak = 6.020
kserca = 0.1
kip3 = 0.15
kact = 0.4
ip3rtau = 2000.0
#These parameters were missing in the tutorial, so arbitrary values were chosen;
#any resemblance to experimental values is purely coincidental.
fc = 0.7
fe = 0.3
caCYT_init=0.1
cyt = rxd.Region(h.allsec(), name='cyt', nrn_region='i', geometry=rxd.FractionalVolume(fc,surface_fraction=1))
er= rxd.Region(h.allsec(), name='er', geometry=rxd.FractionalVolume(fe/2.))
cyt_er_membrane = rxd.Region(h.allsec(), name='mem', geometry = rxd.ScalableBorder(1, on_cell_surface=False))
ca = rxd.Species({cyt, er}, d=caDiff, name="ca", charge=2, initial=caCYT_init)
ip3 = rxd.Species(cyt, d=ip3Diff, name="ip3", initial=ip3_init)
ip3r_gate_state = rxd.Species(cyt_er_membrane, name="gate", initial=0.8)
h_gate = ip3r_gate_state[cyt_er_membrane]
minf = ip3[cyt] * 1000. * ca[cyt] / (ip3[cyt] + kip3) / (1000. * ca[cyt] + kact)
k = gip3r * (minf * h_gate) ** 3
ip3r = rxd.MultiCompartmentReaction(ca[er],ca[cyt], k, k, membrane=cyt_er_membrane)
serca = rxd.MultiCompartmentReaction(ca[cyt],ca[er], gserca/((kserca / (1000. * ca[cyt])) ** 2 + 1), membrane=cyt_er_membrane, custom_dynamics=True)
leak = rxd.MultiCompartmentReaction(ca[er],ca[cyt], gleak, gleak, membrane=cyt_er_membrane)
ip3rg = rxd.Rate(h_gate, (1. / (1 + 1000. * ca[cyt] / (0.3)) - h_gate) / ip3rtau)
h.finitialize()
cacyt_trace = h.Vector()
cacyt_trace.record(ca[cyt].nodes(sec)(.5)[0]._ref_concentration)
caer_trace = h.Vector()
caer_trace.record(ca[er].nodes(sec)(.5)[0]._ref_concentration)
ip3_trace = h.Vector()
ip3_trace.record(ip3.nodes(sec)(.5)[0]._ref_concentration)
times = h.Vector()
times.record(h._ref_t)
h.finitialize()
cae_init = (0.0017 - cac_init *fc) / fe
ca[er].concentration = cae_init
for node in ip3.nodes:
if node.x < 0.2:
node.concentration = 2
h.CVode().re_init()
h.continuerun(1000)
pyplot.plot(times,cacyt_trace,label="ca[cyt]")
pyplot.plot(times,caer_trace,label="ca[er]")
pyplot.plot(times,ip3_trace,label="ip3")
pyplot.legend()
pyplot.show()
|
from talon import Context, Module, actions, ui
ctx = Context()
ctx.matches = r"""
os: linux
"""
@ctx.action_class("user")
class Actions:
def desktop(number: int):
ui.switch_workspace(number)
def desktop_next():
actions.user.desktop(ui.active_workspace() + 1)
def desktop_last():
actions.user.desktop(ui.active_workspace() - 1)
def desktop_show():
actions.key("super")
def window_move_desktop(desktop_number: int):
ui.active_window().workspace = desktop_number
actions.user.desktop(desktop_number)
def window_move_desktop_left():
actions.user.window_move_desktop(ui.active_workspace() - 1)
def window_move_desktop_right():
actions.user.window_move_desktop(ui.active_workspace() + 1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by vici on 20/07/2017
class ClassProperty:
def __init__(self, f):
        self.f = f  # the wrapped function; called with the owning class on attribute access
print("init")
pass
def __get__(self, instance, owner):
print("get")
return self.f(owner)
def __set__(self, instance, value):
print("set")
class MyTestClass:
i = 0
def __init__(self):
pass
@ClassProperty
def my_i(self):
self.i = 6
return self.i
if __name__ == '__main__':
print(MyTestClass.my_i)
print(MyTestClass.i)
pass
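# Note (not part of the original file): because ClassProperty defines __get__ and __set__
# it is a data descriptor. Accessing MyTestClass.my_i calls __get__(None, MyTestClass), so
# inside my_i `self` is the class itself and `self.i = 6` rewrites the class attribute.
# __set__ only intercepts assignment through an instance; assigning on the class replaces
# the descriptor:
#
#   obj = MyTestClass()
#   obj.my_i = 1          # prints "set" - goes through ClassProperty.__set__
#   MyTestClass.my_i = 1  # no "set" - the descriptor is simply replaced on the class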
|
"""
simple io use case - run another process, send it input, receive output
"""
from childprocess import ChildProcessBuilder as CPB
with CPB("lua -i -e \"_PROMPT=''\"").spawn() as cp:
# hello world code from lua demos
cp.stdin.write(b'io.write("Hello world, from ",_VERSION,"!\n")\n')
print(cp.stdout.readline())
|
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
try:
import asyncio
except ImportError:
## Trollius >= 0.3 was renamed
import trollius as asyncio
from autobahn import wamp
from autobahn.asyncio.wamp import ApplicationSession
class Component(ApplicationSession):
"""
An application component that subscribes and receives events,
    and stops after having received 5 events.
"""
@asyncio.coroutine
def onJoin(self, details):
self.received = 0
## subscribe all methods on this object decorated with "@wamp.subscribe"
## as PubSub event handlers
##
results = yield from self.subscribe(self)
for res in results:
if isinstance(res, wamp.protocol.Subscription):
                ## res is a Subscription instance
print("Ok, subscribed handler with subscription ID {}".format(res.id))
else:
                ## res is a Failure instance
print("Failed to subscribe handler: {}".format(res))
@wamp.subscribe('com.myapp.topic1')
def onEvent1(self, i):
print("Got event on topic1: {}".format(i))
self.received += 1
if self.received > 5:
self.leave()
@wamp.subscribe('com.myapp.topic2')
def onEvent2(self, msg):
print("Got event on topic2: {}".format(msg))
def onDisconnect(self):
asyncio.get_event_loop().stop()
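if __name__ == '__main__':
    ## Minimal sketch for running this component standalone; the router URL and realm
    ## below are placeholders for whatever the surrounding deployment actually uses.
    from autobahn.asyncio.wamp import ApplicationRunner
    runner = ApplicationRunner(url=u"ws://127.0.0.1:8080/ws", realm=u"realm1")
    runner.run(Component)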
|
import RPi.GPIO as GPIO
import time
import os
#from gopro import run
import subprocess
def pushbutton():
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    while True:
        input_state = GPIO.input(18)
        if not input_state:  # the pin is pulled up, so a press reads low
            print('Button Pressed')
            time.sleep(0.2)  # crude debounce
            proc = subprocess.Popen(['python2 gopro.py'], shell=True)
pushbutton()
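# Alternative sketch (an assumption, not part of the original script): RPi.GPIO can also
# deliver presses via an interrupt with built-in debouncing instead of polling:
#
#   def on_press(channel):
#       subprocess.Popen(['python2 gopro.py'], shell=True)
#
#   GPIO.setmode(GPIO.BCM)
#   GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
#   GPIO.add_event_detect(18, GPIO.FALLING, callback=on_press, bouncetime=200)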
|
# -*-coding:utf-8-*-
import json
import markdown
from bson import ObjectId
from flask import request
from flask_babel import gettext
from flask_login import current_user
import time
from apps.core.flask.reqparse import arg_verify
from apps.modules.message.process.user_message import insert_user_msg
from apps.modules.upload.process.tempfile import clean_tempfile
from apps.utils.content_evaluation.content import content_inspection_text
from apps.utils.format.obj_format import json_to_pyseq
from apps.utils.text_parsing.text_parsing import richtext_extract_img
from apps.app import mdb_web
from apps.core.utils.get_config import get_config
__author__ = "Allen Woo"
def post_issue():
tid = request.argget.all('id')
title = request.argget.all('title', "").strip()
content = request.argget.all('content', "")
content_text = request.argget.all('content_text', "")
editor = request.argget.all('editor')
category = request.argget.all('category')
tags = json_to_pyseq(request.argget.all('tags', []))
issue_way = request.argget.all('issue_way', 'issue')
cover_url = request.argget.all('cover_url')
    # Tag validation
tag_max_num = get_config("post", "TAG_MAX_NUM")
if len(tags) > tag_max_num:
data = {"msg": gettext("Up to {} tags are used").format(tag_max_num),
"msg_type": "w", "http_status": 403}
return data
tags = list(set(tags))
temp_tags = ""
for tag in tags:
s, r = arg_verify(
reqargs=[
(gettext("tag"), tag)], max_len=get_config(
"post", "TAG_MAX_LEN"))
if not s:
return r
temp_tags = "{} {}".format(tag, temp_tags)
    # Category validation
try:
ObjectId(category)
except BaseException:
category = None
    # Title validation
s, r = arg_verify(
reqargs=[
(gettext("title"), title.strip())], max_len=get_config(
"post", "TITLE_MAX_LEN"), required=True)
if not s:
return r
# content
s, r = arg_verify(
reqargs=[
(gettext("content"), content.strip()), ("editor", editor)], required=True)
if not s:
return r
text_l = len(content_text)
if text_l > get_config("post", "BRIEF_LEN"):
brief_content = content_text[0:get_config("post", "BRIEF_LEN")]
else:
brief_content = content_text
s, r = arg_verify(
reqargs=[
(gettext("content"), content_text)], max_len=int(
get_config(
"post", "MAX_LEN")))
if not s:
data = r
else:
if issue_way == "issue":
issue_way = 1
else:
issue_way = 0
        # Collect images previously uploaded for this post
old_imgs = []
if tid:
            # Updating an existing post
post = mdb_web.db.post.find_one(
{"_id": ObjectId(tid), "user_id": current_user.str_id})
if post["issue_time"]:
                # Already has an issue time; keep it unchanged
issue_time = post["issue_time"]
elif issue_way:
                # Publishing for the first time
issue_time = time.time()
else:
                # Not publishing
issue_time = 0
old_imgs = post["imgs"]
elif issue_way:
            # Issue time for a newly published post
issue_time = time.time()
else:
            # A draft does not need an issue time
issue_time = 0
        # Extract the images referenced in the post content
        # If the markdown editor was used, render the markdown first
if editor == "markdown":
srcs = richtext_extract_img(richtext=markdown.markdown(content))
else:
srcs = richtext_extract_img(richtext=content)
imgs = clean_tempfile(user_id=current_user.str_id,
type="image", old_file=old_imgs,
keey_file=srcs)
if not cover_url and len(imgs) > 0:
cover_url = imgs[0]
if issue_way:
r = content_inspection_text(
"{} {} {}".format(
title, content, temp_tags))
audit_score = r["score"]
audit_label = r["label"]
if r["label"] == "detection_off" or (
"suggestion" in r and r["suggestion"] == "review"):
                # Inspection disabled or result inconclusive; wait for manual review
audited = 0
audit_way = "artificial"
elif r["label"] == "no_plugin":
                # No content-inspection plugin available
audited = 0
audit_way = "artificial"
else:
audit_label = r["label"]
audited = 1
audit_way = "auto"
else:
            # Draft
audit_label = None
audited = audit_score = 0
audit_way = "auto"
post = {
"title": title.strip(),
"content": content.strip(),
"brief_content": brief_content,
"category": category,
"tags": tags,
"issued": issue_way,
"issue_time": issue_time,
"update_time": time.time(),
"audited": audited,
"audit_score": audit_score,
"audit_user_id": None,
"audit_way": audit_way,
"audit_label": audit_label,
"word_num": text_l,
"is_delete": 0,
"imgs": imgs,
"cover_url": cover_url
}
if tid:
mdb_web.db.post.update_one({"_id": ObjectId(tid), "user_id": current_user.str_id}, {
"$set": post}, upsert=True)
else:
post["comment_num"] = 0
post["like"] = 0
post["like_user_id"] = []
post["user_id"] = current_user.str_id
post["editor"] = editor
r = mdb_web.db.post.insert_one(post)
tid = r.inserted_id
        # If the post was audited and the score exceeds the alleged-violation threshold, notify the user
if audited and issue_way and audit_score >= get_config(
"content_inspection", "ALLEGED_ILLEGAL_SCORE"):
insert_user_msg(
user_id=post["user_id"],
ctype="notice",
label="audit_failure",
title=gettext("[Label:{}]Post allegedly violated").format(audit_label),
content={
"text": post["brief_content"]},
target_id=str(tid),
target_type="post")
if issue_way:
data = {
"msg": gettext("Issue success"),
"msg_type": "s",
"http_status": 201}
else:
data = {
"msg": gettext("Save success"),
"msg_type": "s",
"http_status": 201}
return data
def post_delete():
ids = json_to_pyseq(request.argget.all('ids', []))
recycle = int(request.argget.all('recycle', 1))
if recycle:
is_delete = 1
msg = gettext("Removed to recycle bin")
else:
is_delete = 2
msg = gettext("Delete the success")
for i, tid in enumerate(ids):
ids[i] = ObjectId(tid)
r = mdb_web.db.post.update_one({"_id": {"$in": ids},
"user_id": current_user.str_id},
{"$set": {"is_delete": is_delete}})
if r.modified_count:
data = {"msg": gettext("{},{}").format(msg, r.modified_count),
"msg_type": "s", "http_status": 201}
else:
data = {
"msg": gettext("No match to relevant data"),
"msg_type": "w",
"http_status": 400}
return data
def post_restore():
ids = json_to_pyseq(request.argget.all('ids', []))
if not isinstance(ids, list):
ids = json.loads(ids)
for i, tid in enumerate(ids):
ids[i] = ObjectId(tid)
r = mdb_web.db.post.update_one({"_id": {"$in": ids},
"user_id": current_user.str_id,
"is_delete": 1},
{"$set": {"is_delete": 0}})
if r.modified_count:
data = {"msg": gettext("Restore success,{}").format(r.modified_count),
"msg_type": "s", "http_status": 201}
else:
data = {
"msg": gettext("Restore failed"),
"msg_type": "w",
"http_status": 400}
return data
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_ProjectionTrajectoriesMVOU [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_ProjectionTrajectoriesMVOU&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-mvou-covariance-evolution).
# ## Prepare the environment
# +
import os.path as path
import sys,os
sys.path.append(path.abspath('../../functions-legacy'))
from numpy import arange, imag, array, ones, zeros, cos, sin, pi, where, linspace, diag, \
sqrt, tile, r_, real, diagflat
from numpy.linalg import eig, solve
from scipy.linalg import block_diag
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, legend, xlim, ylim, subplots, xlabel, title, xticks
from matplotlib import gridspec
from matplotlib.pyplot import ylabel
from mpl_toolkits.mplot3d import Axes3D
plt.style.use('seaborn')
from pcacov import pcacov
from ARPM_utils import save_plot
from ProjMomentsVAR1MVOU import ProjMomentsVAR1MVOU
# See also A. Meucci (2009)
# "Review of Statistical Arbitrage, Cointegration, and Multivariate Ornstein-Uhlenbeck"
# available at ssrn.com
# input parameters
# horizon
t_end = 0.3 # 0.3 y
dt = 1 / 400
horiz_u = arange(0,t_end+dt,dt)
u_ = len(horiz_u)
k_ = 1 # number of real eigenvalues
j_ = 1 # pairs of complex eigenvalues
mu = zeros((k_ + 2*j_, 1)) # drift vector
theta = array([[-10 ** -5, -120, -10], [- 120, 10, 210], [- 10, -210, 10]]) # transition matrix
sigma = array([[0.50, 0.7071, 0.50], [0.7071, 2.0, 1.0], [0.50, 1.0, 1.0]]) # scatter generator
sigma2 = sigma@sigma.T
x_0 = ones((k_ + 2*j_, 1)) # initial value
# -
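# For reference (added note, following the convention of Meucci (2009) cited above): the inputs
# above define the MVOU dynamics $dX_t = (\mu - \theta X_t)\,dt + \sigma\,dW_t$, whose conditional
# moments projected in the next cells are
# $\mathbb{E}[X_{t+u}\,|\,x_t] = e^{-\theta u}x_t + (I - e^{-\theta u})\theta^{-1}\mu$ and
# $\mathrm{vec}(\Sigma_u) = (\theta \oplus \theta)^{-1}(I - e^{-(\theta \oplus \theta)u})\,\mathrm{vec}(\sigma\sigma^{\prime})$.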
# ## Compute the real block-diagonal matrix gamma, the matrix s
# ## and the initial value z_0 in the diagonalized coordinates.
lam, beta = eig(theta) # eigenvectors and eigenvalues of theta
alpha = real(beta) - imag(beta) # real matrix of eigenvectors
gamma_ja = real(lam[1])
gamma_jb = imag(lam[1])
gamma = block_diag(lam[0], array([[gamma_ja, gamma_jb], [-gamma_jb, gamma_ja]])) # real diagonal-block matrix
gamma = real(gamma)
z_0 = solve(alpha,x_0 - solve(theta,mu))
s = solve(alpha,sigma)
s2 = s@s.T
# ## Project the conditional first and second moments of the MVOU process at future horizons using function ProjMomentsVAR1MVOU in the original coordinates
# ### and compute the location-dispersion ellipsoid and the corresponding principal components.
# +
th = linspace(0,2*pi,21)
i_ = len(th)
ph = linspace(0,2*pi,21)
p_ = len(ph)
ell_x1 = zeros((i_, p_, u_))
ell_x1a = zeros((i_, p_, u_))
ell_x1b = zeros((i_, p_, u_))
princdir_x = zeros((k_ + 2*j_, k_ + 2*j_, u_)) # principal direction
x_mu_u, x_sigma2_u, x_drift_u = ProjMomentsVAR1MVOU(x_0, horiz_u, mu, theta, sigma2)
for u in range(1,u_):
[eigvec_x, eigval_x] = pcacov(x_sigma2_u[:,:, u])
for i in range(i_):
for p in range(p_):
y_x =r_[sin(th[i])*cos(ph[p]), sin(th[i])*sin(ph[p]), cos(th[i])]
# compute the location-dispersion ellipsoid
ellipsoid_x = x_drift_u[:,u] + eigvec_x@diag(sqrt(eigval_x))@y_x
ell_x1[i, p, u] = ellipsoid_x[0]
ell_x1a[i, p, u] = ellipsoid_x[1]
ell_x1b[i, p, u] = ellipsoid_x[2]
# compute the principal directions of the ellipsoid
princdir_x[:,:, u] = tile(x_drift_u[:, [u]], (1, k_ + 2*j_)) + eigvec_x@sqrt(diagflat(eigval_x))
# -
# ## Project the conditional first and second moments of the MVOU process at future horizons using function ProjMomentsVAR1MVOU in the diagonalized coordinates
# ### and compute the location-dispersion ellipsoid and the corresponding principal components.
# +
ell_z1 = zeros((i_, p_, u_))
ell_z1a = zeros((i_, p_, u_))
ell_z1b = zeros((i_, p_, u_))
princdir_z = zeros((k_ + 2*j_, k_ + 2*j_, u_))
z_mu_u, z_s2_u, z_drift_u = ProjMomentsVAR1MVOU(z_0, horiz_u, mu, gamma, s2)
for u in range(1,u_):
# compute the ellipsoid
[eigvec_z, eigval_z] = pcacov(z_s2_u[:,:, u])
for i in range(i_):
for p in range(p_):
y_z =[sin(th[i])*cos(ph[p]), sin(th[i])*sin(ph[p]), cos(th[i])]
# compute the location-dispersion ellipsoid
ellipsoid_z = z_drift_u[:,u] + eigvec_z@diag(sqrt(eigval_z))@y_z
ell_z1[i, p, u] = ellipsoid_z[0]
ell_z1a[i, p, u] = ellipsoid_z[1]
ell_z1b[i, p, u] = ellipsoid_z[2]
# compute the principal directions of the ellipsoid
princdir_z[:,:, u] = tile(z_drift_u[:, [u]], (1, k_ + 2*j_)) + eigvec_z@sqrt(diag(eigval_z))
# -
# ## Plot the conditional expectation and the location-dispersion ellipsoid stemming from the covariance
# ## both in the original coordinates and in the diagonalized coordinates at the selected horizons (2 months and 4 months),
# ## along with the principal components and the current position of the conditional mean.
# ## Then plot separately each component of the conditional expectation,
# ## both in the original coordinates and in the diagonalized ones,
# ## highlighting the current position of the conditional expectation.
# +
lgrey = [0.8, 0.8, 0.8] # light grey
hor_sel = t_end*0.5 # 2 months
hor_sel2 = t_end # 4 months
i1 = where(horiz_u == hor_sel)[0][0]
i2 = where(horiz_u == hor_sel2)[0][0]
for i in [i1,i2]:
plt.figure()
gs = gridspec.GridSpec(6, 2)
ax11 = plt.subplot(gs[:3, 0], projection='3d')
ax12 = plt.subplot(gs[3, 0])
ax13 = plt.subplot(gs[4, 0])
ax14 = plt.subplot(gs[5, 0])
ax21 = plt.subplot(gs[:3, 1], projection='3d')
ax22 = plt.subplot(gs[3, 1])
ax23 = plt.subplot(gs[4, 1])
ax24 = plt.subplot(gs[5, 1])
# 3-d graph conditional expectation and location-dispersion ellipsoid (original coordinates)
plt.sca(ax11)
ax11.view_init(16, -126)
xlim([min(x_drift_u[0]), max(x_drift_u[0])])
ylim([min(x_drift_u[1]), max(x_drift_u[1])])
ax11.set_zlim([min(x_drift_u[2,:]), max(x_drift_u[2,:])])
l1 = plot(x_drift_u[0, :i], x_drift_u[1, :i], x_drift_u[2, :i]) # conditional mean
ax11.contour(ell_x1[:,:, i], ell_x1a[:,:, i], ell_x1b[:,:, i], 15,colors=[lgrey], linewidths=0.5) # location-dispersion ellipsoid
# current position of the conditional mean
l2 = ax11.plot(x_drift_u[0,[i]],x_drift_u[1,[i]],x_drift_u[2,[i]])
    # principal directions
l3 = ax11.plot([x_drift_u[0,[i]], princdir_x[0, 0, [i]]],[x_drift_u[1, [i]], princdir_x[1, 0, [i]]],[x_drift_u[2, i], princdir_x[2, 0, i]], c='r')
ax11.plot([x_drift_u[0,[i]], princdir_x[0, 1, [i]]],[x_drift_u[1, [i]], princdir_x[1, 1, [i]]], [x_drift_u[2, i], princdir_x[2, 1, i]], c='r')
ax11.plot([x_drift_u[0,[i]], princdir_x[0, 2, [i]]],[x_drift_u[1, [i]], princdir_x[1, 2, [i]]], [x_drift_u[2, i], princdir_x[2, 2, i]], c='r')
title('Original coordinates')
xlabel('$X_1$', labelpad=10)
ylabel('$x_{1a}$', labelpad=10)
ax11.set_zlabel('$x_{1b}$', labelpad=10)
    # Components of the conditional expectation (original coordinates)
plt.sca(ax12)
# x_1
xlim([min(horiz_u), max(horiz_u)])
ylim([min(x_drift_u[0]), max(x_drift_u[0])])
# xticks(arange(0,t_end+0.1,0.1))
plot(horiz_u[:i], x_drift_u[0, :i])
plot(horiz_u[[i]],x_drift_u[0,[i]],color='g',marker='.',markersize=15)
ylabel('$x_1$',rotation=0,labelpad=10)
# x_1a
plt.sca(ax13)
xlim([min(horiz_u), max(horiz_u)])
ylim([min(x_drift_u[1]), max(x_drift_u[1])])
# xticks(arange(0,t_end+0.1,0.1))
plot(horiz_u[:i], x_drift_u[1, :i])
plot(horiz_u[i], x_drift_u[1, i],color='g',marker='.',markersize=15)
ylabel('$x_{1a}$',rotation=0,labelpad=10)
    # x_1b
plt.sca(ax14)
xlim([min(horiz_u), max(horiz_u)])
ylim([min(x_drift_u[2,:]), max(x_drift_u[2,:])])
xticks(arange(0,t_end+0.1,0.1))
plot(horiz_u[:i], x_drift_u[2, :i])
plot(horiz_u[i],x_drift_u[2, i],color='g',marker='.',markersize=15)
ylabel('$x_{1b}$',rotation=0,labelpad=10)
xlabel('Horizon')
    # 3-d graph conditional expectation and location-dispersion ellipsoid (diagonal coordinates)
plt.sca(ax21)
ax21.view_init(16, -126)
xlim([min(z_drift_u[0]), max(z_drift_u[0])])
ylim([min(z_drift_u[1]), max(z_drift_u[1])])
ax21.set_zlim([min(z_drift_u[2,:]), max(z_drift_u[2,:])])
ax21.plot(z_drift_u[0, :i], z_drift_u[1, :i], z_drift_u[2, :i]) # conditional mean
ax21.contour(ell_z1[:,:, i], ell_z1a[:,:, i], ell_z1b[:,:, i], 15, colors=[lgrey], linewidths=0.5) # location-dispersion ellipsoid
# current position of the conditional mean
ax21.plot(z_drift_u[0,[i]],z_drift_u[1,i],z_drift_u[2, i],c='g',marker='.',markersize= 15)
# principal directions
dir_z1 = plot([z_drift_u[0,i], princdir_z[0, 0, i]],[z_drift_u[1, i], princdir_z[1, 0, i]], [z_drift_u[2, i], princdir_z[2, 0, i]])
dir_z1a = plot([z_drift_u[0,i], princdir_z[0, 1, i]],[z_drift_u[1, i], princdir_z[1, 1, i]], [z_drift_u[2, i], princdir_z[2, 1, i]])
dir_z1b = plot([z_drift_u[0,i], princdir_z[0, 2, i]],[z_drift_u[1, i], princdir_z[1, 2, i]], [z_drift_u[2, i], princdir_z[2, 2, i]])
xlabel('$Z_1$', labelpad=10)
ylabel('$z_{1a}$', labelpad=10)
ax21.set_zlabel('$z_{1b}$', labelpad=10)
title('Diagonal coordinates')
# Components of the conditional expectation (diagonal coordinates)
# z_1
plt.sca(ax22)
xlim([min(horiz_u), max(horiz_u)])
ylim([min(z_drift_u[0]), max(z_drift_u[0])])
# xticks(arange(0,t_end+0.1,0.1))
plot(horiz_u[:i], z_drift_u[0, :i])
plot(horiz_u[i], z_drift_u[0,i], color='g',marker='.',markersize=15)
ylabel('$z_1$',rotation=0,labelpad=10)
# z_1a
plt.sca(ax23)
xlim([min(horiz_u), max(horiz_u)])
ylim([min(z_drift_u[1]), max(z_drift_u[1])])
# xticks(arange(0,t_end+0.1,0.1))
plot(horiz_u[:i], z_drift_u[1, :i])
plot(horiz_u[i], z_drift_u[1, i],color='g',marker='.',markersize=15)
ylabel('$z_{1a}$',rotation=0,labelpad=10)
# z_1b
plt.sca(ax24)
xlim([min(horiz_u), max(horiz_u)])
ylim([min(z_drift_u[2,:]), max(z_drift_u[2,:])])
plot(horiz_u[:i], z_drift_u[2, :i])
plot(horiz_u[i],z_drift_u[2, i], color='g',marker='.',markersize=15)
xlabel('Horizon')
ylabel('$z_{1b}$',rotation=0,labelpad=10)
l4 = ax24.plot(0,0,c=lgrey);
plt.sca(ax11)
legend(handles=[l1[0],l4[0],l3[0],l2[0]],
labels=['Conditional expect.','Conditional covar.','Principal dir.','Current pos.'],
bbox_to_anchor=(0., 1.01, 2.2, .122), loc='upper center',
ncol=4, mode="expand");
plt.tight_layout()
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
|
# -*- coding: utf-8 -*-
import json
class ColorDump:
"""
Provide some method to dump ``colors.naming.ColorNames`` results as JSON
or Sass.
"""
def as_json(self, datas, fp=None):
"""
        Dump given data as JSON to a file or return it as a string.
        Args:
            datas (object): Any object suitable to be encoded to JSON.
        Keyword args:
            fp (file object): Optional file object to write the JSON to.
        Returns:
            string: Either the JSON data as a string if no file object was
            given, or the file object's filename.
"""
if fp:
json.dump(datas, fp=fp, indent=4)
return fp.name
else:
return json.dumps(datas, indent=4) |
import uuid
from .. import models
from ...objects import PlotContext, PlotObject, recursively_traverse_plot_object
import logging
log = logging.getLogger(__name__)
def prune_and_get_valid_models(session, delete=False):
"""retrieve all models that the plot_context points to.
if delete is True,
wipe out any models that are orphaned. Also call transform_models, which
performs any backwards compatability data transformations.
"""
objs = recursively_traverse_plot_object(session.plotcontext)
print("num models", len(objs))
if delete:
for obj in session._models.values():
if obj not in objs:
#not impl yet...
session.del_obj(obj)
return objs
def new_doc(flaskapp, docid, title, session, rw_users=None, r_users=None,
apikey=None, readonlyapikey=None):
if not apikey: apikey = str(uuid.uuid4())
if not readonlyapikey: readonlyapikey = str(uuid.uuid4())
plot_context = PlotContext()
session.add(plot_context)
session.store_all()
if rw_users is None: rw_users = []
if r_users is None: r_users = []
doc = Doc(docid, title, rw_users, r_users,
session.get_ref(plot_context), apikey, readonlyapikey)
doc.save(flaskapp.servermodel_storage)
return doc
class Doc(models.ServerModel):
typename = 'doc'
idfield = 'docid'
def __init__(self, docid, title, rw_users, r_users,
plot_context_ref, apikey, readonlyapikey):
self.docid = docid
self.title = title
self.rw_users = rw_users
self.r_users = r_users
self.plot_context_ref = plot_context_ref
self.apikey = apikey
self.readonlyapikey = readonlyapikey
def to_json(self):
return {'docid' : self.docid,
'title' : self.title,
'rw_users' : self.rw_users,
'r_users' : self.r_users,
'plot_context_ref' : self.plot_context_ref,
'apikey' : self.apikey,
'readonlyapikey' : self.readonlyapikey
}
@classmethod
def load(cls, client, objid):
attrs = cls.load_json(client, objid)
#adding readonly api key if it's not there
if 'readonlyapikey' not in attrs:
attrs['readonlyapikey'] = str(uuid.uuid4())
obj = cls.from_json(attrs)
obj.save(client)
return obj
@staticmethod
def from_json(obj):
return Doc(obj['docid'], obj['title'],
obj['rw_users'], obj['r_users'],
obj['plot_context_ref'], obj['apikey'],
obj['readonlyapikey'])
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from cloudify.endpoint import ManagerEndpoint, LocalEndpoint
from cloudify.logs import init_cloudify_logger
from cloudify import exceptions
DEPLOYMENT = 'deployment'
NODE_INSTANCE = 'node-instance'
RELATIONSHIP_INSTANCE = 'relationship-instance'
class ContextCapabilities(object):
"""Maps from instance relationship target ids to their respective
runtime properties
"""
def __init__(self, endpoint, instance):
self._endpoint = endpoint
self.instance = instance
self._relationship_runtimes = None
def _find_item(self, key):
"""
Returns the capability for the provided key by iterating through all
dependency nodes available capabilities.
"""
ls = [caps for caps in self._capabilities.values() if key in caps]
if len(ls) == 0:
return False, None
if len(ls) > 1:
raise exceptions.NonRecoverableError(
"'{0}' capability ambiguity [capabilities={1}]".format(
key, self._capabilities))
return True, ls[0][key]
def __getitem__(self, key):
found, value = self._find_item(key)
if not found:
raise exceptions.NonRecoverableError(
"capability '{0}' not found [capabilities={1}]".format(
key, self._capabilities))
return value
def __contains__(self, key):
found, _ = self._find_item(key)
return found
def get_all(self):
"""Returns all capabilities as dict."""
return self._capabilities
def __str__(self):
return ('<' + self.__class__.__name__ + ' ' +
str(self._capabilities) + '>')
@property
def _capabilities(self):
if self._relationship_runtimes is None:
self._relationship_runtimes = {}
for relationship in self.instance.relationships:
self._relationship_runtimes.update({
relationship.target.instance.id:
relationship.target.instance.runtime_properties
})
return self._relationship_runtimes
class CommonContext(object):
def __init__(self, ctx=None):
self._context = ctx or {}
self._local = ctx.get('local', False)
if self._local:
# there are times when this instance is instantiated merely for
# accessing the attributes so we can tolerate no storage (such is
# the case in logging)
self._endpoint = LocalEndpoint(self, ctx.get('storage'))
else:
self._endpoint = ManagerEndpoint(self)
self.blueprint = BlueprintContext(self._context)
self.deployment = DeploymentContext(self._context)
class BootstrapContext(object):
"""
    Holds the bootstrap context that was posted to the REST service
    (usually during the bootstrap process).
"""
class PolicyEngine(object):
"""Cloudify policy engine related configuration"""
def __init__(self, policy_engine):
self._policy_engine = policy_engine
@property
def start_timeout(self):
"""
Returns the number of seconds to wait for the policy engine
to start
"""
return self._policy_engine.get('start_timeout')
class CloudifyAgent(object):
"""Cloudify agent related bootstrap context properties."""
def __init__(self, cloudify_agent):
self._cloudify_agent = cloudify_agent
@property
def min_workers(self):
"""Returns the minimum number of workers for agent hosts."""
return self._cloudify_agent.get('min_workers')
@property
def max_workers(self):
"""Returns the maximum number of workers for agent hosts."""
return self._cloudify_agent.get('max_workers')
@property
def user(self):
"""
Returns the username used when SSH-ing during agent
installation.
"""
return self._cloudify_agent.get('user')
@property
def remote_execution_port(self):
"""
Returns the port used when SSH-ing during agent
installation.
"""
return self._cloudify_agent.get('remote_execution_port')
@property
def agent_key_path(self):
"""
Returns the path to the key file on the management machine
used when SSH-ing during agent installation.
"""
return self._cloudify_agent.get('agent_key_path')
def __init__(self, bootstrap_context):
self._bootstrap_context = bootstrap_context
cloudify_agent = bootstrap_context.get('cloudify_agent', {})
policy_engine = bootstrap_context.get('policy_engine', {})
self._cloudify_agent = self.CloudifyAgent(cloudify_agent)
self._policy_engine = self.PolicyEngine(policy_engine)
@property
def cloudify_agent(self):
"""
Returns Cloudify agent related bootstrap context data
:rtype: CloudifyAgent
"""
return self._cloudify_agent
@property
def policy_engine(self):
"""
Returns Cloudify policy engine related bootstrap context data
:rtype: PolicyEngine
"""
return self._policy_engine
@property
def resources_prefix(self):
"""
Returns the resources prefix that was configured during bootstrap.
An empty string is returned if the resources prefix was not configured.
"""
return self._bootstrap_context.get('resources_prefix', '')
class EntityContext(object):
def __init__(self, context, **_):
self._context = context
class BlueprintContext(EntityContext):
@property
def id(self):
"""The blueprint id the plugin invocation belongs to."""
return self._context.get('blueprint_id')
class DeploymentContext(EntityContext):
@property
def id(self):
"""The deployment id the plugin invocation belongs to."""
return self._context.get('deployment_id')
class NodeContext(EntityContext):
def __init__(self, *args, **kwargs):
super(NodeContext, self).__init__(*args, **kwargs)
self._endpoint = kwargs['endpoint']
self._node = None
def _get_node_if_needed(self):
if self._node is None:
self._node = self._endpoint.get_node(self.id)
props = self._node.get('properties', {})
self._node['properties'] = ImmutableProperties(props)
@property
def id(self):
"""The node's id"""
return self.name
@property
def name(self):
"""The node's name"""
return self._context.get('node_name')
@property
def properties(self):
"""The node properties as dict (read-only).
These properties are the properties specified in the blueprint.
"""
self._get_node_if_needed()
return self._node.properties
class NodeInstanceContext(EntityContext):
def __init__(self, *args, **kwargs):
super(NodeInstanceContext, self).__init__(*args, **kwargs)
self._endpoint = kwargs['endpoint']
self._node = kwargs['node']
self._modifiable = kwargs['modifiable']
self._node_instance = None
self._host_ip = None
self._relationships = None
def _get_node_instance_if_needed(self):
if self._node_instance is None:
self._node_instance = self._endpoint.get_node_instance(self.id)
self._node_instance.runtime_properties.modifiable = \
self._modifiable
@property
def id(self):
"""The node instance id."""
return self._context.get('node_id')
@property
def runtime_properties(self):
"""The node instance runtime properties as a dict (read-only).
Runtime properties are properties set during the node instance's
lifecycle.
Retrieving runtime properties involves a call to Cloudify's storage.
"""
self._get_node_instance_if_needed()
return self._node_instance.runtime_properties
def update(self):
"""
Stores new/updated runtime properties for the node instance in context
in Cloudify's storage.
        This method should be invoked only if it is necessary to immediately
update Cloudify's storage with changes. Otherwise, the method is
automatically invoked as soon as the task execution is over.
"""
if self._node_instance is not None and self._node_instance.dirty:
self._endpoint.update_node_instance(self._node_instance)
self._node_instance = None
def _get_node_instance_ip_if_needed(self):
self._get_node_instance_if_needed()
if self._host_ip is None:
if self.id == self._node_instance.host_id:
self._host_ip = self._endpoint.get_host_node_instance_ip(
host_id=self.id,
properties=self._node.properties,
runtime_properties=self.runtime_properties)
else:
self._host_ip = self._endpoint.get_host_node_instance_ip(
host_id=self._node_instance.host_id)
@property
def host_ip(self):
"""
Returns the node instance host ip address.
        This value is derived by reading the ``host_id`` from the relevant
node instance and then reading its ``ip`` runtime property or its
node_state ``ip`` property.
"""
self._get_node_instance_ip_if_needed()
return self._host_ip
@property
def relationships(self):
"""Returns a list of this instance relationships
:return: list of RelationshipContext
:rtype: list
"""
self._get_node_instance_if_needed()
if self._relationships is None:
self._relationships = [
RelationshipContext(relationship, self._endpoint, self._node)
for relationship in self._node_instance.relationships]
return self._relationships
class RelationshipContext(EntityContext):
"""Holds relationship instance data"""
def __init__(self, relationship_context, endpoint, node):
super(RelationshipContext, self).__init__(relationship_context)
self._node = node
target_context = {
'node_name': relationship_context['target_name'],
'node_id': relationship_context['target_id']
}
self._target = RelationshipSubjectContext(target_context, endpoint,
modifiable=False)
self._type_hierarchy = None
@property
def target(self):
"""Returns a holder for target node and target instance
:rtype: RelationshipSubjectContext
"""
return self._target
@property
def type(self):
"""The relationship type"""
return self._context.get('type')
@property
def type_hierarchy(self):
"""The relationship type hierarchy"""
if self._type_hierarchy is None:
self._node._get_node_if_needed()
node_relationships = self._node._node.relationships
self._type_hierarchy = [
r for r in node_relationships if
r['type'] == self.type][0]['type_hierarchy']
return self._type_hierarchy
class RelationshipSubjectContext(object):
"""Holds reference to node and node instance.
    Obtained in relationship operations via `ctx.source` and `ctx.target`, and
    when iterating over an instance's relationships, by reading
    `relationship.target` for each relationship.
"""
def __init__(self, context, endpoint, modifiable):
self._context = context
self.node = NodeContext(context,
endpoint=endpoint)
self.instance = NodeInstanceContext(context,
endpoint=endpoint,
node=self.node,
modifiable=modifiable)
class CloudifyContext(CommonContext):
"""
A context object passed to plugins tasks invocations.
The context object is used in plugins when interacting with
the Cloudify environment::
from cloudify import ctx
@operation
def my_start(**kwargs):
# port is a property that was configured on the current instance's
# node
port = ctx.node.properties['port']
start_server(port=port)
"""
def __init__(self, ctx=None):
super(CloudifyContext, self).__init__(ctx=ctx)
self._logger = None
self._provider_context = None
self._bootstrap_context = None
self._host_ip = None
self._node = None
self._instance = None
self._source = None
self._target = None
self._operation = OperationContext(self._context.get('operation', {}))
capabilities_node_instance = None
if 'related' in self._context:
if self._context['related']['is_target']:
source_context = self._context
target_context = self._context['related']
else:
source_context = self._context['related']
target_context = self._context
self._source = RelationshipSubjectContext(source_context,
self._endpoint,
modifiable=True)
self._target = RelationshipSubjectContext(target_context,
self._endpoint,
modifiable=True)
if self._context['related']['is_target']:
capabilities_node_instance = self._source.instance
else:
capabilities_node_instance = self._target.instance
elif self._context.get('node_id'):
self._node = NodeContext(self._context,
endpoint=self._endpoint)
self._instance = NodeInstanceContext(self._context,
endpoint=self._endpoint,
node=self._node,
modifiable=True)
capabilities_node_instance = self._instance
self._capabilities = ContextCapabilities(self._endpoint,
capabilities_node_instance)
def _verify_in_node_context(self):
if self.type != NODE_INSTANCE:
raise exceptions.NonRecoverableError(
'ctx.node/ctx.instance can only be used in a {0} context but '
'used in a {1} context.'.format(NODE_INSTANCE, self.type))
def _verify_in_relationship_context(self):
if self.type != RELATIONSHIP_INSTANCE:
raise exceptions.NonRecoverableError(
'ctx.source/ctx.target can only be used in a {0} context but '
'used in a {1} context.'.format(RELATIONSHIP_INSTANCE,
self.type))
def _verify_in_node_or_relationship_context(self):
if self.type not in [NODE_INSTANCE, RELATIONSHIP_INSTANCE]:
raise exceptions.NonRecoverableError(
'capabilities can only be used in a {0}/{1} context but '
'used in a {2} context.'.format(NODE_INSTANCE,
RELATIONSHIP_INSTANCE,
self.type))
@property
def instance(self):
"""The node instance the operation is executed for.
This property is only relevant for NODE_INSTANCE context operations.
"""
self._verify_in_node_context()
return self._instance
@property
def node(self):
"""The node the operation is executed for.
This property is only relevant for NODE_INSTANCE context operations.
"""
self._verify_in_node_context()
return self._node
@property
def source(self):
"""Provides access to the relationship's operation source node and
node instance.
This property is only relevant for relationship operations.
"""
self._verify_in_relationship_context()
return self._source
@property
def target(self):
"""Provides access to the relationship's operation target node and
node instance.
This property is only relevant for relationship operations.
"""
self._verify_in_relationship_context()
return self._target
@property
def type(self):
"""The type of this context.
Available values:
- DEPLOYMENT
- NODE_INSTANCE
- RELATIONSHIP_INSTANCE
"""
if self._source:
return RELATIONSHIP_INSTANCE
if self._instance:
return NODE_INSTANCE
return DEPLOYMENT
@property
def execution_id(self):
"""
The workflow execution id the plugin invocation was requested from.
This is a unique value which identifies a specific workflow execution.
"""
return self._context.get('execution_id')
@property
def workflow_id(self):
"""
The workflow id the plugin invocation was requested from.
For example:
``install``, ``uninstall`` etc...
"""
return self._context.get('workflow_id')
@property
def task_id(self):
"""The plugin's task invocation unique id."""
return self._context.get('task_id')
@property
def task_name(self):
"""The full task name of the invoked task."""
return self._context.get('task_name')
@property
def task_target(self):
"""The task target (RabbitMQ queue name)."""
return self._context.get('task_target')
@property
def plugin(self):
"""The plugin name of the invoked task."""
return self._context.get('plugin')
@property
def operation(self):
"""
The current operation context.
"""
return self._operation
@property
def capabilities(self):
"""Maps from instance relationship target ids to their respective
runtime properties
NOTE: This feature is deprecated, use 'instance.relationships' instead.
"""
self._verify_in_node_or_relationship_context()
        warnings.warn('capabilities is deprecated, use instance.relationships '
                      'instead', DeprecationWarning)
return self._capabilities
@property
def logger(self):
"""
A Cloudify context aware logger.
Use this logger in order to index logged messages in ElasticSearch
using logstash.
"""
if self._logger is None:
self._logger = self._init_cloudify_logger()
return self._logger
@property
def bootstrap_context(self):
"""
System context provided during the bootstrap process
:rtype: BootstrapContext
"""
if self._bootstrap_context is None:
context = self._endpoint.get_bootstrap_context()
self._bootstrap_context = BootstrapContext(context)
return self._bootstrap_context
def send_event(self, event):
"""
Send an event to rabbitmq
:param event: the event message
"""
self._endpoint.send_plugin_event(message=event)
@property
def provider_context(self):
"""Gets provider context which contains provider specific metadata."""
if self._provider_context is None:
self._provider_context = self._endpoint.get_provider_context()
return self._provider_context
def get_resource(self, resource_path):
"""
Retrieves a resource bundled with the blueprint as a string.
:param resource_path: the path to the resource. Note that this path is
relative to the blueprint file which was
uploaded.
"""
return self._endpoint.get_blueprint_resource(self.blueprint.id,
resource_path)
def download_resource(self, resource_path, target_path=None):
"""
Retrieves a resource bundled with the blueprint and saves it under a
local file.
:param resource_path: the path to the resource. Note that this path is
relative to the blueprint file which was
uploaded.
:param target_path: optional local path (including filename) to store
the resource at on the local file system.
If missing, the location will be a tempfile with a
generated name.
:returns: The path to the resource on the local file system (identical
to target_path parameter if used).
:raises: ``cloudify.exceptions.HttpException`` on any kind
of HTTP Error.
:raises: ``IOError`` if the resource
failed to be written to the local file system.
"""
return self._endpoint.download_blueprint_resource(self.blueprint.id,
resource_path,
self.logger,
target_path)
def _init_cloudify_logger(self):
logger_name = self.task_id if self.task_id is not None \
else 'cloudify_plugin'
handler = self._endpoint.get_logging_handler()
return init_cloudify_logger(handler, logger_name)
class OperationContext(object):
def __init__(self, operation_context):
self._operation_context = operation_context or {}
if not isinstance(self._operation_context, dict):
raise exceptions.NonRecoverableError(
'operation_context is expected to be a dict but is:'
'{0}'.format(self._operation_context))
self._operation_retry = None
@property
def name(self):
"""The name of the operation."""
return self._operation_context.get('name')
@property
def retry_number(self):
"""The retry number (relevant for retries and recoverable errors)."""
return self._operation_context.get('retry_number')
@property
def max_retries(self):
"""The maximum number of retries the operation can have."""
return self._operation_context.get('max_retries')
def retry(self, message=None, retry_after=None):
"""Specifies that this operation should be retried.
Usage:
return ctx.operation.retry(message='...', retry_after=1000)
        :param message: A text message containing information about the reason
                        for retrying the operation.
        :param retry_after: How many seconds should the workflow engine wait
                            before re-executing the operation.
"""
self._operation_retry = exceptions.OperationRetry(
message=message,
retry_after=retry_after)
class ImmutableProperties(dict):
"""
Of course this is not actually immutable, but it is good enough to provide
an API that will tell you you're doing something wrong if you try updating
the static node properties in the normal way.
"""
@staticmethod
def _raise():
raise exceptions.NonRecoverableError(
'Cannot override read only properties')
def __setitem__(self, key, value):
self._raise()
def __delitem__(self, key):
self._raise()
def update(self, E=None, **F):
self._raise()
def clear(self):
self._raise()
def pop(self, k, d=None):
self._raise()
def popitem(self):
self._raise()
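# Illustrative sketch of the intended behaviour (hypothetical values): reads work
# as for a plain dict, while any mutation attempt raises NonRecoverableError.
#
#   props = ImmutableProperties({'port': 8080})
#   props['port']        # -> 8080
#   props['port'] = 80   # raises exceptions.NonRecoverableError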
|
import json
from functools import reduce
import re
def get_data(str_obj, keys):
"""Extract data from a string-like json object.
Returns a dictionary made up of the 'keys' passed as arguments (expressed in dot notation)
and the corresponding value.
    If a key does not exist, the resulting dictionary will not contain that key.
Example
INPUT:
a= '{
"guid": "1234",
"content": {
"type": "text/html",
"title": "Challenge 1",
"entities": [ "1.2.3.4", "wannacry", "malware.com"]
},
"score": 74,
"time": 1574879179
}'
b = ["guid", "content.entities", "score", "score.sign"]
> get_data(a,b)
OUTPUT:
{ "guid": "1234", "content.entities": [ "1.2.3.4", "wannacry", "malware.com"], "score": 74 }
"""
obj = {}
if not str_obj:
return obj
try:
obj = json.loads(str_obj)
except ValueError:
raise
result = {}
for key in keys:
value = reduce(apply, key.split("."), obj)
if value:
result[key] = value
return result
def apply(dic, key):
partial = None
    match = re.search(r"\[(\d)\]$", key)  # match an array index suffix such as "[0]"
    if match:
        idx = int(match.group(1))
        key_name = key[:match.start()]
if isinstance(dic[key_name], list):
partial = dic[key_name][idx]
elif dic and not isinstance(dic, list):
partial = dic.get(key, None)
return partial
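# A small usage sketch (hypothetical values): dot notation combined with the
# "[index]" suffix handled by apply().
if __name__ == "__main__":
    _example = json.dumps({
        "guid": "1234",
        "content": {"entities": ["1.2.3.4", "wannacry", "malware.com"]},
        "score": 74,
    })
    # Expected output:
    # {'guid': '1234', 'content.entities[0]': '1.2.3.4', 'score': 74}
    print(get_data(_example, ["guid", "content.entities[0]", "score", "missing.key"]))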
|
import timeit
import matplotlib.pyplot as plt
from Bubble_Sort import bubbleSort
from Merge_Sort import mergeSort
from Quick_Sort import quickSort
inputList = [8, 9, 1, 9, 8, 6, 5, 3, 2, 1, 57, 13, 23, 1, 123, 9, 1, 9, 8, 6,
5, 3, 2, 1, 57, 13, 23, 1, 123, 9, 1, 9, 8, 6, 5, 3, 2, 1, 57, 13, 23, 1, 123, 3, 2, 1, 57, 13, 23, 1, 123, 9, 1, 9, 8, 6, 5, 3, 2, 1, 57, 13, 23, 1, 123, 3, 2, 1, 57, 13, 23, 1, 123, 9, 1, 9, 8, 6, 5, 3, 2, 1, 57, 13, 23, 1, 123, 3, 2, 1, 57, 13, 23, 1, 123, 9, 1, 9, 8, 6, 5, 3, 2, 1, 57, 13, 23, 1, 123, 3, 2, 1, 57, 13, 23, 1, 123]
lengthInput = len(inputList)
loops = 1000
quickTime = timeit.timeit('quickSort(inputList.copy())',
'from __main__ import quickSort, inputList', number=loops)/loops
bubbleTime = timeit.timeit('bubbleSort(inputList.copy())',
'from __main__ import bubbleSort, inputList', number=loops)/loops
mergeTime = timeit.timeit('mergeSort(inputList.copy(), lengthInput)',
'from __main__ import mergeSort, inputList, lengthInput', number=loops)/loops
left = [1, 2, 3]
height = [bubbleTime, mergeTime, quickTime]
label = ['Bubble Sort', 'Merge Sort', 'Quick Sort']
plt.title('Length of array: ' + str(lengthInput) +
' Measurements: ' + str(loops))
plt.ylabel('seconds')
plt.gcf().canvas.set_window_title('Comparison of sorting algorithms')
plt.bar(left, height, tick_label=label,
width=0.8, color=['blue', 'red', 'green'])
plt.show()
|
''' http://www.biopython.org/DIST/docs/api/Bio.KDTree.KDTree%27-module.html '''
def photoz(list):
import sys, pyfits, os
file = os.environ['sne'] + '/cosmos/cosmos_zphot_mag25.nums.fits'
hdulist = pyfits.open(file)
table = hdulist["OBJECTS"].data
r = []
for i in list[0]:
r.append(table.field('zp_best')[i])
    print(r)
import pylab, scipy
a = scipy.array(r)
a, b, varp = pylab.hist(a,bins=scipy.arange(0,4,0.05))
pylab.xlabel("Z")
pylab.ylabel("Number of Galaxies")
pylab.show()
    input()
return
def tree(start,end):
import sys, pyfits, os
#caltable = '/tmp/' + cluster + 'output.cat' #sys.argv[1]
#print cluster, caltable
#hdulist = pyfits.open(caltable)
#table = hdulist["OBJECTS"].data
from scipy.spatial import KDTree
file = os.environ['sne'] + '/cosmos/cosmos_zphot_mag25.nums.fits'
#file = os.environ['subdir'] + '/MACS1423+24/PHOTOMETRY/MACS1423+24.slr.cat'
hdulist = pyfits.open(file)
table = hdulist["OBJECTS"].data
array = []
cols = []
lim_mags = {}
#for filter in ['MAG_APER1-MEGAPRIME-0-1-u']: # ['umag','bmag','vmag','gmag','rmag','imag','zmag']: #,'icmag','jmag','kmag']:
for filter in ['umag','bmag','vmag','gmag','rmag','imag','zmag']: #,'icmag','jmag','kmag']:
        print(hdulist['OBJECTS'].columns)
for column in hdulist['OBJECTS'].columns:
if filter == column.name:
                print(column.format)
cols.append(pyfits.Column(name=filter,format=column.format,array=hdulist['OBJECTS'].data.field(filter)[start:end]))
#import pylab, scipy
l = hdulist['OBJECTS'].data.field(filter)[start:end]
#a,b,varp = pylab.hist(l,bins=scipy.arange(20,30,0.1))
#print a, b
#c = zip(a,b)
#c.sort()
#lim_mags[filter] = c[-1][1]
#pylab.xlabel('Mag')
#pylab.ylabel('Number of Galaxies')
#pylab.show()
    print(cols)
tbhdu=pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
from copy import copy
tbhdu_good=copy(tbhdu)
#mask = reduce(lambda x,y:x*y,[tbhdu.data.field(filter) < (lim_mags[filter]-1.) for filter in lim_mags.keys()])
#print len(tbhdu_good.data.field('umag')[mask])
for filter in ['umag','bmag','vmag','gmag','rmag','imag','zmag']: #,'icmag','jmag','kmag']:
        print(hdulist['OBJECTS'].columns)
for column in hdulist['OBJECTS'].columns:
if filter == column.name:
                print(column.format)
cols.append(pyfits.Column(name=filter,format=column.format,array=hdulist['OBJECTS'].data.field(filter)[start:end]))
#import pylab, scipy
#l = hdulist['OBJECTS'].data.field(filter)[mask][0:length]
#pylab.clf()
#a,b,varp = pylab.hist(l,bins=scipy.arange(20,30,0.1))
#print a, b
#c = zip(a,b)
#c.sort()
#lim_mags[filter] = c[-1][1]
#pylab.xlabel('Mag')
#pylab.ylabel('Number of Galaxies')
#pylab.show()
tbhdu_bad=copy(tbhdu)
import scipy
p = scipy.array([[tbhdu.data[2200][i] for i in range(7)]])
    print(p)
#return KDTree(p)
hdu = pyfits.PrimaryHDU()
thdulist = pyfits.HDUList([hdu,tbhdu])
#os.system('rm temp.fits')
#thdulist.writeto('temp.fits')
import numpy
sarray = (tbhdu.data.tolist())
    print(numpy.shape(sarray))
#a = KDTree(sarray)
    print(lim_mags)
return sarray
|
from discord.ext import commands
from bot.exceptions import MemberNotRegistered
from bot.utils import create_embed
from bot.utils import get_tb_message
class ErrorHandler(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.error_messages = {
commands.MissingRequiredArgument: "Invalid arguments",
MemberNotRegistered: ""
}
self.ignored_commands = [
"me"
]
self.ignored_errors = [
commands.CommandNotFound,
commands.CheckFailure,
commands.BadArgument
]
def is_ignored(self, ctx, error):
result = [type(error) in self.ignored_errors]
if ctx.command is not None:
result.append(ctx.command.name in self.ignored_commands)
return any(result)
async def get_error_channel(self):
return await self.bot.fetch_channel(505689773961117706)
@commands.Cog.listener()
async def on_error(self, event):
print(">>> on_error:", type(event))
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
channel = ctx.channel
if self.is_ignored(ctx, error):
return
if type(error) is commands.CommandInvokeError:
error = error.original
title = f"Error: {type(error).__name__}"
desc = self.error_messages.get(type(error))
if desc is None:
channel = await self.get_error_channel()
desc = (f"{ctx.author.mention}: `{ctx.message.content}`\n"
f"```py\n"
f"{get_tb_message(error)}\n"
f"```")
await channel.send(embed=create_embed(title, desc))
def setup(bot):
bot.add_cog(ErrorHandler(bot))
|
import itertools
import multiprocessing
import os
import subprocess
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Tuple, Callable, Union
import pandas as pd
from memory_profiler import profile
from networkx import DiGraph
from simod.readers.log_splitter import LogSplitter
from tqdm import tqdm
from . import support_utils as sup
from .analyzers import sim_evaluator
from .cli_formatter import print_step, print_notice
from .configuration import CalculationMethod, DataType, GateManagement
from .configuration import Configuration, PDFMethod, Metric
from .extraction.interarrival_definition import InterArrivalEvaluator
from .extraction.role_discovery import ResourcePoolAnalyser
from .extraction.schedule_tables import TimeTablesCreator
from .extraction.tasks_evaluator import TaskEvaluator
from .readers import bpmn_reader
from .readers import process_structure
from .readers.log_reader import LogReader
from .replayer_datatypes import BPMNGraph
# NOTE: This module needs a better name and possibly some refactoring. At the moment it contains an API suitable
# for discovery and optimization. Previously, functions like these were often implemented as static methods on specific
# classes, introducing code duplication. We can stay with the functional approach, as proposed at the moment, or create
# a more general class for Discoverer and Optimizer to inherit all the general routines from.
@dataclass
class ProcessParameters:
instances: Union[int, None] = None
start_time: Union[str, None] = None
process_stats: pd.DataFrame = field(default_factory=pd.DataFrame)
resource_table: pd.DataFrame = field(default_factory=pd.DataFrame)
# conformant_traces: list = field(default_factory=list)
resource_pool: list = field(default_factory=list)
time_table: List[str] = field(default_factory=list)
arrival_rate: dict = field(default_factory=dict)
sequences: list = field(default_factory=list)
elements_data: list = field(default_factory=list)
# @profile(stream=open('logs/memprof_extract_structure_parameters.log', 'a+'))
def extract_structure_parameters(settings: Configuration, process_graph, log: LogReader,
model_path: Path) -> ProcessParameters:
settings.pdef_method = PDFMethod.DEFAULT # TODO: why do we overwrite it here?
traces = log.get_traces()
log_df = pd.DataFrame(log.data)
# process_stats, conformant_traces = replay_logs(process_graph, traces, settings)
resource_pool, time_table = mine_resources_wrapper(settings)
arrival_rate = mine_inter_arrival(process_graph, log_df, settings)
bpmn_graph = BPMNGraph.from_bpmn_path(model_path)
# sequences = mine_gateway_probabilities_stochastic(traces_raw, bpmn_graph)
# sequences = mine_gateway_probabilities_alternative(traces_raw, bpmn_graph)
sequences = mine_gateway_probabilities_alternative_with_gateway_management(
traces, bpmn_graph, settings.gate_management)
log_df['role'] = 'SYSTEM' # TODO: why is this necessary? in which case?
elements_data = process_tasks(process_graph, log_df, resource_pool, settings)
log_df = pd.DataFrame(log.data)
# num_inst = len(log_df.caseid.unique()) # TODO: should it be log_train or log_valdn
# start_time = log_df.start_timestamp.min().strftime("%Y-%m-%dT%H:%M:%S.%f+00:00")
return ProcessParameters(
process_stats=log_df,
resource_pool=resource_pool,
time_table=time_table,
arrival_rate=arrival_rate,
sequences=sequences,
elements_data=elements_data,
)
def extract_times_parameters(settings: Configuration, process_graph, log: LogReader, conformant_traces,
process_stats) -> ProcessParameters:
settings.pdef_method = PDFMethod.AUTOMATIC
time_table, resource_pool, resource_table = mine_resources_with_resource_table(log, settings)
arrival_rate = mine_inter_arrival(process_graph, conformant_traces, settings)
process_stats = process_stats.merge(resource_table, left_on='user', right_on='resource', how='left')
elements_data = process_tasks(process_graph, process_stats, resource_pool, settings)
return ProcessParameters(
time_table=time_table,
resource_pool=resource_pool,
resource_table=resource_table,
arrival_rate=arrival_rate,
elements_data=elements_data,
)
def mine_resources_wrapper(settings: Configuration) -> Tuple[list, List[str]]: # TODO: maybe this one is unnecessary
"""Analysing resource pool LV917 or 247"""
print_step('Resource Miner')
parameters = mine_resources(settings)
return parameters['resource_pool'], parameters['time_table']
def mine_inter_arrival(process_graph: DiGraph, conformant_traces: pd.DataFrame, settings: Configuration) -> dict:
print_step('Inter-arrival Miner')
inter_evaluator = InterArrivalEvaluator(process_graph, conformant_traces, settings)
return inter_evaluator.dist
def compute_sequence_flow_frequencies(log_traces: list, bpmn_graph: BPMNGraph):
flow_arcs_frequency = dict()
for trace in log_traces:
task_sequence = [event['task'] for event in trace]
bpmn_graph.replay_trace(task_sequence, flow_arcs_frequency)
return flow_arcs_frequency
def mine_gateway_probabilities(log_traces: list, bpmn_graph: BPMNGraph) -> list:
print_step('Mining gateway probabilities')
arcs_frequencies = compute_sequence_flow_frequencies(log_traces, bpmn_graph)
gateways_branching = bpmn_graph.compute_branching_probability(arcs_frequencies)
sequences = []
for gateway_id in gateways_branching:
for seqflow_id in gateways_branching[gateway_id]:
probability = gateways_branching[gateway_id][seqflow_id]
sequences.append({'elementid': seqflow_id, 'prob': probability})
return sequences
# TODO: make it accept gate_management option
def mine_gateway_probabilities_alternative(log_traces: list, bpmn_graph: BPMNGraph) -> list:
print_step('Mining gateway probabilities')
arcs_frequencies = compute_sequence_flow_frequencies(log_traces, bpmn_graph)
gateways_branching = bpmn_graph.compute_branching_probability_alternative_discovery(arcs_frequencies)
sequences = []
for gateway_id in gateways_branching:
for seqflow_id in gateways_branching[gateway_id]:
probability = gateways_branching[gateway_id][seqflow_id]
sequences.append({'elementid': seqflow_id, 'prob': probability})
return sequences
def mine_gateway_probabilities_alternative_with_gateway_management(log_traces: list, bpmn_graph: BPMNGraph,
gate_management: GateManagement) -> list:
if isinstance(gate_management, list) and len(gate_management) >= 1:
print_notice(f'A list of gateway management options was provided: {gate_management}, taking the first option: {gate_management[0]}')
gate_management = gate_management[0]
print_step(f'Mining gateway probabilities with {gate_management}')
if gate_management is GateManagement.EQUIPROBABLE:
gateways_branching = bpmn_graph.compute_branching_probability_alternative_equiprobable()
elif gate_management is GateManagement.DISCOVERY:
arcs_frequencies = compute_sequence_flow_frequencies(log_traces, bpmn_graph)
gateways_branching = bpmn_graph.compute_branching_probability_alternative_discovery(arcs_frequencies)
else:
raise Exception('Only GatewayManagement.DISCOVERY and .EQUIPROBABLE are supported')
sequences = []
for gateway_id in gateways_branching:
for seqflow_id in gateways_branching[gateway_id]:
probability = gateways_branching[gateway_id][seqflow_id]
sequences.append({'elementid': seqflow_id, 'prob': probability})
return sequences
def process_tasks(process_graph: DiGraph, process_stats: pd.DataFrame, resource_pool: list,
settings: Configuration):
print_step('Tasks Processor')
evaluator = TaskEvaluator(process_graph, process_stats, resource_pool, settings)
return evaluator.elements_data
def extract_process_graph(model_path) -> DiGraph:
bpmn = bpmn_reader.BpmnReader(model_path)
return process_structure.create_process_structure(bpmn)
def simulate(settings: Configuration, process_stats: pd.DataFrame, log_test, evaluate_fn: Callable = None):
if evaluate_fn is None:
evaluate_fn = evaluate_logs
# NOTE: from Discoverer
def pbar_async(p, msg):
pbar = tqdm(total=reps, desc=msg)
processed = 0
while not p.ready():
cprocesed = (reps - p._number_left)
if processed < cprocesed:
increment = cprocesed - processed
pbar.update(n=increment)
processed = cprocesed
time.sleep(1)
pbar.update(n=(reps - processed))
p.wait()
pbar.close()
reps = settings.repetitions
cpu_count = multiprocessing.cpu_count()
w_count = reps if reps <= cpu_count else cpu_count
pool = multiprocessing.Pool(processes=w_count)
# Simulate
args = [(settings, rep) for rep in range(reps)]
p = pool.map_async(execute_simulator, args)
pbar_async(p, 'simulating:')
# Read simulated logs
args = [(settings, rep) for rep in range(reps)]
p = pool.map_async(read_stats, args)
pbar_async(p, 'reading simulated logs:')
# Evaluate
args = [(settings, process_stats, log) for log in p.get()]
if len(log_test.caseid.unique()) > 1000:
pool.close()
results = [evaluate_fn(arg) for arg in tqdm(args, 'evaluating results:')]
sim_values = list(itertools.chain(*results))
else:
p = pool.map_async(evaluate_fn, args)
pbar_async(p, 'evaluating results:')
pool.close()
sim_values = list(itertools.chain(*p.get()))
return sim_values
def execute_simulator(args):
# NOTE: extracted from StructureOptimizer static method
def sim_call(settings: Configuration, rep):
args = ['java', '-jar', settings.bimp_path,
os.path.join(settings.output, settings.project_name + '.bpmn'),
'-csv',
os.path.join(settings.output, 'sim_data', settings.project_name + '_' + str(rep + 1) + '.csv')]
# NOTE: the call generates a CSV event log from a model
# NOTE: might fail silently, because stderr or stdout aren't checked
completed_process = subprocess.run(args, check=True, stdout=subprocess.PIPE)
message = f'Simulator debug information:' \
                  f'\n\targs = {completed_process.args}' \
f'\n\tstdout = {completed_process.stdout.__str__()}' \
f'\n\tstderr = {completed_process.stderr.__str__()}'
print_notice(message)
sim_call(*args)
# def execute_simulator_simple(bimp_path, model_path, csv_output_path):
# args = ['java', '-jar', bimp_path, model_path, '-csv', csv_output_path]
# print('args', args)
# # NOTE: the call generates a CSV event log from a model
# # NOTE: might fail silently, because stderr or stdout aren't checked
# subprocess.run(args, check=True, stdout=subprocess.PIPE)
def read_stats(args):
# NOTE: extracted from StructureOptimizer static method
def read(settings: Configuration, rep):
m_settings = dict()
m_settings['output'] = settings.output
column_names = {'resource': 'user'}
m_settings['read_options'] = settings.read_options
m_settings['read_options'].timeformat = '%Y-%m-%d %H:%M:%S.%f'
m_settings['read_options'].column_names = column_names
m_settings['project_name'] = settings.project_name
temp = LogReader(os.path.join(m_settings['output'], 'sim_data',
m_settings['project_name'] + '_' + str(rep + 1) + '.csv'),
m_settings['read_options'],
verbose=False)
temp = pd.DataFrame(temp.data)
temp.rename(columns={'user': 'resource'}, inplace=True)
temp['role'] = temp['resource']
temp['source'] = 'simulation'
temp['run_num'] = rep + 1
temp = temp[~temp.task.isin(['Start', 'End'])]
return temp
return read(*args)
# TODO: name properly or modify/merge read_stats and read_stats_alt
def read_stats_alt(args):
# NOTE: extracted from Discoverer and Optimizer static method
def read(settings: Configuration, rep):
path = os.path.join(settings.output, 'sim_data')
log_name = settings.project_name + '_' + str(rep + 1) + '.csv'
rep_results = pd.read_csv(os.path.join(path, log_name), dtype={'caseid': object})
rep_results['caseid'] = 'Case' + rep_results['caseid']
rep_results['run_num'] = rep
rep_results['source'] = 'simulation'
rep_results.rename(columns={'resource': 'user'}, inplace=True)
rep_results['start_timestamp'] = pd.to_datetime(
rep_results['start_timestamp'], format='%Y-%m-%d %H:%M:%S.%f')
rep_results['end_timestamp'] = pd.to_datetime(
rep_results['end_timestamp'], format='%Y-%m-%d %H:%M:%S.%f')
return rep_results
return read(*args)
def evaluate_logs(args):
# NOTE: extracted from StructureOptimizer static method
def evaluate(settings: Configuration, data: pd.DataFrame, sim_log):
"""Reads the simulation results stats"""
rep = sim_log.iloc[0].run_num
sim_values = list()
evaluator = sim_evaluator.SimilarityEvaluator(data, sim_log, settings, max_cases=1000)
evaluator.measure_distance(Metric.DL)
sim_values.append({**{'run_num': rep}, **evaluator.similarity})
return sim_values
return evaluate(*args)
def evaluate_logs_with_add_metrics(args):
def evaluate(settings: Configuration, process_stats: pd.DataFrame, sim_log: pd.DataFrame):
rep = sim_log.iloc[0].run_num
sim_values = list()
evaluator = sim_evaluator.SimilarityEvaluator(process_stats, sim_log, settings, max_cases=1000)
metrics = [settings.sim_metric]
if settings.add_metrics:
metrics = list(set(list(settings.add_metrics) + metrics))
for metric in metrics:
evaluator.measure_distance(metric)
sim_values.append({**{'run_num': rep}, **evaluator.similarity})
return sim_values
return evaluate(*args)
def mine_resources(settings: Configuration):
parameters = dict()
settings.res_cal_met = CalculationMethod.DEFAULT
settings.res_dtype = DataType.DT247
settings.arr_cal_met = CalculationMethod.DEFAULT
settings.arr_dtype = DataType.DT247
time_table_creator = TimeTablesCreator(settings)
args = {'res_cal_met': settings.res_cal_met, 'arr_cal_met': settings.arr_cal_met}
if not isinstance(args['res_cal_met'], CalculationMethod):
args['res_cal_met'] = CalculationMethod.from_str(args['res_cal_met'])
if not isinstance(args['arr_cal_met'], CalculationMethod):
args['arr_cal_met'] = CalculationMethod.from_str(args['arr_cal_met'])
time_table_creator.create_timetables(args)
resource_pool = [
{'id': 'QBP_DEFAULT_RESOURCE', 'name': 'SYSTEM', 'total_amount': '100000', 'costxhour': '20',
'timetable_id': time_table_creator.res_ttable_name['arrival']}
]
parameters['resource_pool'] = resource_pool
parameters['time_table'] = time_table_creator.time_table
return parameters
def mine_resources_with_resource_table(log: LogReader, settings: Configuration):
def create_resource_pool(resource_table, table_name) -> list:
"""Creates resource pools and associate them the default timetable in BIMP format"""
resource_pool = [{'id': 'QBP_DEFAULT_RESOURCE', 'name': 'SYSTEM', 'total_amount': '20', 'costxhour': '20',
'timetable_id': table_name['arrival']}]
data = sorted(resource_table, key=lambda x: x['role'])
for key, group in itertools.groupby(data, key=lambda x: x['role']):
res_group = [x['resource'] for x in list(group)]
r_pool_size = str(len(res_group))
name = (table_name['resources'] if 'resources' in table_name.keys() else table_name[key])
resource_pool.append(
{'id': sup.gen_id(), 'name': key, 'total_amount': r_pool_size, 'costxhour': '20', 'timetable_id': name})
return resource_pool
print_step('Resource Miner')
res_analyzer = ResourcePoolAnalyser(log, sim_threshold=settings.rp_similarity)
ttcreator = TimeTablesCreator(settings)
args = {'res_cal_met': settings.res_cal_met,
'arr_cal_met': settings.arr_cal_met,
'resource_table': res_analyzer.resource_table}
if not isinstance(args['res_cal_met'], CalculationMethod):
args['res_cal_met'] = CalculationMethod.from_str(settings.res_cal_met)
if not isinstance(args['arr_cal_met'], CalculationMethod):
args['arr_cal_met'] = CalculationMethod.from_str(settings.arr_cal_met)
ttcreator.create_timetables(args)
resource_pool = create_resource_pool(res_analyzer.resource_table, ttcreator.res_ttable_name)
resource_table = pd.DataFrame.from_records(res_analyzer.resource_table)
return ttcreator.time_table, resource_pool, resource_table
# @profile(stream=open('logs/memprof_split_timeline.log', 'a+'))
def split_timeline(log: Union[LogReader, pd.DataFrame], size: float, one_ts: bool) -> Tuple[pd.DataFrame, pd.DataFrame, str]:
"""
Split an event log dataframe by time to perform split-validation.
    The preferred method is time-based splitting, removing incomplete traces.
    If the resulting testing set is smaller than 10% of the log size,
    the fallback method sorts traces by their start time and splits on whole
    traces, regardless of whether they are fully contained in the timeframe.
Parameters
----------
    log: LogReader or pd.DataFrame, log to split.
size: float, validation percentage.
one_ts: bool, Support only one timestamp.
"""
if isinstance(log, LogReader):
log = pd.DataFrame(log.data)
# Split log data
splitter = LogSplitter(log)
partition1, partition2 = splitter.split_log('timeline_contained', size, one_ts)
total_events = len(log)
    # Check size and change time splitting method if necessary
if len(partition2) < int(total_events * 0.1):
partition1, partition2 = splitter.split_log('timeline_trace', size, one_ts)
# Set splits
key = 'end_timestamp' if one_ts else 'start_timestamp'
partition1 = pd.DataFrame(partition1)
partition2 = pd.DataFrame(partition2)
return partition1, partition2, key
def remove_outliers(log: Union[LogReader, pd.DataFrame]) -> pd.DataFrame:
if isinstance(log, LogReader):
event_log = pd.DataFrame(log.data)
else:
event_log = log
# calculating case durations
cases_durations = list()
for id, trace in event_log.groupby('caseid'):
duration = (trace['end_timestamp'].max() - trace['start_timestamp'].min()).total_seconds()
cases_durations.append({'caseid': id, 'duration_seconds': duration})
cases_durations = pd.DataFrame(cases_durations)
# merging data
event_log = event_log.merge(cases_durations, how='left', on='caseid')
# filtering rare events
unique_cases_durations = event_log[['caseid', 'duration_seconds']].drop_duplicates()
first_quantile = unique_cases_durations.quantile(0.1)
last_quantile = unique_cases_durations.quantile(0.9)
event_log = event_log[(event_log.duration_seconds <= last_quantile.duration_seconds) & (event_log.duration_seconds >= first_quantile.duration_seconds)]
event_log = event_log.drop(columns=['duration_seconds'])
return event_log
|
"""
Functions for manipulating Widevine DRM
"""
from Crypto.Cipher import AES
from Crypto.Hash import SHA1
from Crypto.Util.Padding import pad
from base64 import b16decode, b64decode, b64encode
import requests
import json
from uuid import UUID
from .widevine_pb2 import WidevineCencHeader
from construct.core import Prefixed, Struct, Const, Int8ub, Int24ub, Int32ub, \
Bytes, GreedyBytes, PrefixedArray, Default, If, this
WIDEVINE_SYSTEM_ID = UUID("edef8ba9-79d6-4ace-a3c8-27dcd51d21ed")
# Construct for a Widevine PSSH box
PSSH_BOX = Prefixed(
Int32ub,
Struct(
"type" / Const(b"pssh"),
"version" / Default(Int8ub, 1),
"flags" / Const(0, Int24ub),
"system_id" / Const(WIDEVINE_SYSTEM_ID.bytes, Bytes(16)),
"key_ids" / If(this.version == 1, PrefixedArray(Int32ub, Bytes(16))),
"data" / Prefixed(Int32ub, GreedyBytes)
),
includelength=True
)
VALID_TRACKS = ["AUDIO", "SD", "HD", "UHD1", "UHD2"]
def sign_request(request, key, iv):
"""
Sign request
Returns base64 signature
"""
hashed_request = SHA1.new(bytes(json.dumps(request), "ASCII"))
cipher = AES.new(b16decode(key),
AES.MODE_CBC, b16decode(iv))
ciphertext = cipher.encrypt(pad(hashed_request.digest(), 16))
return b64encode(ciphertext)
def get_keys(content_id, url, tracks, policy, signer, signer_key=None,
signer_iv=None):
"""
Get keys from widevine key server
"""
track_list = []
if isinstance(tracks, str):
tracks = tracks.upper().split(",")
# remove any invalid track types
for track in tracks:
if track in VALID_TRACKS:
track_list.append({"type": track})
request = {
"content_id": str(b64encode(bytes(content_id, "ASCII")), "ASCII"),
"policy": policy,
"drm_types": ["WIDEVINE", ],
"tracks": track_list,
}
request_data = {
"request": str(b64encode(bytes(json.dumps(request), "ASCII")),
"ASCII"),
"signer": signer
}
if signer_key is not None and signer_iv is not None:
signature = sign_request(request, signer_key, signer_iv)
request_data["signature"] = str(signature, "ASCII")
r = requests.post(url, data=json.dumps(request_data))
if r.status_code != 200:
raise Exception("Widevine request failed with status code {}".format(
r.status_code))
response = json.loads(b64decode(json.loads(r.text)["response"]))
return response
def generate_widevine_data(key_ids=None, provider=None, content_id=None):
"""
Generate basic Widevine PSSH data
    Following Widevine requirements, the caller must provide either a list
    of key IDs or a content ID.
"""
if key_ids is None and content_id is None:
raise Exception("Must provide either list of key IDs or content ID")
pssh_data = WidevineCencHeader()
if provider is not None:
pssh_data.provider = provider
if key_ids is not None:
for key_id in key_ids:
if isinstance(key_id, str):
key_id = UUID(key_id).bytes
elif isinstance(key_id, bytes) and len(key_id) == 32:
# assume a length 32 byte string is an encoded hex string
key_id = UUID(str(key_id, "ASCII")).bytes
elif isinstance(key_id, UUID):
key_id = key_id.bytes
pssh_data.key_id.append(key_id)
if content_id is not None:
if isinstance(content_id, str):
pssh_data.content_id = bytes(content_id, "UTF-8")
elif isinstance(content_id, bytes):
pssh_data.content_id = content_id
else:
raise TypeError("content_id should be string or bytes")
return pssh_data
def generate_pssh(key_ids=None, provider=None, content_id=None, version=1):
"""
Generate basic Widevine PSSH box
Defaults to creating version 1 PSSH box, with key IDs listed
"""
if key_ids is None:
raise Exception("Must provide a list of key IDs")
kids = []
for key_id in key_ids:
if isinstance(key_id, str):
key_id = UUID(key_id).bytes
elif isinstance(key_id, bytes):
key_id = UUID(str(key_id, "ASCII")).bytes
elif isinstance(key_id, UUID):
key_id = key_id.bytes
kids.append(key_id)
pssh_data = generate_widevine_data(kids, provider, content_id)
pssh = PSSH_BOX.build({
"version": version,
"key_ids": kids,
"data": pssh_data.SerializeToString()
})
return pssh
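# A minimal round-trip sketch (hypothetical values; assumes the package context
# required by the relative import of widevine_pb2 above): build a version 1
# PSSH box for a single key ID and parse it back with PSSH_BOX.
if __name__ == "__main__":
    example_kid = "00000000-0000-0000-0000-000000000001"  # hypothetical key ID
    box = generate_pssh(key_ids=[example_kid], provider="example_provider",
                        content_id="example-content-id")
    parsed = PSSH_BOX.parse(box)
    assert parsed.system_id == WIDEVINE_SYSTEM_ID.bytes
    assert parsed.key_ids[0] == UUID(example_kid).bytes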
|
# -*- coding: utf-8 -*-
"""
manager.py
Classes
---------------------------------------
StatisticsManager
WormStatistics
A translation of Matlab code written by Jim Hokanson,
in the SegwormMatlabClasses GitHub repo. Original code paths:
DONE:
# of lines
SegwormMatlabClasses / +seg_worm / +stats / +hist / @manager 410
SegwormMatlabClasses / +seg_worm / +stats / event_specs.m 143
SegwormMatlabClasses / +seg_worm / +stats / movement_specs.m 81
SegwormMatlabClasses / +seg_worm / +stats / simple_specs.m 45
SegwormMatlabClasses / +seg_worm / +stats / specs.m 70
SegwormMatlabClasses / +seg_worm / +stats / +hist / docs (several CSV files)
SegwormMatlabClasses / +seg_worm / +stats / @hist 339
ALMOST COMPLETE:
SegwormMatlabClasses / +seg_worm / @stats 231
SegwormMatlabClasses / +seg_worm / +stats / @manager 61
MIGHT NOT BE NEEDED:
SegwormMatlabClasses / +seg_worm / +stats / +helpers / fexact.m 405
SegwormMatlabClasses / +seg_worm / +stats / +helpers / swtest.m 260
SegwormMatlabClasses / +seg_worm / +stats / @hist_specs 50 (unknown how used - @JimHokanson)
SegwormMatlabClasses / +seg_worm / +w / * 38 files, lots of lines of code... is this necessary?
TOTAL 2095 lines + several CSV files
unclear so far if the CSV files
are being loaded and used by the
code or if they are just docs
"""
from scipy import stats
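# --- Illustrative sketch (not part of the original translation) ---
# The commented-out MATLAB follow-up inside StatisticsManager.__init__ below
# calls mafdr() to turn p-values into q-values. A minimal Benjamini-Hochberg
# stand-in is sketched here, assuming BH false-discovery-rate control is an
# acceptable replacement for mafdr.
import numpy as np
def benjamini_hochberg_q(p_values):
    """Return BH-adjusted q-values for an array of p-values (sketch)."""
    p = np.asarray(p_values, dtype=float)
    n = p.size
    order = np.argsort(p)
    # p_(i) * n / i for the ascending-sorted p-values
    ranked = p[order] * n / np.arange(1, n + 1)
    # enforce monotonicity from the largest p-value downwards
    q_sorted = np.minimum.accumulate(ranked[::-1])[::-1]
    q = np.empty(n)
    q[order] = np.clip(q_sorted, 0, 1)
    return q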
class StatisticsManager(object):
"""
Properties
---------------------------------------
stats
p_worm
q_worm
Notes
---------------------------------------
Formerly seg_worm.stats.manager
"""
def __init__(self, exp_hists, ctl_hists):
"""
Initializes the Manager class.
Parameters
---------------------------------------
exp_hists
An array of exp_hist entries
ctl_hists
An array of ctl_hist entries
Notes
---------------------------------------
Formerly seg_worm.stats.manager.initObject
"""
n_objs = len(exp_hists.hists)
# p_t Initialization
#----------------------------------------------------------------------
# @JimHokanson comments
# :/ Sadly this needs to be done beforehand to be the same ...
# It might be updated during object initialization ...
#
# TODO: This looks nicely vectorized, but it breaks the organization
# significantly ...
#
# How much of an impact do we get if we move this to being computed
# for each object, instead of all of them at once?
# Formerly: p_t_all =
# mattest([exp_hists.mean_per_video]',[ctl_hists.mean_per_video]')
# http://www.mathworks.com/help/bioinfo/ref/mattest.html
# perform an unpaired t-test for differential expression with
# a standard two-tailed and two-sample t-test on every gene in DataX and DataY
# and return a p-value for each gene.
# PValues = mattest(DataX, DataY)
# p_t_all is a 726x1 matrix with values between 0 and 1.
t_statistics, p_values = stats.ttest_ind(exp_hists.mean_per_video,
ctl_hists.mean_per_video)
# Removed this line: [stats_objs.p_t] = sl.struct.dealArray(p_t_all)
# This is the main call to initialize each object
#----------------------------------------------------------------------
self.stats = []
for iObj in range(n_objs):
# seg_worm.stats.initObject
self.stats.append(WormStatistics(exp_hists[iObj],
ctl_hists[iObj],
p_values[iObj]))
"""
# Followup with properties that depend on the aggregate
#----------------------------------------------------------------------
[~, q_t_all] = mafdr([stats_objs.p_t])
[stats_objs.q_t] = sl.struct.dealArray(q_t_all)
[~, q_w_all] = mafdr([stats_objs.p_w])
[stats_objs.q_w] = sl.struct.dealArray(q_w_all)
self.p_worm = min([stats_objs.p_w])
self.q_worm = min([stats_objs.q_w])
"""
class WormStatistics(object):
"""
Notes
--------------------
Formerly: seg_worm.stats
    Some of the statistics are aggregate:
- p_value
- q_value
List of exclusive features:
properties
#TODO: Move to object that both hist and stats display
#
#ALSO: We need two, one for experiment and one for controls
#Definitions in: seg_worm.stats.hist
field
name
short_name
units
feature_category
hist_type
motion_type
data_type
#New properties
#-------------------------------------------------------------------
p_normal_experiment
p_normal_control
q_normal_experiment
q_normal_control
z_score_experiment
#From documentation:
#- no controls, this is empty
#- absent in controls, but 2+ experiments, Inf
#- present in 2+ controls, -Inf
#Technically, this is incorrect
#
z_score_control = 0 #By definition ...
p_t #Differential expression ...
# - function: mattest (bioinformatics toolbox)
# This doesn't seem like it is used ...
p_w = NaN #NaN Default value, if all videos have a valid value
#then this is not set
#NOTE: For the following, the corrections can be per strain or
#across strains. I think the current implementation is per strain.
#I'd like to go with the values used in the paper ...
q_t
#In the old code corrections were per strain or across all strains.
q_w
#In the old code corrections were per strain or across all strains.
#Current implementation is per strain, not across strains ...
p_significance
#pTValue
#pWValue
#qTValue
#qWValue
#-------------------------------------------------------------------
# z_score #not populated if no controls are provided ...
# mean #mean of the mean hist values
# std #std of the hist values
# n_samples ## of videos where the mean is not NaN
# p_normal = NaN #probability of being a normal distribution
# #
# # seg_worm.fex.swtest(data(i).dataMeans, 0.05, 0)
# q_normal #
"""
def __init__(self):
"""
        Blank initializer; the real initialization happens in initObject().
"""
pass
def initObject(self, exp_hist, ctl_hist, p_t):
"""
        I added p_t as a parameter because I think it is needed, but the
        original code does not seem to use it! Then why were the p-values
        calculated in Manager at all? - @MichaelCurrie
Formerly seg_worm.stats.initObject(obj,exp_hist,ctl_hist)
worm2StatsInfo Compute worm statistics information and save it to a file.
seg_worm.stats.initObject
See Also:
seg_worm.stats.helpers.swtest
"""
pass
"""
self.p_t = p_t
del(p_t)
ALPHA = 0.05 # Not really used since we don't examine H, just p
TAIL = 0
# NOTE: This should be passed in instead of being changed here ...
USE_OLD_CODE = False
# TODO: I'm not a big fan of all of this copying. I'd rather just copy an
# object (use name: seg_worm.stats.description?) - @JimHokanson
self.field = exp_hist.field
self.name = exp_hist.name
self.short_name = exp_hist.short_name
self.units = exp_hist.units
self.feature_category = exp_hist.feature_category
self.hist_type = exp_hist.hist_type
self.motion_type = exp_hist.motion_type
self.data_type = exp_hist.data_type
        is_exclusive = (exp_hist.none_valid and ctl_hist.all_valid) \
            or (exp_hist.all_valid and ctl_hist.none_valid)
# zscore
#----------------------------------------------------------------------
# This definition is slightly different than the old version, but matches
# the textual description (in code, what about in published paper?)
# From Nature Methods 2013 Supplemental Description:
#----------------------------------------------------------------------
# Measurements exclusively found in the experimental group have a zScore of
# infinity and those found exclusively found in the control are
# -infinity.
if np.isnan(exp_hist.mean):
            if (USE_OLD_CODE and is_exclusive) or \
                    (not USE_OLD_CODE and ctl_hist.n_valid_measurements > 1):
self.z_score_experiment = -np.Inf
else:
self.z_score_experiment = np.NaN
elif np.isnan(ctl_hist.mean):
            if (USE_OLD_CODE and is_exclusive) or \
                    (not USE_OLD_CODE and exp_hist.n_valid_measurements > 1):
self.z_score_experiment = np.Inf
else:
self.z_score_experiment = np.NaN
else:
# This might need to be means_per_video, not the mean ...
# - @JimHokanson
self.z_score_experiment = (
exp_hist.mean - ctl_hist.mean) / ctl_hist.std
# TODO: Move this to the histogram, not here! These are properties of how
# normal the distributions of the histograms are, and have nothing to do
# with the comparative statistics between the two groups
# - @JimHokanson
#----------------------------------------------------------------------
p_fields = {'p_normal_experiment' 'p_normal_control'};
hist_objs = {exp_hist ctl_hist};
for iObj = 1:2
cur_field = p_fields{iObj};
cur_hist_obj = hist_objs{iObj};
if cur_hist_obj.n_valid_measurements < 3
obj.(cur_field) = NaN;
else
obj.(cur_field) = seg_worm.stats.helpers.swtest(cur_hist_obj.mean_per_video,ALPHA,TAIL);
end
end
# Rules are:
# --------------------------------------
# p_t
#
# - not in one, but all in the other - use fexact (Fishers Exact)
# - otherwise use mattest
#
# p_w
# - not in one, but all in the other - use fexact
# - partial in both - use Wilcoxon rank-sum test
# - if in both, set to NaN
if is_exclusive:
# This is a literal translation of the code (I think)
# I'm a bit confused by it ... - @JimHokanson
n_expt = exp_hist.n_videos
n_total = n_expt + ctl_hist.n_videos
self.p_w = seg_worm.stats.helpers.fexact(
n_expt, n_total, n_expt, n_expt)
self.p_t = self.p_w
        elif not (exp_hist.none_valid or ctl_hist.none_valid):
# We need a few valid values from both ...
self.p_w = ranksum(exp_hist.valid_means, ctl_hist.valid_means)
# NOTE: This code is for an individual object, the corrections
# are done in the manager which is aware of all objects ...
# pWValues - these seem to be the real statistics used ...
# - exclusive - fexact seg_worm.stats.helpers.fexact
# - ranksum
"""
        pass
|
# [sublimelinter pyflakes-@python:2.7]
from globalvars import GAMEINFO, TILEMAP
from tileutils import coord_to_pixel
from tile import Sprite
class SnakeSeg(object):
def __init__(self, coords, prev=None, next=None):
self.spritemap = {
"toright": ("images/snake_bod_left.png", "images/snake_bod_right.png"),
"toleft": ("images/snake_bod_right.png", "images/snake_bod_left.png"),
"above": ("images/snake_bod_down.png", "images/snake_bod_up.png"),
"below": ("images/snake_bod_up.png", "images/snake_bod_down.png")
}
self.coords = coords
self.prev = prev
self.next = next
self.first_sprite = None
self.second_sprite = None
if prev:
self.add_sprites(prev.coords)
def add_sprites(self, prevcoord):
first_sprite, second_sprite = self.get_sprite_orientation(prevcoord)
self.first_sprite = Sprite(source=first_sprite, pos=coord_to_pixel(self.coords))
GAMEINFO["gameinstance"].add_widget(self.first_sprite)
self.prev.second_sprite = Sprite(source=second_sprite, pos=coord_to_pixel(self.prev.coords))
GAMEINFO["gameinstance"].add_widget(self.prev.second_sprite)
def remove_sprites(self, dart_location=False):
GAMEINFO["gameinstance"].remove_widget(self.first_sprite)
self.first_sprite = None
GAMEINFO["gameinstance"].remove_widget(self.prev.second_sprite)
self.prev.second_sprite = None
if self.second_sprite:
GAMEINFO["gameinstance"].remove_widget(self.second_sprite)
self.second_sprite = None
TILEMAP[self.coords].move_outof(clear_snakebod=True)
TILEMAP[self.coords].clear_foreground()
if not dart_location:
TILEMAP[self.coords].add_foreground("forest")
def get_sprite_orientation(self, prevcoord):
if self.coords[0] - prevcoord[0] != 0:
return self.spritemap["toright"] if self.coords[0] > prevcoord[0] else self.spritemap["toleft"]
else:
return self.spritemap["above"] if self.coords[1] > prevcoord[1] else self.spritemap["below"]
class SnakeBod(object):
def __init__(self, first):
self.segments = {}
self.segments[first] = SnakeSeg(coords=first)
self.head = self.tail = self.segments[first]
def __getitem__(self, item):
return self.segments[item]
def append(self, coords):
self.segments[coords] = SnakeSeg(coords=coords, prev=self.head)
self.head.next = self.segments[coords]
self.head = self.segments[coords]
def prev(self, coords):
return self.segments[coords].prev
def next(self, coords):
return self.segments[coords].next
def prune(self, coords, digesting=None):
cells = []
finish_digesting = False
if self.segments[coords] == self.tail:
cell = self.segments[coords].next
self.head = self.tail
self.head.next = None
else:
cell = self.segments[coords]
self.head = cell.prev
self.head.next = None
cont = True
while cont:
cells.append(cell.coords)
cell = cell.next
if not cell:
cont = False
if digesting and digesting.coords in cells:
finish_digesting = True
for i in cells:
try:
if i == coords:
self.segments[i].remove_sprites(dart_location=True)
else:
self.segments[i].remove_sprites(dart_location=False)
del self.segments[i]
except KeyError:
pass
return (self.head.coords, finish_digesting)
def __len__(self):
length = 1
current = self.tail
while True:
current = current.next
if current:
length += 1
else:
return length
|
# SPDX-FileCopyrightText: 2021 Aaron Dewes <[email protected]>
#
# SPDX-License-Identifier: MIT
def convertServicesToContainers(app: dict):
app['containers'] = []
# Loop through the dict app['services']
for container in app['services']:
app['services'][container]['name'] = container
app['containers'].append(app['services'][container])
del app['services']
return app
# Invert convertDataDirToVolume: volumes of the form
# '${APP_DATA_DIR}/<something>:<something-else>' become data entries by
# stripping the '${APP_DATA_DIR}/' prefix and keeping '<something>:<something-else>'
def convertVolumeToDataDir(app: dict):
for container in app['containers']:
if 'volumes' in container:
# Try to detect the data dir(ectories), they should be something like ${APP_DATA_DIR}/<something>:<something-else>
# and should be converted into <something>:<something-else>
container['data'] = []
for i in range(len(container['volumes']) - 1, -1, -1):
if('${APP_DATA_DIR}' in container['volumes'][i]):
# ${APP_DATA_DIR}/<something>:<something-else> should be converted into <something>:<something-else>
# Remove the ${APP_DATA_DIR}
container['data'].append(
container['volumes'][i].replace('${APP_DATA_DIR}/', ''))
container['volumes'].remove(container['volumes'][i])
return app
# Remove duplicated items from a list
def removeDuplicates(list_to_clean: list):
return list(set(list_to_clean))
# Get permissions for a container where these are unknown
# If a container's env vars contain the string "BITCOIN", it very likely needs the bitcoind permission
# If a container's env vars contain the string "LND", or a volume contains the string LND_DATA_DIR, it very likely needs the lnd permission
def getContainerPermissions(app: dict, name: str):
for container in app['containers']:
container['permissions'] = []
if("environment" in container):
if(isinstance(container['environment'], list)):
for envVar in container['environment']:
if(str(envVar).find('BITCOIN') != -1):
container['permissions'].append('bitcoind')
if(str(envVar).find('LND') != -1):
container['permissions'].append('lnd')
elif(isinstance(container['environment'], dict)):
for envVar in container['environment'].values():
                    # BITCOIN_NETWORK is also useful for LND, and doesn't need the bitcoind permission
if str(envVar).find('BITCOIN') != -1 and str(envVar).find('BITCOIN_NETWORK') == -1:
container['permissions'].append('bitcoind')
if(str(envVar).find('LND') != -1):
container['permissions'].append('lnd')
if(str(envVar).find('ELECTRUM') != -1):
container['permissions'].append('electrum')
# Now loop through volumes
if('volumes' in container):
for i in range(len(container['volumes']) - 1, -1, -1):
volume = container['volumes'][i]
if('LND_DATA_DIR' in volume):
container['permissions'].append('lnd')
container['volumes'].remove(volume)
continue
if('BITCOIN_DATA_DIR' in volume):
container['permissions'].append('bitcoind')
container['volumes'].remove(volume)
continue
if(len(container['volumes']) == 0):
del container['volumes']
else:
print("Warning: Couldn't parse some volumes for container {} in app {}".format(
container['name'], name))
if(len(container['permissions']) == 0):
del container['permissions']
else:
container['permissions'] = removeDuplicates(
container['permissions'])
return app
def convertComposeYMLToAppYML(app: dict, name: str, registry: dict):
appMetadata = {}
# Get the member of the registry list where element['name']== name
for element in registry:
if(element['id'] == name):
appMetadata = element
break
if(appMetadata == {}):
print("Warning: Couldn't get metadata for app {}".format(name))
app = convertServicesToContainers(app)
app = convertVolumeToDataDir(app)
app = getContainerPermissions(app, name)
for container in app['containers']:
if('networks' in container):
container['ip'] = container['networks']['default']['ipv4_address']
del container['networks']
if('permissions' in container):
if not 'dependencies' in appMetadata:
appMetadata['dependencies'] = []
for permission in container['permissions']:
appMetadata['dependencies'].append(permission)
appMetadata['dependencies'] = removeDuplicates(
appMetadata['dependencies'])
if('restart' in container):
del container['restart']
del app['version']
return {"metadata": appMetadata, **app}
|
# -*- coding: utf-8 -*-
"""
webapp
======
CHANGELOG
=========
0.1.2 / 2021-12-08
------------------
- changes for Python 3.8
- use add_extension for jinja extensions
0.1.1 / 2021-05-19
------------------
- change cors header
0.1.0 / 2021-01-16
------------------
- First Release
"""
__author__ = "R. Bauer"
__copyright__ = "MedPhyDO - Machbarkeitsstudien des Instituts für Medizinische Strahlenphysik und Strahlenschutz am Klinikum Dortmund im Rahmen von Bachelor und Masterarbeiten an der TU-Dortmund / FH-Dortmund"
__credits__ = ["R. Bauer", "K.Loot"]
__license__ = "MIT"
__version__ = "0.1.2"
__status__ = "Prototype"
import uuid
import os
import os.path as osp
import json
from isp.config import ispConfig
from safrs import log # , paginate, SAFRSResponse
from flask import Flask, send_file
from safrs import SAFRSAPI # , SAFRSRestAPI # api factory
from flask import render_template, request
from flask_cors import CORS
from flask_swagger_ui import get_swaggerui_blueprint
from flask.json import JSONEncoder
import logging
logger = logging.getLogger( "MQTT" )
import safrs
from safrs.swagger_doc import parse_object_doc
def expose_object( self, safrs_object, url_prefix="", **properties):
"""Eine eigene expose_object Funktion um swagger doc zu erzeugen.
Wird bei Klassen ohne Datanbankanbindung verwendet
.. code::
paths: {
<__qualname__> : {
<__http_method> : <__rest_doc>
}
}
In <__rest_doc>.tags wird wenn nicht angegeben __qualname__ abgelegt
In <__rest_doc>.type wird wenn nicht angegeben "string" abgelegt
creates a class of the form
@api_decorator
class Class_API(SAFRSRestAPI):
SAFRSObject = safrs_object
add the class as an api resource to /SAFRSObject and /SAFRSObject/{id}
tablename/collectionname: safrs_object._s_collection_name, e.g. "Users"
classname: safrs_object.__name__, e.g. "User"
Möglichkeiten:
a) /class/ : api_list in class aufrufen
b) /class/{objectId} : keine Funktion objectId vorhanden also api_get aufrufen
c) /class/test : Vorhandene Funktion test in class aufrufen
Parameters
----------
safrs_object : SAFSBase
FSBase subclass that we would like to expose.
url_prefix : str, optional
url prefix. The default is "".
**properties :
additional flask-restful properties.
Returns
-------
None.
"""
    # go through all methods of the class and look for __rest_doc
    docs = { }
    # scan all methods of safrs_object and remember own methods that carry __rest_doc
for method_name in dir(safrs_object):
# die method selbst bestimmen
try:
method = getattr(safrs_object, method_name, None)
except Exception as exc:
            # method_name "query" raises an error:
            # SQL expression, column, or mapped entity expected - got '<class 'xxxxx'>'
            #print( "expose_object - error while determining", method_name, exc)
pass
if method and hasattr(method, '__qualname__') and hasattr(method, '__rest_doc'):
# full_name bestimmt die eigentliche Funktion
full_name = "{}.{}".format(safrs_object.__qualname__, method_name)
if method_name == "api_list":
# variante a)
path_name = "/{}/".format( safrs_object.__qualname__ )
elif method_name == "api_get":
# variante b)
path_name = "/{}/{}/".format( safrs_object.__qualname__, "{" + safrs_object._s_object_id + "}" )
else:
# variante c)
path_name = "/{}".format( full_name )
if method and method.__qualname__ == full_name :
# für swagger . durch / ersetzen
path_name = path_name.replace(".", "/")
docs[ path_name ] = {}
for hm in getattr(method, "__http_method", [] ):
method_doc = getattr( method, '__rest_doc', {} )
if not "tags" in method_doc:
method_doc["tags"] = [ safrs_object.__qualname__ ]
if not "type" in method_doc:
method_doc["type"] = "string"
# in docs ablegen
docs[ path_name ][ hm.lower() ] = method_doc
    # if docs has entries, add the class itself to _swagger_object
if len(docs) > 0:
object_doc = parse_object_doc(safrs_object)
object_doc["name"] = safrs_object.__qualname__
self._swagger_object["tags"].append(object_doc)
custom_swagger = {
"paths": docs
}
# doc im object selbst in _swagger_paths merken
safrs_object._swagger_paths = docs
_swagger_doc = self.get_swagger_doc()
safrs.dict_merge(_swagger_doc, custom_swagger)
class ispBaseWebApp():
"""Eine Flask Webapplication mit API bereitstellen.
Attributes
----------
_config: Dot
geladene config
_urlContentParamsKey : str
querykey für einen jsonstring, der andere query parameter überschreibt
default = "_ispcp"
response_headers : dict
app: Flask
initialisierte Flask app
api: SAFRSAPI
initialisierte SAFRSAPI
apiurl: string
url zur api
"""
def __init__(self, config=None, db=None, name:str=None, webconfig=None, apiconfig=None, overlay:dict={}):
"""Erzeugt die Flask App.
ruft _create_app auf um die Datenbank Api bereitzustellen.
Parameters
----------
config : ispConfig
geladene config mit server.webserver und server.api
db: SQLAlchemy
Initialisiertes SQLAlchemy Object - db=SQLAlchemy()
name: str
Name der connection aus config.database.<name>.connection
webconfig : dot|dict
überschreibt ggf. die Inhalte aus config.server.webserver
apiconfig : dot|dict
überschreibt ggf. die Inhalte aus config.server.api
overlay : dict
überschreibt die Inhalte der config z.B. für unittest
Verwenden von Overlay::
from flask import current_app
print( current_app._configOverlay )
Returns
-------
None.
"""
if config == None:
config = ispConfig( config={
"server": {
"webserver": {
"TESTING": True,
"SECRET_KEY": os.urandom(16)
}
}
})
elif not isinstance(config, ispConfig):
            # always wrap config in ispConfig; this does not load config data but uses the given values
            config = ispConfig( config=config )
        # remember the overlay and update the config
        self._configOverlay = overlay
        config.update( overlay )
        # keep it in self._config
        self._config = config
        # keys for content parameters
        self._urlContentParamsKey = '_ispcp'
        self._headerContentParamsKey = 'X-ISP-CONTENT-PARAMS'
        # default status code for templates (routeIndex)
        self.status_code = 200
        # default header for templates (routeIndex)
        self.default_header = {'Content-Type': 'text/html; charset=utf-8'}
        #
        # extend the webserver configuration from config.server
        #
        if webconfig:
            self._config.merge( "server.webserver", webconfig )
        #
        # extend the api configuration from config.server
        #
        if apiconfig:
            self._config.merge( "server.api", apiconfig )
        #
        # determine the main database
        #
db_names = []
db_binds = {}
        # name of the main database
if not name:
name = self._config.get("database.main", [] )
if type( name ) == str:
db_names.append( name )
elif type( name ) == list:
db_names = name
for name in db_names:
            # try to find a matching database config and use it if found
#db_uri = self._config.get("database." + name + ".connection", "").format( **{"BASE_DIR": self._config.BASE_DIR } )
db_binds[name] = self._config.get("database." + name + ".connection", "", replaceVariables=True)
        #
        # create the app with SQLAlchemy() and the database URIs
        #
app = self._create_app( db, db_binds )
        # logger for safrs
        log.setLevel( self._config.get("server.logging.safrs", logging.WARNING ) ) # logging.DEBUG
        # logger for sqlalchemy
        sql_logger = logging.getLogger( "sqlalchemy" )
        sql_logger.setLevel( self._config.get("server.logging.sqlalchemy", logging.WARNING ) )
        # start the app
        if app:
            # set template_folder to ui; used by routeRender
            app.template_folder = osp.join( self._config.BASE_DIR, "ui" )
            # provide the dbadmin interface if desired
            #
            # if a class sets no_flask_admin=True, no admin interface is added for it
if self._config.get("server.api.DBADMIN", False):
# see if we can add the flask-admin views
try:
from flask_admin import Admin
from flask_admin.contrib import sqla
except Exception as exc: # pragma: no cover
print(f"flask-admin import failed {exc}")
models = self._config.get( "server.api.models", [] )
try:
admin = Admin(app, url="/dbadmin")
for model in models:
if hasattr( model, "no_flask_admin") and model.no_flask_admin == True:
pass
else:
admin.add_view(sqla.ModelView(model, db.session))
except Exception as exc: # pragma: no cover
print(f"Failed to add flask-admin view {exc}")
# app.logger für flask
app.logger.setLevel( self._config.get("server.logging.webapp", logging.WARNING ) )
# Modus festlegen
mode = "APP"
if self._config.get("server.webserver.TESTING"):
mode = "TESTING"
self.apiurl = "http://{}:{}{}".format(
self._config.get("server.webserver.host"),
self._config.get("server.webserver.port"),
self._config.get("server.api.prefix", "")
)
# Webserver startparameter anzeigen
print("Starting '{}' in '{}' Mode".format(
self.apiurl,
mode
))
#return
if mode == "TESTING":
# im testing mode starten
self.app = self.app.test_client()
else: # pragma: no cover
# add CORS support
# Content-Range wird von dstore ausgewertet um die max Anzahl zu bestimmen
CORS( self.app,
expose_headers='Content-Range, Content-Newitem, X-Query, X-Rquery, X_Error, X_Info'
)
                # this section is not counted by coverage because it is not executed in test mode
                # after starting the app, the following code only runs after shutdown
app.run(
host=self._config.get("server.webserver.host"),
port=self._config.get("server.webserver.port"),
use_reloader=self._config.get("server.webserver.reloader"),
threaded=False,
debug=self._config.get("server.webserver.debug")
)
def _create_app(self, db=None, binds:dict={} ):
"""Erzeugt die Flask App.
Ruft create_api auf um die Datenbank api bereitzustellen
Parameters
----------
db: SQLAlchemy
Initialisiertes SQLAlchemy Object - db=SQLAlchemy()
"""
self.app = Flask( self._config.get("server.webserver.name", "webapp" ) )
#SECRET_KEY
#self.app.config['SESSION_TYPE'] = 'memcached'
self.app.config['SECRET_KEY'] = self._config.get("server.webserver.SECRET_KEY", os.urandom(16) )
# config und overlay in app._config merken
self.app._config = self._config
self.app._configOverlay = self._configOverlay
#
# extend jinja options
#
# markdown in templates auch für flask
self.app.jinja_env.add_extension('jinja_markdown.MarkdownExtension')
#self.app.jinja_options['extensions'].append('jinja_markdown.MarkdownExtension')
# Konfigurationen für SQLAlchemy setzen
self.app.config.update( SQLALCHEMY_BINDS=binds )
# debug modus
#self.app.config.update( DEBUG=True )
self.app.config.update( SQLALCHEMY_TRACK_MODIFICATIONS=False)
# print("_create_app-bind", binds, binds.keys() )
if db:
# SQLAlchemy mit app initialisieren
db.init_app( self.app )
# Datenbank und Datenbank Api
with self.app.app_context( ):
try:
db.create_all( bind=binds.keys() )
except Exception as exc: # pragma: no cover
print( "[webapp] _create_app error" , exc)
self._create_api( )
@self.app.before_request
def before_request_func( ):
"""Wird vor jedem request aufgerufen.
hier wird _checkNetarea aufgerufen
Returns
-------
None.
"""
# Zugangsbeschränkung prüfen
return ( self._checkNetarea() )
        # allow additional routes
        self.addRoutes( )
        # additional default routes
@self.app.route('/')
@self.app.route('/<path:filepath>')
def home( filepath:str='' ):
self.status_code = 200
self.default_header = None # auto default
return self.routeIndex( filepath ), self.status_code, self.default_header
return self.app
def _create_api(self):
"""Generate SAFRSAPI.
with additional swagger Doc for classes without a database connection
Returns
-------
None.
"""
# load additional swagger configuration if required
custom_swagger = {
"info" : {
"title" : self._config.get("server.webserver.name", "webapp"),
"description" : self._config.get("server.webserver.title", "webapp"),
"version" : self._config.get("server.webserver.title", __version__)
},
"parameters" : {
"validatorUrl" : False
},
"validatorUrl" : False
}
if self._config.get("server.api.custom_swagger_config", False):
swaggerPath = osp.join( self._config.BASE_DIR, "config", self._config.get("server.api.custom_swagger_config", "") )
# load the specified swagger config
if osp.isfile( swaggerPath ):
with open( swaggerPath, 'r') as f:
custom_swagger = json.load( f )
prefix = self._config.get("server.api.prefix")
self.api = SAFRSAPI(self.app,
host=self._config.get("server.webserver.host"),
port=self._config.get("server.webserver.port"),
prefix=prefix,
swaggerui_blueprint=False,
custom_swagger=custom_swagger
)
        ## disable the external swagger-ui validator check when not on localhost (validatorUrl=False)
prefix = "/api"
# Call factory function to create our blueprint
swaggerui_blueprint = get_swaggerui_blueprint(
prefix,
"{}/swagger.json".format(prefix),
config={ # Swagger UI config overrides
"docExpansion": "none",
"defaultModelsExpandDepth": -1,
"validatorUrl" : False
}
)
swaggerui_blueprint.json_encoder = JSONEncoder
self.app.register_blueprint(swaggerui_blueprint, url_prefix=prefix)
# go through all models and add a pointer to api
for model in self._config.get("server.api.models"):
# model bekannt machen
self.api.expose_object( model )
# create swagger docu for extensions without a database
if hasattr( model, "no_flask_admin") and model.no_flask_admin == True:
expose_object(self.api, model)
model._api = self.api
def _checkNetarea( self ): # pragma: no cover
"""Simple check whether the access is from the same subnetwork.
Skip on localhost and TESTING=True in config.server.webserver
"""
# is TESTING mode on: do nothing
if self._config.get("server.webserver.TESTING", False):
return
        # check the checkNetarea flag in the webserver config
if not self._config.get("server.webserver.checkNetarea", True):
return
# with unittest there is no REMOTE_ADDR or it is a local call
if request.remote_addr == None or request.remote_addr == "127.0.0.1":
return
# check access
remote_area = ".".join( request.environ.get('REMOTE_ADDR').split(".")[:3] )
netarea = ".".join( request.environ.get('SERVER_NAME').split(".")[:3] )
if not remote_area == netarea:
# Error 401 access not allowed
return "Der Zugriff ist für ihre IP verboten", 401
return
def addRoutes( self ):
"""Überschreibbare Funktion um zusätzliche routen einzubinden.
Sample::
def addRoutes( self ):
@self.app.route('/test/<path:filepath>')
def test_route( filepath:str='' ):
return "testroute"
"""
pass
def parseRequestParams( self, queryParams:dict={}, defaults:dict={} ):
"""Parsed zusätzliche query parameter für routeRender.
Parameters
----------
queryParams: dict
{ '_ispcp': '{"jahr":2018}'}
defaults: dict
vorgaben, die durch angegebene Parameter erweitert bzw. überschrieben werden
Returns
-------
dict:
Überprüfte und zusammengefasste Parameter
"""
        # pre-populate params with the defaults
        params = defaults.copy()
        #print( ">params", params)
        #logger.debug( "parseRequestParams: error parsing json in url content" )
        #
        # preparation: read <_urlContentParamsKey> and remove it from queryParams
        #
        urlContentParams = None
        if self._urlContentParamsKey in queryParams:
            urlContentParams = queryParams[ self._urlContentParamsKey ]
            del queryParams[ self._urlContentParamsKey ]
        #
        # 1. simply use all url parameters (queryParams) except <_urlContentParamsKey>
        #
        params.update( queryParams )
        #
        # 2. if <_urlContentParamsKey> holds a valid JSON string or a dict
        #
if urlContentParams:
if type( urlContentParams ) == str:
try:
rparams = json.loads( urlContentParams )
params.update( rparams )
except:
# print( "json.loads error", urlContentParams )
logger.debug( "parseRequestParams: bei json parse in url content" )
#self.sendAppInfo( "parseRequestParams", "bei json parse in url content" )
pass
elif type( urlContentParams ) == dict: # pragma: no cover
# kann nur passieren wenn render nicht über den Webserver aufgerufen wird
params.update( urlContentParams )
#print( "params>", params)
return params
def routeIndex(self, filepath="" ):
"""Verarbeitet normale Aufrufe.
Umgeleitete filepath Pfade:
* resources/ nach server.webserver.resources
* fonts/ nach server.webserver.resources/fonts
* globals/ nach server.webserver.globals
* dbadminframe/ iframe für dbadmin erstellen
* docs nach docs/build/html/
* htmlcov nach docs/
* render/ rendert .phtml in ui/
alles andere wird auch aus ui/ geholt
ohne filepath wird index.html aufgerufen
Parameters
----------
filepath : str, optional
file und path zu einer datei. The default is "".
Returns
-------
output :
Inhalt der geladenen Datei
"""
        # by default everything is served from ui
root = self._config.get("server.webserver.ui", "", replaceVariables = True)
if filepath[:10] == "resources/":
root = self._config.get("server.webserver.resources", "", replaceVariables = True)
filepath = filepath[10:]
elif filepath[:6] == "fonts/":
root = self._config.get("server.webserver.resources", "", replaceVariables = True)
elif filepath[:8] == "globals/":
root = self._config.get("server.webserver.globals", "", replaceVariables = True)
elif filepath[:12] == "apiframe":
return self.routeIFrame( "/api" )
elif filepath[:12] == "dbadminframe":
return self.routeIFrame( "/dbadmin" )
elif filepath[:4] == "docs":
return self.routeDocs( filepath )
elif filepath[:8] == "coverage":
return self.routeCoverage( filepath )
elif filepath[:7] == "render/":
return self.routeRender( filepath[7:] )
elif filepath[-4:] == ".vue" or filepath[:6] == "views/":
self.default_header = {'Content-Type': 'application/javascript; charset=utf-8'}
root = self._config.get("server.webserver.ui", "", replaceVariables = True)
#return self.routeFile( filepath, root )
elif filepath[:9] == "unittest_":
# Spezielle render aufruf für unittest
return self.routeRender( filepath )
else:
# alles andere - ohne angaben index aufrufen
if filepath == "" or filepath == "index.html" or filepath == "index.phtml":
filepath = "index"
return self.routeRender( filepath )
return self.routeFile( filepath, root )
def routeFile( self, filepath:str="", root="" ):
"""Eine normale Datei laden.
Parameters
----------
filepath : str, optional
file und path einer datei aus root. The default is "".
root : str, optional
Basispfad für filepath
Returns
-------
output :
Inhalt der geladenen Datei
"""
# sonst nur die Datei laden
filepath = osp.join( root, filepath ) # .format( **{"BASE_DIR": self._config.BASE_DIR} )
try:
output = send_file( filepath )
except:
output = "<h1>Datei {} wurde nicht gefunden</h1>".format( filepath )
self.status_code = 404
pass
return output
def routeRender( self, filepath:str="" ):
"""Ein Template in ui oder template_folder rendern.
Parameters
----------
filepath : str, optional
file und path einer datei aus ui. The default is "".
Returns
-------
output : str
Das gerenderte Template.
"""
# .vue as default in views
if filepath[-4:] == ".vue" or filepath[:6] == "views/":
if filepath.find(".vue") == -1 and filepath.find(".js") == -1:
filepath = "{}.vue".format( filepath )
else:
# otherwise default is .phtml
if filepath.find(".phtml") == -1:
filepath = "{}.phtml".format( filepath )
uuidstr = str( uuid.uuid1() )
params = {
"uuid" : uuidstr,
"id" : "uuid_" + uuidstr
}
        # override the defaults with the request parameters
        import connexion
        # connexion uses FirstValueURIParser collectionFormat: csv
        # ?letters=a,b,c&letters=d,e,f becomes letters = ['a', 'b', 'c']
        params.update( self.parseRequestParams( connexion.request.args.copy() ) )
        # determine value
        value = params.get("value", None )
try:
output = render_template(
filepath,
params = json.dumps( params ),
value = value,
id = params["id"],
uuid = uuidstr,
**self._config.get("variables", {} ).toDict()
)
except Exception as err:
# print("[webapp] ERROR: render_template:", err, self._config.get("variables", {} ) )
output = "<h1>Das Template {} wurde nicht gefunden oder ein parser error [ {} ] liegt vor.</h1>".format( filepath, err )
self.status_code = 404
pass
return output
def routeIFrame( self, src:str="" ):
"""Filepath in iframe anzeigen.
Aufrufe::
/apiframe - api im iframe anzeigen. Mit src="/api"
/dbadminframe - dbadmin im iframe anzeigen. Mit src="/dbadmin"
Parameters
----------
src : str, optional
src Angabe des iframe. The default is "".
Returns
-------
str
div mit iframe
"""
return '<div class="iframe-container overflow-hidden flex-1"><iframe src="{}" ></iframe></div>'.format( src )
def routeDocs( self, filepath:str="" ):
"""Die Dokumentation anzeigen oder erzeugen.
Aufruf::
/docs/index.html - Dokumentation anzeigen
/docs - Dokumentation im iframe anzeigen. Mit src="docs/index.html"
/docs/build - Dokumentation erzeugen
/docs/rebuild - Dokumentation komplett erneuern (ui-docs)
"""
# wenn nur docs angegeben wurde iframe erstellen
if len(filepath) == 4:
return '<div class="iframe-container overflow-hidden flex-1"><iframe src="/docs/index.html" ></iframe></div>'
# Ausführungspfad für docs festlegen
docs_root = osp.join( self._config.get( "BASE_DIR", "") , '.docs' )
docs_path = docs_root
# docs/ nicht verwenden
filepath = filepath[5:]
# prüfen ob es docs_path gibt, sonst zuerst die dokumentation erzeugen
if not osp.isdir( docs_path ) or not osp.isfile( osp.join( docs_path, "build", "index.html" ) ): # pragma: no cover
filepath = "build"
if filepath == "build" or filepath == "rebuild": # pragma: no cover
# Dokumentation erzeugen filepath als mode mitgeben
if not self.createDocs( docs_path, filepath ):
return "<h1>Eine Dokumentation ist nicht vorhanden.</h1>"
filepath = "index.html"
return self.routeFile( filepath, osp.join( docs_root, "build" ) )
def createDocs( self, docs_path:str="", mode:str="build" ): # pragma: no cover
"""Dokumentation erzeugen oder erneuern.
Parameters
----------
docs_path : str, optional
Pfad nach ui-docs. The default is "".
mode : str, optional
Mit rebuild komplett neu erzeugen sonst nur erneuern. The default is "build".
Returns
-------
bool
``True`` wenn erzeugt wurde, sonst ``False``.
"""
import sphinx.ext.apidoc as apidoc
import sphinx.cmd.build as build
if mode == "rebuild" and osp.isdir( docs_path ):
from shutil import rmtree
try:
rmtree( docs_path )
except:
return False
# ohne docs_path vorlage aus helper/docs kopieren
if not osp.isdir( docs_path ) or not osp.isdir( osp.join( docs_path, "build" ) ):
# conf und _static kopieren
from distutils.dir_util import copy_tree
# vorlage kopieren
#
from_path = osp.join( osp.dirname(osp.abspath( __file__ )), "helper", "sphinx" )
if not osp.isdir( docs_path ):
os.mkdir( docs_path )
# das soll eigentlich copy_tree machen
os.mkdir( osp.join( docs_path, "source") )
os.mkdir( osp.join( docs_path, "source", "_ext") )
os.mkdir( osp.join( docs_path, "source", "_static") )
try:
copy_tree( from_path, docs_path )
except:
logger.debug( "ERROR copy_tree {} {}".format( from_path, docs_path ) )
print( "ERROR copy_tree {} {}".format( from_path, docs_path ) )
return False
# original docs auch kopieren
#
org_docs_from_path = osp.join( self._config.get( "BASE_DIR", "") , 'docs' )
if osp.isdir( org_docs_from_path ):
org_docs_to = osp.join( docs_path, "source", "docs" )
try:
copy_tree( org_docs_from_path, org_docs_to )
except:
logger.debug( "ERROR copy_tree {} {}".format( org_docs_from_path, docs_path ) )
# es wurde nichts angelegt - Fehlermeldung ausgeben
if not osp.isdir( docs_path ):
print("### createDocs no path", docs_path )
return False
# ausführungs Pfad auf docs_path ändern
os.chdir( docs_path )
# ---- 1. rst Dateien in source erzeugen
api_cmd = [
'--force', # force
'-osource/', # destdir
'../', # module_path
'../tests*', # exclude_pattern tests
'../ui*' # weitere exclude_pattern
]
apidoc.main( api_cmd )
# ---- 2. html aus rst Dateien in build erzeugen
#
# get project information from main version file
import version as v
build_cmd = [
'source',
'build',
'-Dcopyright={}'.format( v.__copyright__ ),
'-Dauthor={}'.format( v.__author__ ),
'-Dproject={}'.format( self._config.get("server.webserver.title", v.__project__) ),
'-Dversion={}'.format( v.__version__ ),
'-Drelease={}'.format( v.__version__ )
]
build.main( build_cmd )
return True
def routeCoverage( self, filepath:str="" ): # pragma: no cover
"""Ein template in htmlcov rendern.
Die Pfade in den .html Dateien werden angepasst,
da der Aufruf sonst nur im Verzeichnis selbst fuktioniert
Parameters
----------
filepath : str, optional
Pfad zum template. The default is "".
Returns
-------
str|mixed
Ruckgabe von flask send_file oder geänderter html Inhalt.
"""
# wenn nur coverage angegeben wurde iframe erstellen
if len(filepath) == 8:
return '<div class="overflow-hidden flex-1"><iframe src="/coverage/index.html" ></iframe></div>'
else:
filepath = filepath[9:]
htmlcov_root = osp.join( self._config.get( "BASE_DIR", "") , '.htmlcov' )
# Ausführungspfad für docs festlegen
root = htmlcov_root #.format( **{"BASE_DIR": self._config.BASE_DIR} )
if filepath == "":
filepath = "index.html"
if not osp.isfile( osp.join( root, filepath ) ):
return """
<h1>Coverage wurde noch nicht erzeugt.</h1>
Starte <b>python {}/tests/all_unittest.py</b> von der Kommandozeile
""".format( self._config.get( "BASE_DIR", "") )
        # special handling for html files: src=" and href=" must be prefixed with /coverage/
if filepath[-5:] == ".html":
from bs4 import BeautifulSoup
data = ""
with open( osp.join( root, filepath ) , 'r') as myfile:
data = myfile.read()
soup = BeautifulSoup(data, "html.parser")
# alle href suchen
href_tags = soup.find_all(href=True)
for tag in href_tags:
if not tag["href"][:4] == "http":
tag["href"] = "/coverage/" + tag["href"]
src_tags = soup.find_all(src=True)
for tag in src_tags:
tag["src"] = "/coverage/" + tag["src"]
data = str( soup )
return data
return send_file( osp.join( root, filepath ) ) #.format( **{"BASE_DIR": self._config.BASE_DIR} ) )
|
from domain.classroom.attendee import Attendee
from domain.repository import Repository
class AttendeeRepository(Repository):
def _get_entity_type(self) -> str:
return Attendee.__name__
|
# -*- coding: utf-8 -*-
# TODO: refactor "complexity" visitors to be regular visitors
|
"""
Django settings for this project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from environs import Env
env = Env()
env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
ALLOWED_HOSTS = env.str("DJANGO_ALLOWED_HOSTS").split(" ")
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
# Third-party
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
# Local
'accounts',
'upload',
'pages'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [str(BASE_DIR.joinpath('project-template'))],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'config.context_processors.export_vars',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
"ENGINE": env("SQL_ENGINE", "django.db.backends.postgresql"),
"NAME": env("SQL_DATABASE", "product_name_db"),
"USER": env("SQL_USER", "product_name_db_user"),
"PASSWORD": env("SQL_PASSWORD", "product#name#db#user#password"),
"HOST": env("SQL_HOST", "localhost"),
"PORT": env("SQL_PORT", "5432"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
str(BASE_DIR.joinpath('project-static')),
)
STATIC_URL = '/static/'
STATIC_ROOT = str(BASE_DIR.joinpath('files-static'))
MEDIA_URL = "/media/"
MEDIA_ROOT = str(BASE_DIR.joinpath('files-media'))
# Set DEFAULT_AUTO_FIELD for complete project
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# -----------------------------------------------------------
# Project Specific Settings
# -----------------------------------------------------------
# Custom User Model
AUTH_USER_MODEL = 'accounts.CustomUser'
# Crispy Form Template
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# django-allauth config
SITE_ID = 1
LOGIN_REDIRECT_URL = 'dashboard'
ACCOUNT_LOGOUT_REDIRECT_URL = 'product_page'
ACCOUNT_SIGNUP_REDIRECT_URL = 'account_login'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ACCOUNT_SESSION_REMEMBER = None
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
SOCIALACCOUNT_AUTO_SIGNUP = False
SOCIALACCOUNT_EMAIL_VERIFICATION = True
SOCIALACCOUNT_EMAIL_REQUIRED = True
# FIX-NORMAL: ACCOUNT_USER_DISPLAY (if needed)
def ACCOUNT_USER_DISPLAY(user): return user.email
DEFAULT_FROM_EMAIL = '[email protected]'
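# --- Illustrative environment sketch (not part of the original settings) ---
# The variable names below are the ones read via environs above; the values
# are placeholders only. DJANGO_ALLOWED_HOSTS is space-separated, matching the
# .split(" ") above.
#
#   SECRET_KEY=replace-me
#   DEBUG=False
#   DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1
#   SQL_ENGINE=django.db.backends.postgresql
#   SQL_DATABASE=product_name_db
#   SQL_USER=product_name_db_user
#   SQL_PASSWORD=change-me
#   SQL_HOST=localhost
#   SQL_PORT=5432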
|
class Runnable(object):
def run(self):
raise NotImplementedError
def cleanup(self):
pass
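# --- Illustrative sketch (not part of the original module) ---
# A hypothetical subclass showing the intended contract: implement run(),
# optionally override cleanup(). EchoTask is a made-up name.
class EchoTask(Runnable):
    def run(self):
        print("EchoTask.run called")
    def cleanup(self):
        print("EchoTask.cleanup called")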
|
from .event import Event
class LogEvent(Event):
"""Event for log entries."""
__module__ = 'pyobs.events'
def __init__(self, time=None, level=None, filename=None, function=None, line=None, message=None):
Event.__init__(self)
self.data = {
'time': time,
'level': level,
'filename': filename,
'function': function,
'line': line,
'message': message
}
@property
def time(self):
return self.data['time']
@property
def level(self):
return self.data['level']
@property
def filename(self):
return self.data['filename']
@property
def function(self):
return self.data['function']
@property
def line(self):
return self.data['line']
@property
def message(self):
return self.data['message']
__all__ = ['LogEvent']
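# --- Illustrative sketch (not part of the original module) ---
# Builds a LogEvent and reads it back through its properties; the values are
# placeholders. Wrapped in a helper so nothing runs at import time.
def _log_event_example():
    evt = LogEvent(time=1639000000.0, level='INFO', filename='module.py',
                   function='do_work', line=42, message='hello')
    return evt.level, evt.message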
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for quadratic_hamiltonian.py."""
from __future__ import absolute_import
import numpy
import unittest
from openfermion.config import EQ_TOLERANCE
from openfermion.hamiltonians import majorana_operator
from openfermion.ops import (normal_ordered,
FermionOperator, QuadraticHamiltonian)
from openfermion.ops._quadratic_hamiltonian import antisymmetric_canonical_form
from openfermion.transforms import get_fermion_operator, get_sparse_operator
from openfermion.utils import get_ground_state
from openfermion.utils._testing_utils import (random_antisymmetric_matrix,
random_hermitian_matrix)
class QuadraticHamiltoniansTest(unittest.TestCase):
def setUp(self):
self.n_qubits = 5
self.constant = 1.7
self.chemical_potential = 2.
# Obtain random Hermitian and antisymmetric matrices
self.hermitian_mat = random_hermitian_matrix(self.n_qubits)
self.antisymmetric_mat = random_antisymmetric_matrix(self.n_qubits)
self.combined_hermitian = (
self.hermitian_mat -
self.chemical_potential * numpy.eye(self.n_qubits))
# Initialize a particle-number-conserving Hamiltonian
self.quad_ham_pc = QuadraticHamiltonian(
self.constant, self.hermitian_mat)
# Initialize a non-particle-number-conserving Hamiltonian
self.quad_ham_npc = QuadraticHamiltonian(
self.constant, self.hermitian_mat, self.antisymmetric_mat,
self.chemical_potential)
# Initialize the sparse operators and get their ground energies
self.quad_ham_pc_sparse = get_sparse_operator(self.quad_ham_pc)
self.quad_ham_npc_sparse = get_sparse_operator(self.quad_ham_npc)
self.pc_ground_energy, self.pc_ground_state = get_ground_state(
self.quad_ham_pc_sparse)
self.npc_ground_energy, self.npc_ground_state = get_ground_state(
self.quad_ham_npc_sparse)
def test_combined_hermitian_part(self):
"""Test getting the combined Hermitian part."""
combined_hermitian_part = self.quad_ham_pc.combined_hermitian_part
for i in numpy.ndindex(combined_hermitian_part.shape):
self.assertAlmostEqual(self.hermitian_mat[i],
combined_hermitian_part[i])
combined_hermitian_part = self.quad_ham_npc.combined_hermitian_part
for i in numpy.ndindex(combined_hermitian_part.shape):
self.assertAlmostEqual(self.combined_hermitian[i],
combined_hermitian_part[i])
def test_hermitian_part(self):
"""Test getting the Hermitian part."""
hermitian_part = self.quad_ham_pc.hermitian_part
for i in numpy.ndindex(hermitian_part.shape):
self.assertAlmostEqual(self.hermitian_mat[i], hermitian_part[i])
hermitian_part = self.quad_ham_npc.hermitian_part
for i in numpy.ndindex(hermitian_part.shape):
self.assertAlmostEqual(self.hermitian_mat[i], hermitian_part[i])
def test_antisymmetric_part(self):
"""Test getting the antisymmetric part."""
antisymmetric_part = self.quad_ham_pc.antisymmetric_part
for i in numpy.ndindex(antisymmetric_part.shape):
self.assertAlmostEqual(0., antisymmetric_part[i])
antisymmetric_part = self.quad_ham_npc.antisymmetric_part
for i in numpy.ndindex(antisymmetric_part.shape):
self.assertAlmostEqual(self.antisymmetric_mat[i],
antisymmetric_part[i])
def test_conserves_particle_number(self):
"""Test checking whether Hamiltonian conserves particle number."""
self.assertTrue(self.quad_ham_pc.conserves_particle_number)
self.assertFalse(self.quad_ham_npc.conserves_particle_number)
def test_add_chemical_potential(self):
"""Test adding a chemical potential."""
self.quad_ham_npc.add_chemical_potential(2.4)
combined_hermitian_part = self.quad_ham_npc.combined_hermitian_part
hermitian_part = self.quad_ham_npc.hermitian_part
want_combined = (self.combined_hermitian -
2.4 * numpy.eye(self.n_qubits))
want_hermitian = self.hermitian_mat
for i in numpy.ndindex(combined_hermitian_part.shape):
self.assertAlmostEqual(combined_hermitian_part[i],
want_combined[i])
for i in numpy.ndindex(hermitian_part.shape):
self.assertAlmostEqual(hermitian_part[i], want_hermitian[i])
self.assertAlmostEqual(2.4 + self.chemical_potential,
self.quad_ham_npc.chemical_potential)
def test_orbital_energies(self):
"""Test getting the orbital energies."""
# Test the particle-number-conserving case
orbital_energies, constant = self.quad_ham_pc.orbital_energies()
# Test the ground energy
energy = numpy.sum(
orbital_energies[orbital_energies < -EQ_TOLERANCE]) + constant
self.assertAlmostEqual(energy, self.pc_ground_energy)
# Test the non-particle-number-conserving case
orbital_energies, constant = self.quad_ham_npc.orbital_energies()
# Test the ground energy
energy = constant
self.assertAlmostEqual(energy, self.npc_ground_energy)
def test_ground_energy(self):
"""Test getting the ground energy."""
# Test particle-number-conserving case
energy = self.quad_ham_pc.ground_energy()
self.assertAlmostEqual(energy, self.pc_ground_energy)
# Test non-particle-number-conserving case
energy = self.quad_ham_npc.ground_energy()
self.assertAlmostEqual(energy, self.npc_ground_energy)
def test_majorana_form(self):
"""Test getting the Majorana form."""
majorana_matrix, majorana_constant = self.quad_ham_npc.majorana_form()
# Convert the Majorana form to a FermionOperator
majorana_op = FermionOperator((), majorana_constant)
for i in range(2 * self.n_qubits):
if i < self.n_qubits:
left_op = majorana_operator((i, 1))
else:
left_op = majorana_operator((i - self.n_qubits, 0))
for j in range(2 * self.n_qubits):
if j < self.n_qubits:
right_op = majorana_operator((j, 1), majorana_matrix[i, j])
else:
right_op = majorana_operator((j - self.n_qubits, 0),
majorana_matrix[i, j])
majorana_op += .5j * left_op * right_op
# Get FermionOperator for original Hamiltonian
fermion_operator = normal_ordered(
get_fermion_operator(self.quad_ham_npc))
self.assertTrue(
normal_ordered(majorana_op).isclose(fermion_operator))
def test_diagonalizing_bogoliubov_transform(self):
"""Test getting the diagonalizing Bogoliubov transformation."""
hermitian_part = self.quad_ham_npc.combined_hermitian_part
antisymmetric_part = self.quad_ham_npc.antisymmetric_part
block_matrix = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits),
dtype=complex)
block_matrix[:self.n_qubits, :self.n_qubits] = antisymmetric_part
block_matrix[:self.n_qubits, self.n_qubits:] = hermitian_part
block_matrix[self.n_qubits:, :self.n_qubits] = -hermitian_part.conj()
block_matrix[self.n_qubits:, self.n_qubits:] = (
-antisymmetric_part.conj())
ferm_unitary = self.quad_ham_npc.diagonalizing_bogoliubov_transform()
# Check that the transformation is diagonalizing
majorana_matrix, majorana_constant = self.quad_ham_npc.majorana_form()
canonical, orthogonal = antisymmetric_canonical_form(majorana_matrix)
diagonalized = ferm_unitary.conj().dot(
block_matrix.dot(ferm_unitary.T.conj()))
for i in numpy.ndindex((2 * self.n_qubits, 2 * self.n_qubits)):
self.assertAlmostEqual(diagonalized[i], canonical[i])
lower_unitary = ferm_unitary[self.n_qubits:]
lower_left = lower_unitary[:, :self.n_qubits]
lower_right = lower_unitary[:, self.n_qubits:]
# Check that lower_left and lower_right satisfy the constraints
# necessary for the transformed fermionic operators to satisfy
# the fermionic anticommutation relations
constraint_matrix_1 = (lower_left.dot(lower_left.T.conj()) +
lower_right.dot(lower_right.T.conj()))
constraint_matrix_2 = (lower_left.dot(lower_right.T) +
lower_right.dot(lower_left.T))
identity = numpy.eye(self.n_qubits, dtype=complex)
for i in numpy.ndindex((self.n_qubits, self.n_qubits)):
self.assertAlmostEqual(identity[i], constraint_matrix_1[i])
self.assertAlmostEqual(0., constraint_matrix_2[i])
class MajoranaOperatorTest(unittest.TestCase):
def test_none_term(self):
majorana_op = majorana_operator()
self.assertTrue(majorana_operator().isclose(FermionOperator()))
def test_bad_coefficient(self):
with self.assertRaises(ValueError):
majorana_op = majorana_operator((1, 1), 'a')
def test_bad_term(self):
with self.assertRaises(ValueError):
majorana_op = majorana_operator((2, 2))
with self.assertRaises(ValueError):
majorana_op = majorana_operator('a')
class AntisymmetricCanonicalFormTest(unittest.TestCase):
def test_equality(self):
"""Test that the decomposition is valid."""
n = 7
rand_mat = numpy.random.randn(2 * n, 2 * n)
antisymmetric_matrix = rand_mat - rand_mat.T
canonical, orthogonal = antisymmetric_canonical_form(
antisymmetric_matrix)
result_matrix = orthogonal.dot(antisymmetric_matrix.dot(orthogonal.T))
for i in numpy.ndindex(result_matrix.shape):
self.assertAlmostEqual(result_matrix[i], canonical[i])
def test_canonical(self):
"""Test that the returned canonical matrix has the right form."""
n = 7
# Obtain a random antisymmetric matrix
rand_mat = numpy.random.randn(2 * n, 2 * n)
antisymmetric_matrix = rand_mat - rand_mat.T
canonical, orthogonal = antisymmetric_canonical_form(
antisymmetric_matrix)
for i in range(2 * n):
for j in range(2 * n):
if i < n and j == n + i:
self.assertTrue(canonical[i, j] > -EQ_TOLERANCE)
elif i >= n and j == i - n:
self.assertTrue(canonical[i, j] < EQ_TOLERANCE)
else:
self.assertAlmostEqual(canonical[i, j], 0.)
diagonal = canonical[range(n), range(n, 2 * n)]
for i in range(n - 1):
self.assertTrue(diagonal[i] <= diagonal[i + 1])
def test_bad_dimensions(self):
n, p = (3, 4)
ones_mat = numpy.ones((n, p))
with self.assertRaises(ValueError):
_ = antisymmetric_canonical_form(ones_mat)
def test_not_antisymmetric(self):
n = 4
ones_mat = numpy.ones((n, n))
with self.assertRaises(ValueError):
_ = antisymmetric_canonical_form(ones_mat)
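# Standard unittest entry point (a small addition; the original module may be
# collected by an external test runner instead).
if __name__ == '__main__':
    unittest.main()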
|
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url(r'^', include('home.urls')),
url(r'^admin/', admin.site.urls),
url(r'^admin_login/', include('admin_login.urls')),
url(r'^asr/', include('doctor_login.urls')),
url(r'^patient_login/', include('patient_login.urls')),
]
|
from collections import deque
def solution(cacheSize, cities):
    """LRU cache simulation: a cache hit costs 1 and a cache miss costs 5 (the least recently used city is evicted when the cache is full)."""
    cache = deque([])
    cnt = 0
    for i in cities:
        i = i.lower()  # city names are case-insensitive
        if i not in cache:
            # cache miss
            if cacheSize == 0:
                return 5 * len(cities)
            if len(cache) == cacheSize:
                cache.popleft()  # evict the least recently used city
            cnt += 5
            cache.append(i)
        else:
            # cache hit: move the city to the most recently used position
            cnt += 1
            cache.remove(i)
            cache.append(i)
# print(cache)
return cnt
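# Usage sketch (the city list below is illustrative, not part of the original problem statement):
# the three distinct cities miss on the first pass (5 each) and every repeat is a cache hit (1 each), giving 21.
if __name__ == "__main__":
    print(solution(3, ["Jeju", "Pangyo", "Seoul", "Jeju", "Pangyo", "Seoul", "Jeju", "Pangyo", "Seoul"]))  # 21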
|
import requests
from flask import current_app
from morio.task import celery
@celery.task()
def send_mail(to, subject, text=None, html=None):
config = current_app.config
domain = config.get('MAILGUN_DOMAIN')
key = config.get('MAILGUN_API_KEY')
mailgun_address = config.get('MAILGUN_ADDRESS')
return requests.post(
'{}/messages'.format(domain),
auth=('api', key),
data={
'from': mailgun_address,
'to': to,
'subject': subject,
'text': text,
'html': html,
}
)
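# Usage sketch: the task is queued through Celery's standard delay() API; the recipient
# address and subject below are illustrative only.
#
#   send_mail.delay('[email protected]', 'Welcome to Morio', text='Hello!')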
|
import itertools
import os
import csv
import collections
import networkx
import numpy
import numpy.linalg
import bioparser.data
class DiseasomeMaker(object):
def __init__(self):
self.ontology = self.get_ontology()
@staticmethod
def transfer_annotations(node_to_genes, node_goto_node):
for source_node, sink_node in node_goto_node.items():
genes = node_to_genes.pop(source_node, set())
node_to_genes.setdefault(sink_node, set()).update(genes)
return node_to_genes
def set_gene_annotations(self, node_goto_node=dict()):
"""
Transfers gene annotations from the key node to the
value node.
"""
node_to_genes = dict()
gene_dicts = {'GWAS': self.get_gwas_catalog_annotations(),
#'CTD': self.get_ctd_annotations(),
'OMIM': self.get_omim_annotations()
}
for gene_dict in gene_dicts.values():
DiseasomeMaker.transfer_annotations(gene_dict, node_goto_node)
nodes = reduce(lambda x, y: set.union(set(x), set(y)), gene_dicts.values())
node_to_genes = dict()
for node in nodes:
for gene_dict in gene_dicts.values():
genes = gene_dict.get(node, set())
node_to_genes.setdefault(node, set()).update(genes)
self.gene_dicts = gene_dicts
self.node_to_genes = node_to_genes
return node_to_genes
def get_gwas_catalog_annotations(self):
# Load gwas_catalog and gene annotations.
gwas_catalog = bioparser.data.Data().gwas_catalog
node_to_genes = gwas_catalog.get_doid_id_to_genes(p_cutoff=None, fdr_cutoff=None, mapped_term_cutoff=1, exclude_pmids=set())
return node_to_genes
def get_omim_annotations(self):
morbid_map = bioparser.data.Data().morbid_map
omim_associations = morbid_map.get_associations()
mim_to_genes = dict()
for omim_association in omim_associations:
mim = omim_association['mim_number']
gene = omim_association['gene']
mim_to_genes.setdefault(mim, set()).add(gene)
doid = bioparser.data.Data().doid
doid_to_xrefs = doid.get_doid_to_xrefs('OMIM')
node_to_genes = dict()
for doid_code, mims in doid_to_xrefs.iteritems():
genes = set()
for mim in mims:
genes |= mim_to_genes.get(mim, set())
if not genes:
continue
node_to_genes[doid_code] = genes
return node_to_genes
def get_ctd_annotations(self):
ctd = bioparser.data.Data().ctd
symbol_to_gene = bioparser.data.Data().hgnc.get_symbol_to_gene()
medic_to_genes = dict()
for ctd_row in ctd.read_gene2disease_filtered():
if 'marker/mechanism' not in ctd_row['DirectEvidence']:
continue
symbol = ctd_row['GeneSymbol']
gene = symbol_to_gene.get(symbol)
if not gene:
continue
medic_to_genes.setdefault(ctd_row['DiseaseID'], set()).add(gene)
doid = bioparser.data.Data().doid
doid_to_xrefs = doid.get_doid_to_xrefs('MSH', 'MESH:')
doid_to_xrefs.update(doid.get_doid_to_xrefs('OMIM', 'OMIM:'))
node_to_genes = dict()
for doid_code, medic_codes in doid_to_xrefs.iteritems():
genes = set()
for medic_code in medic_codes:
genes |= medic_to_genes.get(medic_code, set())
if not genes:
continue
node_to_genes[doid_code] = genes
return node_to_genes
def get_ontology(self):
# Load the disease ontology.
doid = bioparser.data.Data().doid
ontology = doid.get_ontology()
return ontology
def get_graph(self, gene_minimum=10, exclude=set(), include=set()):
"""
Returns nodes only
gene_minimum - nodes with fewer than gene_minimum annotated genes are excluded
exclude - set of nodes to exclude from the analysis.
"""
graph = networkx.Graph()
keep_data_keys = ['name']
assert not exclude & include
for node, genes in self.node_to_genes.items():
if node in exclude:
continue
if node not in include and len(genes) < gene_minimum:
continue
data = self.ontology.graph.node[node]
data = {key: data[key] for key in keep_data_keys}
data['genes'] = genes
graph.add_node(node, data)
return graph
@staticmethod
def get_gene_counter(graph):
gene_counter = collections.Counter()
for node, data in graph.nodes_iter(True):
gene_counter.update(data['genes'])
return gene_counter
@staticmethod
def get_overlap(set_0, set_1, elem_counter=None):
assert isinstance(set_0, set)
assert isinstance(set_1, set)
intersect = set_0 & set_1
union = set_0 | set_1
if elem_counter:
intersect = sum(elem_counter[elem] ** 0.5 for elem in intersect)
union = sum(elem_counter[elem] ** 0.5 for elem in union)
else:
intersect = len(intersect)
union = len(union)
jaccard = float(intersect) / union if union else 0.0
overlap = {'intersect': intersect, 'union': union, 'jaccard': jaccard}
return overlap
@staticmethod
def connect(graph, weight_metric='jaccard', weight_genes=True):
if weight_genes:
gene_counter = DiseasomeMaker.get_gene_counter(graph)
else:
gene_counter = None
for node_0, node_1 in itertools.combinations(graph, 2):
data_0 = graph.node[node_0]
data_1 = graph.node[node_1]
genes_0 = data_0['genes']
genes_1 = data_1['genes']
edge_metrics = DiseasomeMaker.get_overlap(genes_0, genes_1, gene_counter)
weight = edge_metrics[weight_metric]
if weight:
graph.add_edge(node_0, node_1, weight=weight)
# Remove diseases without any gene overlap with other diseases
unconnected_nodes = {node for node, degree in graph.degree_iter() if not degree}
graph.remove_nodes_from(unconnected_nodes)
#graph = graph.subgraph(keep_nodes) # if want to leave original unmodified
return graph
@staticmethod
def get_adjacency_matrix(graph):
matrix = networkx.adjacency_matrix(graph)
return matrix
@staticmethod
def get_seed_distance(graph, genes, weight_metric='jaccard', weight_genes=True):
if weight_genes:
gene_counter = DiseasomeMaker.get_gene_counter(graph)
else:
gene_counter = None
genes = set(genes)
seed_list = list()
for data in graph.node.values():
overlap = DiseasomeMaker.get_overlap(genes, data['genes'], gene_counter)
seed_list.append(overlap[weight_metric])
seed = numpy.array(seed_list)
return seed
@staticmethod
def add_node_attribute(graph, key, node_to_value, default=None):
for node, data in graph.nodes_iter(data=True):
value = node_to_value.get(node, default)
data[key] = value
@staticmethod
def graph_minus(graph, nodes):
all_nodes = graph.nodes()
return graph.subgraph(set(all_nodes) - set(nodes))
@staticmethod
def save_as_gml(diseasome, path):
diseasome = diseasome.copy()
doid_code_to_name = dict()
for node, data in diseasome.nodes_iter(data=True):
data['genes'] = '|'.join(sorted(gene.symbol for gene in data['genes']))
#del data['genes']
data['category'] = data['category'] or ''
data['doid_code'] = node
doid_code_to_name[node] = data['name']
del data['name']
networkx.relabel_nodes(diseasome, doid_code_to_name, copy=False)
networkx.write_gml(diseasome, path)
@staticmethod
def save_flat_txt(diseasome, gene_dicts, path):
write_file = open(path, 'w')
fieldnames = ['doid_code', 'name', 'category', 'n_genes', 'genes'] + gene_dicts.keys()
writer = csv.DictWriter(write_file, delimiter='\t', fieldnames=fieldnames)
writer.writeheader()
for node, data in diseasome.nodes_iter(data=True):
genes = data['genes']
row = {'doid_code': node, 'name': data['name'],
'category': data['category'], 'n_genes': len(genes),
'genes': '|'.join(sorted(gene.symbol for gene in genes))}
for resource, node_to_genes in gene_dicts.items():
gene_subset = node_to_genes.get(node, list())
row[resource] = '|'.join(sorted(gene.symbol for gene in gene_subset))
writer.writerow(row)
write_file.close()
@staticmethod
def get_stats(diseasome):
print 'Number of nodes: {}'.format(diseasome.order())
print 'Number of edges: {}'.format(diseasome.size())
total_annotations = 0
distinct_genes = set()
for data in diseasome.node.values():
genes = data['genes']
total_annotations += len(genes)
distinct_genes |= genes
print 'Number of annotated genes: {}'.format(total_annotations)
print 'Number of distinct genes: {}'.format(len(distinct_genes))
if __name__ == '__main__':
import pprint
dm = DiseasomeMaker()
#dm.get_gene_annotations()
#pprint.pprint()
code = 'DOID:9256'
code = 'DOID:1520' # colon carcinoma
print dm.get_gwas_catalog_annotations().get(code)
print dm.get_omim_annotations().get(code)
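    # Hedged sketch: illustrate the Jaccard overlap used for diseasome edge weights with toy
    # gene-symbol sets (these sets are made up and independent of the bioparser data above).
    toy_overlap = DiseasomeMaker.get_overlap({'TP53', 'BRCA1', 'EGFR'}, {'TP53', 'KRAS'})
    print toy_overlap  # {'intersect': 1, 'union': 4, 'jaccard': 0.25}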
|
#!/usr/bin/env python3
# _ ____ ____ ___ ___ _
# / \ / ___| / ___|_ _|_ _| __ _ _ __| |_
# / _ \ \___ \| | | | | | / _` | '__| __|
# / ___ \ ___) | |___ | | | | | (_| | | | |_
# /_/ \_\____/ \____|___|___| \__,_|_| \__|
#
# - from github.com/basnijholt/home-assistant-config
# Use manually on http://patorjk.com/software/taag/#p=display&f=Standard&t=input_boolean
from pathlib import Path
import pyfiglet
start = "#-\n"
end = "#- from github.com/basnijholt/home-assistant-config\n"
def remove_text(content):
do_append = True
new = []
for line in content:
if start in line:
do_append = not do_append
if do_append:
new.append(line)
if end in line:
do_append = not do_append
return new
def add_text(text, content):
text = pyfiglet.figlet_format(text, width=200)
formatted = [("# " + i).rstrip(" ") for i in text.split("\n")]
i_insert_gen = (i + 1 for i, line in enumerate(content) if line.startswith("---"))
i_insert = next(i_insert_gen, 0)
content.insert(i_insert, start) # start
for i, line in enumerate(formatted):
content.insert(i + 1 + i_insert, line + "\n")
content.insert(i + 2 + i_insert, end) # end
return content
folders = ["automations", "includes", ""]
for folder in folders:
folder = Path(".") / folder
for fname in folder.glob("*.yaml"):
with fname.open() as f:
content = f.readlines()
content = remove_text(content)
content = add_text(fname.stem, content)
with fname.open("w") as f:
f.write("".join(content))
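if __name__ == "__main__":
    # Hedged sketch: run the same round trip on an in-memory snippet instead of a YAML file
    # (the "automation:" line below is illustrative only).
    demo = ["---\n", "automation: []\n"]
    demo = add_text("demo", remove_text(demo))
    print("".join(demo))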
|
# -*- coding: utf-8 -*-
"""
Generate a package with IR implementations and tools.
"""
from __future__ import print_function, division, absolute_import
import os
from textwrap import dedent
from itertools import chain
from . import generator
from . import formatting
from . import astgen
from . import visitorgen
from . import naming
#------------------------------------------------------------------------
# Tools Flags
#------------------------------------------------------------------------
cython = 1
#------------------------------------------------------------------------
# Tools Resolution
#------------------------------------------------------------------------
class Tool(object):
    def __init__(self, codegens, flags=0, depends=None):
        self.codegens = codegens
        self.flags = flags
        # avoid a shared mutable default list; depends is extended in place further below
        self.depends = list(depends) if depends else []
def __repr__(self):
return "Tool(codegens=[%s])" % ", ".join(map(str, self.codegens))
def resolve_tools(tool_list, mask, tools=None, seen=None):
if tools is None:
tools = []
seen = set()
for tool in tool_list:
if not (tool.flags & mask) and tool not in seen:
seen.add(tool)
resolve_tools(tool.depends, mask, tools, seen)
tools.append(tool)
return tools
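# Hedged sketch of the resolution order (real Tool instances wrap codegen objects; plain strings
# stand in here purely for illustration):
#
#   base = Tool(["nodes.py"])
#   ext = Tool(["nodes.pxd"], flags=cython, depends=[base])
#   resolve_tools([ext], mask=0)       # -> [base, ext]  (dependencies come before dependents)
#   resolve_tools([ext], mask=cython)  # -> []            (cython-flagged tools are masked out)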
def enumerate_tools(feature_names, mask):
tool_set = set(chain(*[features[name] for name in feature_names]))
tools = resolve_tools(tool_set, mask)
return tools
def enumerate_codegens(feature_names, mask):
tools = enumerate_tools(feature_names, mask)
codegens = list(chain(*[tool.codegens for tool in tools]))
return codegens
#------------------------------------------------------------------------
# Tool Definitions
#------------------------------------------------------------------------
def make_codegen_dict(codegens):
return dict((codegen.out_filename, codegen) for codegen in codegens)
all_codegens = astgen.codegens + visitorgen.codegens
gens = make_codegen_dict(all_codegens)
pxd_ast_tool = Tool([gens[naming.nodes + ".pxd"]], flags=cython)
py_ast_tool = Tool([gens[naming.nodes + ".py"]])
pxd_interface_tool = Tool([gens[naming.interface + ".pxd"]], flags=cython,
depends=[pxd_ast_tool])
py_interface_tool = Tool([gens[naming.interface + ".py"]],
depends=[py_ast_tool])
pxd_visitor_tool = Tool([gens[naming.visitor + ".pxd"]], flags=cython,
depends=[pxd_interface_tool])
py_visitor_tool = Tool([gens[naming.visitor + ".py"]],
depends=[py_interface_tool, pxd_visitor_tool])
pxd_transform_tool = Tool([gens[naming.transformer + ".pxd"]], flags=cython,
depends=[pxd_interface_tool])
py_transformr_tool = Tool([gens[naming.transformer + ".py"]],
depends=[py_interface_tool, pxd_transform_tool])
pxd_ast_tool.depends.extend([pxd_interface_tool, py_interface_tool])
#------------------------------------------------------------------------
# Feature Definitions & Entry Points
#------------------------------------------------------------------------
features = {
'all': [py_ast_tool, py_visitor_tool, py_transformr_tool],
'ast': [py_ast_tool],
'visitor': [py_visitor_tool],
'transformer': [py_transformr_tool],
}
def build_package(schema_filename, feature_names, output_dir, mask=0):
"""
Build a package from the given schema and feature names in output_dir.
:param mask: indicates which features to mask, e.g. specifying
'mask=build.cython' disables Cython support.
"""
codegens = enumerate_codegens(feature_names, mask)
disk_allocator = generator.generate_from_file(
schema_filename, codegens, output_dir)
try:
_make_package(disk_allocator, codegens)
finally:
disk_allocator.close()
#------------------------------------------------------------------------
# Package Building Utilities
#------------------------------------------------------------------------
source_name = lambda fn: os.path.splitext(os.path.basename(fn))[0]
def _make_package(disk_allocator, codegens):
_make_init(disk_allocator, codegens)
# Make Cython dependency optional
# disk_allocator.open_sourcefile("cython.py")
fns = [c.out_filename for c in codegens if c.out_filename.endswith('.pxd')]
if fns:
_make_setup(disk_allocator, [source_name(fn) + '.py' for fn in fns])
def _make_init(disk_allocator, codegens):
init = disk_allocator.open_sourcefile("__init__.py")
init.write(dedent("""
# Horrid hack to make work around circular cimports
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
"""))
for c in codegens:
if c.out_filename.endswith('.py'):
modname = source_name(c.out_filename)
init.write("from %s import *\n" % modname)
def _make_setup(disk_allocator, filenames):
setup = disk_allocator.open_sourcefile("setup.py")
ext_modules = ["Extension('%s', ['%s'])" % (source_name(fn), fn)
for fn in filenames]
setup.write(dedent("""
from distutils.core import setup
from Cython.Distutils import build_ext
from Cython.Distutils.extension import Extension
ext_modules = [
%s
]
setup(
# ext_modules=cythonize('*.pyx'),
ext_modules=ext_modules,
cmdclass={'build_ext': build_ext},
)
""") % formatting.py_formatter.format_stats(",\n", 4, ext_modules))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-10 18:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("hosts", "0003_challengehostteam_team_url")]
operations = [
migrations.AlterField(
model_name="challengehostteam",
name="team_url",
field=models.CharField(blank=True, default="", max_length=1000),
)
]
|
import os
import numpy as np
import configparser as cfg_parser
import tensorflow as tf
import image_util
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
cp = cfg_parser.ConfigParser()
cp.read('net.cfg')
batch_size = cp.getint('train', 'batch_size')
noise_size = cp.getint('train', 'noise_size')
epochs = cp.getint('train', 'epochs')
n_samples = cp.getint('train', 'n_samples')
learning_rate = cp.getfloat('train', 'learning_rate')
beta1 = cp.getfloat('train', 'beta1')
max_to_keep = cp.getint('train', 'max_to_keep')
break_time = cp.getint('train', 'break_time')
image_num = cp.getint('image', 'image_num')
image_height = cp.getint('image', 'image_height')
image_width = cp.getint('image', 'image_width')
image_depth = cp.getint('image', 'image_depth')
def get_inputs():
"""
get two placeholder
:return:
        inputs_real: the tensor with shape [?, image_height, image_width, image_depth], used as the input of D
        inputs_noise: the tensor with shape [?, noise_size], used as the input of G
"""
inputs_real = tf.placeholder(tf.float32, [None, image_height, image_width, image_depth], name='inputs_real')
inputs_noise = tf.placeholder(tf.float32, [None, noise_size], name='inputs_noise')
return inputs_real, inputs_noise
def get_generator(noise, training, reuse, alpha=0.1):
"""
define the structure of G
:param noise: the input of G, the shape should be [?, noise_size]
    :param training: boolean, whether the network is in training mode (used by batch normalization)
    :param reuse: boolean, whether to reuse the variables in G's name scope 'generator'
    :param alpha: slope parameter of the leaky ReLU activation
    :return:
        outputs: the output of G for this input, with shape [?, image_height, image_width, image_depth]; the range is [-1, 1]
"""
with tf.variable_scope("generator", reuse=reuse):
# [?, 100] to [?, 4x4x1024]
# [?, 4x4x1024] to [?, 4, 4, 1024]
# connected
layer1 = tf.layers.dense(noise, 4 * 4 * 1024)
layer1 = tf.reshape(layer1, [-1, 4, 4, 1024])
layer1 = tf.layers.batch_normalization(layer1, training=training) # BN
layer1 = tf.maximum(alpha * layer1, layer1) # LeakyRELU
# [?, 4, 4, 1024] to [?, 8, 8, 512]
# reverse conv
# use 512 kernels with shape [3, 3, 1024], with strides=2 and padding='same'
layer2 = tf.layers.conv2d_transpose(layer1, 512, 3, strides=2, padding='same')
layer2 = tf.layers.batch_normalization(layer2, training=training) # BN
layer2 = tf.maximum(alpha * layer2, layer2) # LeakyRELU
# [?, 8, 8, 512] to [?, 16, 16, 256]
# reverse conv
        # use 256 kernels with shape [3, 3, 512], with strides=2 and padding='same'
layer3 = tf.layers.conv2d_transpose(layer2, 256, 3, strides=2, padding='same')
layer3 = tf.layers.batch_normalization(layer3, training=training) # BN
layer3 = tf.maximum(alpha * layer3, layer3) # LeakyRELU
# [?, 16, 16, 256] to [?, 32, 32, 128]
# reverse conv
# use 128 kernels with shape [3, 3, 256], with strides=2 and padding='same'
layer4 = tf.layers.conv2d_transpose(layer3, 128, 3, strides=2, padding='same')
layer4 = tf.layers.batch_normalization(layer4, training=training) # BN
layer4 = tf.maximum(alpha * layer4, layer4) # LeakyRELU
# [?, 32, 32, 128] to [?, 64, 64, 3(image_depth)]
# reverse conv
# use 3 kernels with shape [3, 3, 128], with strides=2 and padding='same'
logits = tf.layers.conv2d_transpose(layer4, image_depth, 3, strides=2, padding='same')
        outputs = tf.tanh(logits)  # use tanh as the activation (no BN on the output layer), maps the result to [-1.0, 1.0]
return outputs
def get_discriminator(input_imgs, training, reuse, alpha=0.1):
"""
define the structure of G
:param input_imgs: the input of D,the input image can be from train_data or generated by G. the range should
be [-1,1], so if the input is real image, you need to do some reflection prior.
the shape should be [?, image_height, image_width, image_depth]
:param training: just like the statement shows above
:param reuse:
:param alpha:
:return:
logits: the output of D, but without the last operation -- 'sigmod', so the range of that is R. why we return such
a useless value? Due to the mechanism of sigmoid_cross_entropy_with_logits, the function's param-limitation.
outputs: the final output of D, the range is [0, 1] because of reflection of 'sigmoid'
"""
# the structure of D is similar to G
with tf.variable_scope("discriminator", reuse=reuse):
# [?, 64, 64, 3(image_depth)] to [?, 32, 32, 64]
# conv
# use 64 kernels with shape [5, 5, 3], with strides=2 and padding='same'
layer1 = tf.layers.conv2d(input_imgs, 64, 5, strides=2, padding='same')
        # note: batch normalization is conventionally not applied to the first layer of D (as in the DCGAN setup)
layer1 = tf.maximum(alpha * layer1, layer1) # LeakyRELU
# [?, 32, 32, 64] to [?, 16, 16, 128]
# conv
# use 128 kernels with shape [5, 5, 64], with strides=2 and padding='same'
layer2 = tf.layers.conv2d(layer1, 128, 5, strides=2, padding='same')
layer2 = tf.layers.batch_normalization(layer2, training=training) # BN
layer2 = tf.maximum(alpha * layer2, layer2) # LeakyRELU
# [?, 16, 16, 128] to [?, 8, 8, 256]
# conv
# use 256 kernels with shape [5, 5, 128], with strides=2 and padding='same'
layer3 = tf.layers.conv2d(layer2, 256, 5, strides=2, padding='same')
layer3 = tf.layers.batch_normalization(layer3, training=training) # BN
layer3 = tf.maximum(alpha * layer3, layer3) # LeakyRELU
# [?, 8, 8, 256] to [?, 4, 4, 512]
# conv
        # use 512 kernels with shape [5, 5, 256], with strides=2 and padding='same'
layer4 = tf.layers.conv2d(layer3, 512, 5, strides=2, padding='same')
layer4 = tf.layers.batch_normalization(layer4, training=training) # BN
layer4 = tf.maximum(alpha * layer4, layer4) # LeakyRELU
# [?, 4, 4, 512] to [?, 4x4x512]
# [?, 4, 4, 512] to [?, 1]
# connected
flatten = tf.reshape(layer4, (-1, 4 * 4 * 512))
logits = tf.layers.dense(flatten, 1) # catch the logits here
        outputs = tf.sigmoid(logits)  # sigmoid, maps the result to [0, 1]
return logits, outputs
def get_loss(noise, real_imgs, smooth=0.05):
"""
calculate the loss with given data. the loss can be divided for two parts -- the loss of D and the loss of G.
D_loss symbols the level of distinction of given image, while the G_loss symbols the ability of fake image.
D_loss is constituted of D_real_loss() and D_fake_loss.
in paper, G: minimize{ log(1-D(G(z))) } and the loss of D: maximize{ log(D(x)) + log(1 - D(G(z))) }
the detail of function 'sigmoid_cross_entropy_with_logits(logits, label)' :
y = label p = sigmod(logits) loss = -[y * ln(p) + (1-y) * ln(1-p)]
:param noise: the input of G, the noise prepared for G.
:param real_imgs: the real images from train_data, whose range has been reflected from [0, 255] to [-1, 1]
in each channel
:param smooth: a param for prevent from overfitting, set the label value with (1-smooth) but not 1.
:return:
return a tuple contain two part: (G_loss, D_loss)
"""
# ========================begin: calculate g_loss ========================
g_outputs = get_generator(noise, True, False)
d_logits_fake, d_outputs_fake = get_discriminator(g_outputs, True, False)
""" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
G_loss: minimize[log(1-D(G(z)))] to minimize[- log(D(G(z)))]
The G_loss in paper is defined as log(1-D(G(z))), however it not suitable with sigmoid activation function.
▽log(1-D(G(z))) = ▽log(1-sigmoid(logits))
= [-sigmoid(logits)*(1-sigmoid(logits))] / [1-sigmoid(logits)]
= -sigmoid(logits)
Imagine such a situation:
When the G is very weak, D can point out those fake image easily -- D(G(z)) or sigmoid(logits) is closed to 0.
We surely hope the gradient can be bigger so that G can change more. However, the vaule of formula above is:
▽log(1-D(G(z))) = -sigmoid(logits)
≈ -0
Gradient is closed to zero. That's really a bad news. So we do some transform:
From minimize[log(1-D(G(z)))] to minimize[- log(D(G(z))]
The gradient is closed to one when G is weak and closed to zero when G can fake image well.
▽log(-D(G(z))) = ▽log(-sigmoid(logits))
= [-sigmoid(logits) * (1-sigmoid(logits))] / [-sigmoid(logits)]
= 1-sigmoid(logits)
"""
g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_outputs_fake) * (1 - smooth))
g_loss = tf.reduce_mean(g_loss)
# ========================= end =========================
# ======================== begin: calculate d_loss ========================
# the structure of G has been defined, so we just set reuse=True
d_logits_real, d_outputs_real = get_discriminator(real_imgs, True, True)
"""!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
D_real_loss: maximize(log(D(z)) to minimize(-log(D(z))
In paper, the target is to maximize(log(D(z)), because optimizers in tensorflow are all designed to reduce
loss, reduce the value of given formula, so we change the target from maximize(log(D(z)) to minimize(-log(D(z)).
that enough, because we find when D is weak and D(z) closed to 0, the gradient of logists is closed to 1.
"""
d_loss_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_outputs_real) * (1 - smooth))
d_loss_real = tf.reduce_mean(d_loss_real)
"""!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
D_fake_loss: maximize(log(1 - D(G(z)))) to minimize(-log(1 - D(G(z))))
In paper, the D_fake_loss symbols the ability of picking up fake image of D. When G is very weak,
D can point out those fake image easily. Now, the gradient for G should be bigger but that's opposite from D, the D
is good enough, the gradient for D should be smaller. This formula can be suitable without do any change more.
"""
d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_outputs_fake))
d_loss_fake = tf.reduce_mean(d_loss_fake)
d_loss = tf.add(d_loss_real, d_loss_fake)
# ========================= end =========================
return g_loss, d_loss
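# Hedged sketch (not part of the original model): a plain-numpy reference for the cross-entropy
# formula quoted in the get_loss docstring, loss = -[y * ln(p) + (1-y) * ln(1-p)] with p = sigmoid(logits).
def sigmoid_cross_entropy_reference(logits, label):
    p = 1.0 / (1.0 + np.exp(-logits))
    return -(label * np.log(p) + (1 - label) * np.log(1 - p))
# e.g. sigmoid_cross_entropy_reference(0.0, 1.0) == ln(2) ≈ 0.6931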
def get_optimizer(g_loss, d_loss):
"""
    Define the optimizers that minimize the losses. AdamOptimizer is used here; other optimizers could be substituted.
:param g_loss: loss of G net. calculated by 'get_loss'
:param d_loss: loss of D net. calculated by 'get_loss'
:return:
g_opt: Optimizer for g_loss
d_opt: Optimizer for d_loss
"""
# 'tf.trainable_variables()' would return variables trainable in graph. We divide variables to G_vars and D_vars.
train_vars = tf.trainable_variables()
g_vars = [var for var in train_vars if var.name.startswith("generator")]
d_vars = [var for var in train_vars if var.name.startswith("discriminator")]
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
g_opt = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)
d_opt = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)
return g_opt, d_opt
def show_generator_output(sess, noise_holder):
"""
    get the outputs of G; the noise fed to G is generated randomly inside this function.
:param sess: session
:param noise_holder: the placeholder of input of G
:return:
samples: imgs generated by G, but the range of value is still [-1, 1]
"""
batch_noise = np.random.uniform(-1, 1, size=(n_samples, noise_size))
samples = sess.run(get_generator(noise_holder, False, True), feed_dict={noise_holder: batch_noise})
return samples
def train():
"""
    the training part of the project: define the graph, feed the data, run the optimizers, and save the model.
:return:
"""
# define graph of DCGAN
inputs_real, inputs_noise = get_inputs()
g_loss, d_loss = get_loss(inputs_noise, inputs_real)
g_train_opt, d_train_opt = get_optimizer(g_loss, d_loss)
# feed with data -- start to train
with tf.Session() as sess:
begin_time = 0
saver = tf.train.Saver(max_to_keep=max_to_keep)
sess.run(tf.global_variables_initializer())
#
# =============== recover the net param from saved model for further training =================
# begin_time = 10
# recover = tf.train.import_meta_graph(image_util.model_path+'model-{}.meta'.format(begin_time))
# recover.restore(sess, tf.train.latest_checkpoint(image_util.model_path))
# =============================================================================================
for epoch in range(begin_time, epochs):
images = image_util.get_imgs(image_num)
for batch_i in range(images.shape[0] // batch_size):
print("training in (epoch = {}, batch = {})".format(epoch, batch_i))
batch_images = images[batch_i * batch_size: (batch_i + 1) * batch_size]
# ============= we choose the image data sequential, you can also select with random ==============
# batch_images = images.tolist()
# batch_images = random.sample(batch_images, batch_size)
# batch_images = np.array(batch_images)
# batch_images.reshape(-1,64,64,3)
# ==================================================================================================
                # rescale the real images from [0, 1] to [-1, 1] so they match the range of G's tanh output
batch_images = batch_images * 2 - 1
batch_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
                # the paper recommends k discriminator updates per generator update; here k = 1
sess.run(d_train_opt, feed_dict={inputs_real: batch_images, inputs_noise: batch_noise})
sess.run(g_train_opt, feed_dict={inputs_real: batch_images, inputs_noise: batch_noise})
train_loss_g = g_loss.eval({inputs_real: batch_images, inputs_noise: batch_noise})
train_loss_d = d_loss.eval({inputs_real: batch_images, inputs_noise: batch_noise})
print("g_loss:", train_loss_g)
print("d_loss:", train_loss_d)
# save images generated by G after each epoch
samples = show_generator_output(sess, inputs_noise)
image_util.plot_images(epoch, samples)
# save model
if epoch % break_time == 0:
saver.save(sess, image_util.model_path+'model', global_step=epoch)
if __name__ == '__main__':
with tf.Graph().as_default():
train()
|
# -*- coding: utf-8 -*-
from .colors import colors as c
import os
def breachcomp_check(targets, breachcomp_path):
# https://gist.github.com/scottlinux/9a3b11257ac575e4f71de811322ce6b3
try:
import subprocess
c.info_news("Looking up targets in BreachCompilation")
query_bin = os.path.join(breachcomp_path, "query.sh")
subprocess.call(["chmod", "+x", query_bin])
for t in targets:
procfd = subprocess.run([query_bin, t.email], stdout=subprocess.PIPE)
try:
output = procfd.stdout.decode("utf-8")
except Exception as e:
c.bad_news(f"Could not decode bytes for {t.email} results")
output = procfd.stdout
print(output[:85], "[...]")
continue
if len(output) != 0:
split_output = output.split("\n")
for line in split_output:
if ":" in line:
t.data.append(("BC_PASS", line.split(":")[1]))
c.good_news(
"Found BreachedCompilation entry {line}".format(line=line)
)
return targets
except Exception as e:
c.bad_news("Breach compilation")
print(e)
|
from application.core.entity.account import Account
from application.core.exception.dashboardplus_exception import (
InputValidationException,
EntityAlreadyExistsException,
AccountAlreadyExistsException, PersitenceException,
UnexpectedFailureException
)
from application.core.port.create_account_port import CreateAccountPort
from application.core.port.encrypt_password_port import EncryptPasswordPort
from application.core.port.insert_account_port import InsertAccountPort
from application.core.port.validate_account_payload_port import ValidateAccountPayloadPort
class CreateAccountStep:
def __init__(self,
validator: ValidateAccountPayloadPort,
encryptor: EncryptPasswordPort,
factory: CreateAccountPort,
repository: InsertAccountPort):
self.repository = repository
self.factory = factory
self.encryptor = encryptor
self.validator = validator
def execute(self, payload):
self._validate_payload(payload)
creation_payload = self._generate_creation_payload(payload)
account = self.factory.create_account(creation_payload)
return self._insert_account(account)
def _validate_payload(self, payload: dict):
errors = self.validator.validate_payload(payload)
if errors:
raise InputValidationException(messages=errors)
def _generate_creation_payload(self, payload: dict) -> dict:
return {
'username': payload['username'],
'email': payload['email'],
'hash_password': self.encryptor.encrypt_password(payload['password']),
'email_confirmed': False
}
def _insert_account(self, account: Account) -> int:
try:
account_id = self.repository.insert(account)
except EntityAlreadyExistsException:
raise AccountAlreadyExistsException()
except PersitenceException:
raise UnexpectedFailureException()
else:
return account_id
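if __name__ == '__main__':
    # Hedged usage sketch with in-memory fakes (illustrative only, not the application's real adapters):
    # it exercises CreateAccountStep.execute end to end without a database.
    class FakeValidator:
        def validate_payload(self, payload):
            return []  # no validation errors
    class FakeEncryptor:
        def encrypt_password(self, password):
            return 'hashed:' + password
    class FakeFactory:
        def create_account(self, creation_payload):
            return creation_payload  # stands in for an Account entity
    class FakeRepository:
        def insert(self, account):
            return 1  # pretend the inserted row got id 1
    step = CreateAccountStep(FakeValidator(), FakeEncryptor(), FakeFactory(), FakeRepository())
    print(step.execute({'username': 'alice', 'email': '[email protected]', 'password': 'secret'}))  # -> 1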
|
from .base import AbstractRiskManager
from ..event import OrderEvent
class ExampleRiskManager(AbstractRiskManager):
def refine_orders(self, portfolio, sized_order):
"""
        This ExampleRiskManager object simply lets the
        sized order through, creates the corresponding
        OrderEvent object and returns it in a single-element list.
"""
order_event = OrderEvent(
sized_order.ticker,
sized_order.action,
sized_order.quantity
)
return [order_event]
|
"""
Setting the region
==================
Many of the plotting functions take the ``region`` parameter, which sets
the area that will be shown in the figure. This tutorial covers the different types of
inputs that it can accept.
.. note::
This tutorial assumes the use of a Python notebook, such as IPython or Jupyter Notebook.
To see the figures while using a Python script instead, use
``fig.show(method="external")`` to display the figure in the default PDF viewer.
To save the figure, use ``fig.savefig("figname.pdf")`` where ``"figname.pdf"``
is the desired name and file extension for the saved figure.
"""
import pygmt
########################################################################################
# Coordinates
# -----------
#
# A string of coordinates can be passed to ``region``, in the form of
# *xmin*/*xmax*/*ymin*/*ymax*.
fig = pygmt.Figure()
fig.coast(
# Set the x-range from 10E to 20E and the y-range to 35N to 45N
region="10/20/35/45",
# Set projection to Mercator, and the figure size to 15 centimeters
projection="M15c",
# Set the color of the land to light gray
land="lightgray",
# Set the color of the water to white
water="white",
# Display the national borders and set the pen thickness to 0.5p
borders="1/0.5p",
# Display the shorelines and set the pen thickness to 0.5p
shorelines="1/0.5p",
# Set the frame to display annotations and gridlines
frame="ag",
)
fig.show()
########################################################################################
#
# The coordinates can be passed to ``region`` as a list, in the form of
# [*xmin*,\ *xmax*,\ *ymin*,\ *ymax*].
fig = pygmt.Figure()
fig.coast(
# Set the x-range from 10E to 20E and the y-range to 35N to 45N
region=[10, 20, 35, 45],
projection="M12c",
land="lightgray",
water="white",
borders="1/0.5p",
shorelines="1/0.5p",
frame="ag",
)
fig.show()
########################################################################################
#
# Instead of passing the axes minima and maxima, the coordinates of the
# bottom-left and top-right corners can be passed. The order in the string is
# bottom-left corner first, then top-right corner. To specify corner coordinates,
# append **+r** at the end of the ``region`` string.
fig = pygmt.Figure()
fig.coast(
# Set the bottom-left corner as 10E, 35N and the top-right corner as 20E, 45N
region="10/35/20/45+r",
projection="M12c",
land="lightgray",
water="white",
borders="1/0.5p",
shorelines="1/0.5p",
frame="ag",
)
fig.show()
########################################################################################
# Global regions
# --------------
#
# In addition to passing coordinates, the argument **d** can be passed to set the
# region to the entire globe. The range is 180W to 180E (-180, 180) and 90S to
# 90N (-90 to 90). With no parameters set for the projection, the figure defaults to be
# centered at the mid-point of both x- and y-axes. Using **d**\ , the figure is
# centered at (0, 0), or the intersection of the equator and prime meridian.
fig = pygmt.Figure()
fig.coast(
region="d",
projection="Cyl_stere/12c",
land="darkgray",
water="white",
borders="1/0.5p",
shorelines="1/0.5p",
frame="ag",
)
fig.show()
########################################################################################
#
# The argument **g** can be passed, which encompasses the entire globe. The range is
# 0E to 360E (0, 360) and 90S to 90N (-90 to 90). With no parameters set for the
# projection, the figure is centered at (180, 0), or the intersection of the equator and
# International Date Line.
fig = pygmt.Figure()
fig.coast(
region="g",
projection="Cyl_stere/12c",
land="darkgray",
water="white",
borders="1/0.5p",
shorelines="1/0.5p",
frame="ag",
)
fig.show()
########################################################################################
# ISO code
# --------
#
# The ``region`` can be set to include a specific area specified by the two-character
# ISO 3166-1 alpha-2 convention
# (for further information: https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2).
fig = pygmt.Figure()
fig.coast(
# Set the figure region to encompass Japan with the ISO code "JP"
region="JP",
projection="M12c",
land="lightgray",
water="white",
borders="1/0.5p",
shorelines="1/0.5p",
frame="ag",
)
fig.show()
########################################################################################
#
# The area encompassed by the ISO code can be expanded by appending **+r**\ *increment*
# to the ISO code. The *increment* unit is in degrees, and if only one value is added it
# expands the range of the region in all directions. Using **+r** expands the
# final region boundaries to be multiples of *increment*.
fig = pygmt.Figure()
fig.coast(
# Expand the region boundaries to be multiples of 3 degrees in all directions
region="JP+r3",
projection="M12c",
land="lightgray",
water="white",
borders="1/0.5p",
shorelines="1/0.5p",
frame="ag",
)
fig.show()
########################################################################################
#
# Instead of expanding the range of the plot uniformly in all directions, two values
# can be passed to expand differently on each axis. The format is *xinc*/*yinc*.
fig = pygmt.Figure()
fig.coast(
# Expand the region boundaries to be multiples of 3 degrees on the x-axis
# and 5 degrees on the y-axis.
region="JP+r3/5",
projection="M12c",
land="lightgray",
water="white",
borders="1/0.5p",
shorelines="1/0.5p",
frame="ag",
)
fig.show()
########################################################################################
#
# Instead of expanding the range of the plot uniformly in all directions, four values
# can be passed to expand differently in each direction.
# The format is *winc*/*einc*/*sinc*/*ninc*, which expands on the west,
# east, south, and north axes.
fig = pygmt.Figure()
fig.coast(
# Expand the region boundaries to be multiples of 3 degrees to the west,
# 5 degrees to the east, 7 degrees to the south, and 9 degrees to the north.
region="JP+r3/5/7/9",
projection="M12c",
land="lightgray",
water="white",
borders="1/0.5p",
shorelines="1/0.5p",
frame="ag",
)
fig.show()
########################################################################################
#
# The ``region`` increment can be appended with **+R**, which adds the increment
# without rounding.
fig = pygmt.Figure()
fig.coast(
# Expand the region setting outside the range of Japan by 3 degrees in all
# directions, without rounding to the nearest increment.
region="JP+R3",
projection="M12c",
land="lightgray",
water="white",
borders="1/0.5p",
shorelines="1/0.5p",
frame="ag",
)
fig.show()
########################################################################################
#
# The ``region`` increment can be appended with **+e**, which is like **+r** and
# expands the final region boundaries to be multiples of *increment*. However,
# it ensures that the bounding box extends by at least 0.25 times the increment.
fig = pygmt.Figure()
fig.coast(
# Expand the region boundaries to be multiples of 3 degrees in all directions
region="JP+e3",
projection="M12c",
land="lightgray",
water="white",
borders="1/0.5p",
shorelines="1/0.5p",
frame="ag",
)
fig.show()
|
import laia.common.logging as log
def test_filepath(tmpdir):
filepath = tmpdir / "test"
log.config(filepath=filepath)
log.info("test!")
log.clear()
assert filepath.exists()
def test_filename(tmpdir):
with tmpdir.as_cwd():
filepath = "test"
log.config(filepath=filepath)
log.info("test!")
log.clear()
assert tmpdir.join(filepath).exists()
|
from tkinter import *
from random import choice
import pandas
BACKGROUND_COLOR = "#B1DDC6"
to_learn = {}
word = {}
# ---------------------------- PANDAS LOGIC ------------------------------- #
try:
data = pandas.read_csv("./data/words_to_learn.csv")
except FileNotFoundError:
data = pandas.read_csv("./data/en_ru.csv")
to_learn = data.to_dict(orient="records")
else:
to_learn = data.to_dict(orient="records")
# ---------------------------- LOGIC ------------------------------- #
def next_word():
global word, timer
timer = window.after_cancel(timer)
word = choice(to_learn)
eng = word["English"]
canvas.itemconfig(bg_card, image=front_card)
canvas.itemconfig(lang_text, text="English", fill="black")
canvas.itemconfig(word_text, text=eng, fill="black")
timer = window.after(3000, translate)
def known():
to_learn.remove(word)
next_word()
def unknown():
new_dict = pandas.DataFrame(to_learn)
new_dict.to_csv("./data/words_to_learn.csv", index=False)
next_word()
def translate():
rus = word["Russian"]
canvas.itemconfig(bg_card, image=back_card)
canvas.itemconfig(lang_text, text="Russian", fill="white")
canvas.itemconfig(word_text, text=rus, fill="white")
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Flashy")
window.config(padx=50, pady=50, bg=BACKGROUND_COLOR)
timer = window.after(3000, translate)
# Canvas
canvas = Canvas(width=800, height=526, highlightthickness=0)
front_card = PhotoImage(file="./images/card_front.png")
back_card = PhotoImage(file="./images/card_back.png")
bg_card = canvas.create_image(400, 263, image=front_card)
canvas.config(bg=BACKGROUND_COLOR)
lang_text = canvas.create_text(400, 150, text="Title", font=("Arial", 40, "italic"))
word_text = canvas.create_text(400, 263, text="Word", font=("Arial", 60, "bold"))
canvas.grid(column=0, row=0, columnspan=2)
# Buttons
yes_img = PhotoImage(file="./images/right.png")
yes_button = Button(image=yes_img, highlightthickness=0, command=known)
yes_button.grid(column=0, row=1)
no_img = PhotoImage(file="./images/wrong.png")
no_button = Button(image=no_img, highlightthickness=0, command=unknown)
no_button.grid(column=1, row=1)
next_word()
window.mainloop()
|
# Copyright (c) 2018, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from numba import cuda
from numba.cuda.cudadrv.driver import driver
from librmm_cffi import librmm as rmm
def row_matrix(df):
"""Compute the C (row major) version gpu matrix of df
:param col_major: an `np.ndarray` or a `DeviceNDArrayBase` subclass.
If already on the device, its stream will be used to perform the
transpose (and to copy `row_major` to the device if necessary).
To be replaced by CUDA ml-prim in upcoming version
"""
cols = [df._cols[k] for k in df._cols]
ncols = len(cols)
nrows = len(df)
dtype = cols[0].dtype
col_major = df.as_gpu_matrix(order='F')
row_major = rmm.device_array((nrows, ncols), dtype=dtype, order='C')
tpb = driver.get_device().MAX_THREADS_PER_BLOCK
bpg = (nrows + tpb - 1) // tpb
@cuda.jit
def kernel(_col_major, _row_major):
tid = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
if tid >= nrows:
return
_col_offset = 0
while _col_offset < _col_major.shape[1]:
col_idx = _col_offset
_row_major[tid, col_idx] = _col_major[tid, col_idx]
_col_offset += 1
kernel[bpg, tpb](col_major, row_major)
return row_major
|
# -*- coding: utf-8 -*-
### Global variables ###
host = ''  # Host passed to the socket
port = 12345  # Socket port
backlog = 5  # Int passed to listen
size = 1024  # Maximum packet size
running = 1  # Keeps the loop running
sockets = []  # Temporary list of sockets
chat_id_admin = 000000  # The company's chat_id, allowed to open every locker.
CodiAcces = "CODI_ALTES"  # Code for sign-ups.
token = "PLACE_YOUR_TOKEN_HERE"  # Bot token.
######
|
from django.views.generic import TemplateView
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from . import models
from . import forms
from . import filters
from . import consts
class Log(TemplateView):
    template_name = None  # must be specified by a subclass
model = models.Log
form_class = forms.Log
filter_class = filters.Log
paginate_by = 30
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
qs = self.get_queryset()
context['page'] = self._get_page(qs)
context['object_list'] = qs
context['LOG_LEVEL_CRITICAL'] = consts.LOG_LEVEL_CRITICAL
context['LOG_LEVEL_ERROR'] = consts.LOG_LEVEL_ERROR
context['LOG_LEVEL_WARNING'] = consts.LOG_LEVEL_WARNING
context['LOG_LEVEL_INFO'] = consts.LOG_LEVEL_INFO
context['LOG_LEVEL_DEBUG'] = consts.LOG_LEVEL_DEBUG
context['LOG_LEVEL_NOTSET'] = consts.LOG_LEVEL_NOTSET
form = self.get_form()
form.is_valid()
context['form'] = form
return context
def get_form(self):
return self.form_class(self.request.GET or None)
def get_queryset(self):
form = self.get_form()
if form.is_valid():
params = form.cleaned_data
qs = self.model.objects.all()
qs = self.filter_class(params, qs).qs
return qs
else:
return self.model.objects.none()
def _get_page(self, qs):
paginator = Paginator(qs, self.paginate_by)
page_num = self.request.GET.get('page')
try:
page = paginator.page(page_num)
except PageNotAnInteger:
page = paginator.page(1)
except EmptyPage:
page = paginator.page(paginator.num_pages)
return page
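# Hedged sketch: a concrete log view only needs to point template_name at a real template
# (the path below is illustrative).
#
# class SystemLog(Log):
#     template_name = 'logs/log_list.html'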
|
import nltk
import ner
import re
import json
import os
import urllib2
from urllib import urlencode
from pprint import pprint
# open the contents
with open('CodeAssignmentDataSet.json') as json_file:
json_data = json.load(json_file)
json_file.close()
pprint(json_data[0:4])
# The OMDB API's constant for searching movie titles
titleUrl = "http://www.omdbapi.com/?" # i=&t=
# Fetches a movie from OMDB API as a Json object
# Note: expects a full title name
def fetchMovie(predicate):
predicate = urlencode({'i': predicate, 't': predicate})
request = urllib2.Request(titleUrl + predicate, None, {'user-agent':'whatever'})
opener = urllib2.build_opener()
stream = opener.open(request)
return json.load(stream)
# search names of actors and movie/show titles from description and title
def name(sentence):
r = re.compile('([A-Z]\w+(?=[\s\-][A-Z])(?:[\s\-][A-Z]\w+)+)', re.UNICODE)
match = r.findall(sentence)
#assert(match is not None)
#extracted = match.group(0)
return match #extracted
# take the union of two lists of dictionaries
def union(a, b):
""" return the union of two lists """
return list(set(a) | set(b))
name_from_title = name(json_data[2].get('title'))
name_from_description = name(json_data[2].get('description'))
names = union(name_from_title, name_from_description)
print names
# If a famous movie or actor is confirmed, it will be updated into the original data set
#
for item in json_data[0:4]:
xx = {'FamousMovie': fetchMovie(names[0])['Title']}
item.update(xx)
pprint (json_data[0:4])
|
#! /usr/bin/env python3
# Copyright (c) 2021 Grumpy Cat Software S.L.
#
# This Source Code is licensed under the MIT 2.0 license.
# the terms can be found in LICENSE.md at the root of
# this project, or at http://mozilla.org/MPL/2.0/.
# %%
import numpy as np
import matplotlib.pyplot as plt
import shapelets.compute as sc
from shapelets.data import load_mat
data = load_mat('ItalianPowerDemand.mat')[0:15000, 2]
fig, ax = plt.subplots(figsize=(18, 8))
ax.plot(data)
plt.show()
# %%
s = sc.matrixprofile.snippets(data, 200, 2, 50)
for idx, sn in enumerate(s):
print("Snippet " + repr(idx) + " -> " + repr(round(sn['fraction'] * 100.0)))
# %%
margin = sc.amin(data) * .9
fig, (ax0, ax1, ax2) = plt.subplots(3, 1, figsize=(18, 18), gridspec_kw={'height_ratios': [2, 1, 1]})
ax2.plot(s[0]['snippet'])
ax1.plot(s[1]['snippet'])
ax0.plot(data)
for idx, sn in enumerate(s):
ax0.plot(sn['neighbors'], (margin * np.ones_like(sn['neighbors'])) + idx, 'o', label=repr(idx), markersize=7,
alpha=.1)
plt.show()
|
from .config import DOSIMode, FeatureFlags, InferenceAlgorithm
from .constants import (
LEAK_NODE,
DEFAULT_SYMPTOM_LEAK_PROPORTION,
DEFAULT_MAX_SYMPTOM_PRIOR,
DEFAULT_MAX_DISEASE_PRIOR,
DEFAULT_APPLY_CONCEPT_GROUPS,
DEFAULT_SINGLE_DISEASE_QUERY,
DEFAULT_IGNORE_OTHER_DISEASES,
DEFAULT_DOSI_MODE,
DEFAULT_INFERENCE_ALGORITHM,
)
from .engine import InferenceEngine
from .inference import calculate_disease_posteriors |
# Matthieu Brucher
# Last Change : 2007-08-29 15:48
"""
Computes a Newton step for a specific function at a specific point
"""
import numpy
import numpy.linalg
class NewtonStep(object):
"""
The Newton step
"""
def __call__(self, function, point, state):
"""
Computes a Newton step based on a function and a point
"""
hessian = function.hessian(point)
gradient = function.gradient(point)
step = (-numpy.linalg.solve(hessian, gradient)).reshape(point.shape)
state['hessian'] = hessian
state['gradient'] = gradient
state['direction'] = step
return step
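if __name__ == '__main__':
    # Minimal sketch: apply the Newton step to a quadratic f(x) = 0.5 * x'Ax - b'x, whose gradient is
    # Ax - b and whose hessian is A (the toy Quadratic class below is illustrative only).
    class Quadratic(object):
        def __init__(self, A, b):
            self.A = A
            self.b = b
        def gradient(self, x):
            return numpy.dot(self.A, x) - self.b
        def hessian(self, x):
            return self.A
    A = numpy.array([[3., 1.], [1., 2.]])
    b = numpy.array([1., 1.])
    x = numpy.zeros(2)
    step = NewtonStep()(Quadratic(A, b), x, state={})
    print(x + step)  # a single Newton step reaches the minimizer A^{-1} b of a quadratic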
|
import textwrap
import discretisedfield as df
class Mesh(df.Mesh):
@property
def _script(self):
mx3 = "SetGridSize({}, {}, {})\n".format(self.n[0], self.n[1], self.n[2])
mx3 += "SetCellSize({}, {}, {})\n\n".format(self.cell[0],
self.cell[1],
self.cell[2])
return mx3
|
from dataset import DataSet
import generate_test_splits
import nltk
sentence = "At eight o'clock on Thursday morning... Arthur didn't feel very good."
tokens = nltk.word_tokenize(sentence)
dataset = DataSet()
generate_test_splits.generate_hold_out_split(dataset, training=0.9, base_dir="splits")
#!/usr/bin/env python
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Main Madpack installation executable.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import sys
import getpass
import re
import os
import glob
import traceback
import subprocess
import datetime
import tempfile
import shutil
import upgrade_util as uu
from utilities import error_
from utilities import info_
from utilities import is_rev_gte
from utilities import get_rev_num
from utilities import run_query
from utilities import get_madlib_dbrev
from utilities import get_dbver
# Required Python version
py_min_ver = [2, 6]
# Check python version
if sys.version_info[:2] < py_min_ver:
print("ERROR: python version too old (%s). You need %s or greater." %
('.'.join(str(i) for i in sys.version_info[:3]), '.'.join(str(i) for i in py_min_ver)))
exit(1)
# Find MADlib root directory. This file is installed to
# $MADLIB_ROOT/madpack/madpack.py, so to get $MADLIB_ROOT we need to go
# two levels up in the directory hierarchy. We use (a) os.path.realpath and
# (b) __file__ (instead of sys.argv[0]) because madpack.py could be called
# (a) through a symbolic link and (b) not as the main module.
maddir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/..") # MADlib root dir
sys.path.append(maddir + "/madpack")
# Import MADlib python modules
import argparse
import configyml
# Some read-only variables
this = os.path.basename(sys.argv[0]) # name of this script
# Default directories
maddir_conf = maddir + "/config" # Config dir
maddir_lib = maddir + "/lib/libmadlib.so" # C/C++ libraries
# Read the config files
ports = configyml.get_ports(maddir_conf) # object made of Ports.yml
rev = configyml.get_version(maddir_conf) # MADlib OS-level version
portid_list = []
for port in ports:
portid_list.append(port)
SUPPORTED_PORTS = ('postgres', 'greenplum', 'hawq')
# Global variables
portid = None # Target port ID (eg: pg90, gp40)
dbver = None # DB version
con_args = {} # DB connection arguments
verbose = None # Verbose flag
keeplogs = None
tmpdir = None
is_hawq2 = False
def _make_dir(dir):
"""
# Create a temp dir
# @param dir temp directory path
"""
if not os.path.isdir(dir):
try:
os.makedirs(dir)
except:
print "ERROR: can not create directory: %s. Check permissions." % dir
exit(1)
# ------------------------------------------------------------------------------
def _internal_run_query(sql, show_error):
"""
Runs a SQL query on the target platform DB
using the default command-line utility.
Very limited:
- no text output with "new line" characters allowed
@param sql query text to execute
@param show_error displays the SQL error msg
"""
return run_query(sql, con_args, show_error)
# ------------------------------------------------------------------------------
def _get_relative_maddir(maddir, port):
""" Return a relative path version of maddir
GPDB and HAWQ installations have a symlink outside of GPHOME that
links to the current GPHOME. After a DB upgrade, this symlink is updated to
the new GPHOME.
'maddir_lib', which uses the absolute path of GPHOME, is hardcoded into each
madlib function definition. Replacing the GPHOME path with the equivalent
relative path makes it simpler to perform DB upgrades without breaking MADlib.
"""
if port not in ('greenplum', 'hawq'):
# do nothing for postgres
return maddir
# e.g. maddir_lib = $GPHOME/madlib/Versions/1.9/lib/libmadlib.so
# 'madlib' is supposed to be in this path, which is the default folder
# used by GPPKG to install madlib
try:
abs_gphome, tail = maddir.split('madlib/')
except ValueError:
return maddir
link_name = 'greenplum-db' if port == 'greenplum' else 'hawq'
# Check outside $GPHOME if there is a symlink to this absolute path
# os.pardir is equivalent to ..
# os.path.normpath removes the extraneous .. from that path
rel_gphome = os.path.normpath(os.path.join(abs_gphome, os.pardir, link_name))
if os.path.islink(rel_gphome) and os.path.realpath(rel_gphome) == os.path.realpath(abs_gphome):
# if the relative link exists and is pointing to current location
return os.path.join(rel_gphome, 'madlib', tail)
else:
return maddir
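# Hedged example of the rewrite described above (the paths are hypothetical): with
#   maddir = /usr/local/greenplum-db-6.10.0/madlib/Versions/1.17
# and a sibling symlink /usr/local/greenplum-db -> /usr/local/greenplum-db-6.10.0,
# this function returns /usr/local/greenplum-db/madlib/Versions/1.17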
# ------------------------------------------------------------------------------
def _run_sql_file(schema, maddir_mod_py, module, sqlfile,
tmpfile, logfile, pre_sql, upgrade=False,
sc=None):
"""
Run SQL file
@param schema name of the target schema
@param maddir_mod_py name of the module dir with Python code
@param module name of the module
@param sqlfile name of the file to parse
@param tmpfile name of the temp file to run
@param logfile name of the log file (stdout)
@param pre_sql optional SQL to run before executing the file
@param upgrade are we upgrading as part of this sql run
@param sc object of ScriptCleaner
"""
# Check if the SQL file exists
if not os.path.isfile(sqlfile):
error_(this, "Missing module SQL file (%s)" % sqlfile, False)
raise ValueError("Missing module SQL file (%s)" % sqlfile)
# Prepare the file using M4
try:
f = open(tmpfile, 'w')
# Add the before SQL
if pre_sql:
f.writelines([pre_sql, '\n\n'])
f.flush()
# Find the madpack dir (platform specific or generic)
if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/madpack"):
maddir_madpack = maddir + "/ports/" + portid + "/" + dbver + "/madpack"
else:
maddir_madpack = maddir + "/madpack"
maddir_ext_py = maddir + "/lib/python"
m4args = ['m4',
'-P',
'-DMADLIB_SCHEMA=' + schema,
'-DPLPYTHON_LIBDIR=' + maddir_mod_py,
'-DEXT_PYTHON_LIBDIR=' + maddir_ext_py,
'-DMODULE_PATHNAME=' + maddir_lib,
'-DMODULE_NAME=' + module,
'-I' + maddir_madpack,
sqlfile]
info_(this, "> ... parsing: " + " ".join(m4args), verbose)
subprocess.call(m4args, stdout=f)
f.close()
except:
error_(this, "Failed executing m4 on %s" % sqlfile, False)
raise Exception
# Only update function definition
sub_module = ''
if upgrade:
# get filename from complete path without the extension
sub_module = os.path.splitext(os.path.basename(sqlfile))[0]
info_(this, sub_module, verbose)
if sub_module not in sc.get_change_handler().newmodule:
sql = open(tmpfile).read()
sql = sc.cleanup(sql)
open(tmpfile, 'w').write(sql)
# Run the SQL using DB command-line utility
if portid in ('greenplum', 'postgres', 'hawq'):
sqlcmd = 'psql'
# Test the DB cmd line utility
std, err = subprocess.Popen(['which', sqlcmd], stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
if not std:
error_(this, "Command not found: %s" % sqlcmd, True)
runcmd = [sqlcmd, '-a',
'-v', 'ON_ERROR_STOP=1',
'-h', con_args['host'].split(':')[0],
'-p', con_args['host'].split(':')[1],
'-d', con_args['database'],
'-U', con_args['user'],
'--no-password',
'-f', tmpfile]
runenv = os.environ
if 'password' in con_args:
runenv["PGPASSWORD"] = con_args['password']
runenv["PGOPTIONS"] = '-c client_min_messages=notice'
# Open log file
try:
log = open(logfile, 'w')
except:
error_(this, "Cannot create log file: %s" % logfile, False)
raise Exception
# Run the SQL
try:
info_(this, "> ... executing " + tmpfile, verbose)
retval = subprocess.call(runcmd, env=runenv, stdout=log, stderr=log)
except:
error_(this, "Failed executing %s" % tmpfile, False)
raise Exception
finally:
log.close()
return retval
# ------------------------------------------------------------------------------
def _check_db_port(portid):
"""
Make sure we are connected to the expected DB platform
@param portid expected DB port id - to be validates
"""
# Postgres
try:
row = _internal_run_query("SELECT version() AS version", True)
except:
error_(this, "Cannot validate DB platform type", True)
if row and row[0]['version'].lower().find(portid) >= 0:
if portid == 'postgres':
if row[0]['version'].lower().find('greenplum') < 0:
return True
elif portid == 'greenplum':
if row[0]['version'].lower().find('hawq') < 0:
return True
elif portid == 'hawq':
return True
return False
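# Note: version() strings overlap across these forks, which is why the checks
# above are nested. Abbreviated, illustrative examples (not exhaustive):
#   "PostgreSQL 9.6.2 on x86_64-pc-linux-gnu ..."                       -> postgres
#   "PostgreSQL 8.3.23 (Greenplum Database 5.7.0 build ...) ..."        -> greenplum
#   "PostgreSQL 8.2.15 (Greenplum Database 4.2.0 ...) (HAWQ 2.1 ...)"   -> hawq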
# ------------------------------------------------------------------------------
def _print_revs(rev, dbrev, con_args, schema):
"""
Print version information
@param rev OS-level MADlib version
@param dbrev DB-level MADlib version
@param con_args database connection arguments
@param schema MADlib schema name
"""
info_(this, "MADlib tools version = %s (%s)" % (str(rev), sys.argv[0]), True)
if con_args:
try:
info_(this, "MADlib database version = %s (host=%s, db=%s, schema=%s)"
% (dbrev, con_args['host'], con_args['database'], schema), True)
except:
info_(this, "MADlib database version = [Unknown] (host=%s, db=%s, schema=%s)"
% (dbrev, con_args['host'], con_args['database'], schema), True)
return
# ------------------------------------------------------------------------------
def _plpy_check(py_min_ver):
"""
Check pl/python existence and version
@param py_min_ver min Python version to run MADlib
"""
info_(this, "Testing PL/Python environment...", True)
# Check PL/Python existence
rv = _internal_run_query("SELECT count(*) AS CNT FROM pg_language "
"WHERE lanname = 'plpythonu'", True)
if int(rv[0]['cnt']) > 0:
info_(this, "> PL/Python already installed", verbose)
else:
info_(this, "> PL/Python not installed", verbose)
info_(this, "> Creating language PL/Python...", True)
try:
_internal_run_query("CREATE LANGUAGE plpythonu;", True)
except:
error_(this, """Cannot create language plpythonu. Please check if you
have configured and installed portid (your platform) with
`--with-python` option. Stopping installation...""", False)
raise Exception
# Check PL/Python version
_internal_run_query("DROP FUNCTION IF EXISTS plpy_version_for_madlib();", False)
_internal_run_query("""
CREATE OR REPLACE FUNCTION plpy_version_for_madlib()
RETURNS TEXT AS
$$
import sys
# return '.'.join(str(item) for item in sys.version_info[:3])
return str(sys.version_info[:3]).replace(',','.').replace(' ','').replace(')','').replace('(','')
$$
LANGUAGE plpythonu;
""", True)
rv = _internal_run_query("SELECT plpy_version_for_madlib() AS ver;", True)
python = rv[0]['ver']
py_cur_ver = [int(i) for i in python.split('.')]
if py_cur_ver >= py_min_ver:
info_(this, "> PL/Python version: %s" % python, verbose)
else:
error_(this, "PL/Python version too old: %s. You need %s or greater"
% (python, '.'.join(str(i) for i in py_min_ver)), False)
raise Exception
_internal_run_query("DROP FUNCTION IF EXISTS plpy_version_for_madlib();", False)
info_(this, "> PL/Python environment OK (version: %s)" % python, True)
# ------------------------------------------------------------------------------
def _db_install(schema, dbrev, testcase):
"""
Install MADlib
@param schema MADlib schema name
@param dbrev DB-level MADlib version
@param testcase command-line args for a subset of modules
"""
info_(this, "Installing MADlib into %s schema..." % schema.upper(), True)
temp_schema = schema + '_v' + ''.join(map(str, get_rev_num(dbrev)))
# Check the status of MADlib objects in database
madlib_exists = False if dbrev is None else True
# Test if schema is writable
try:
_internal_run_query("CREATE TABLE %s.__madlib_test_table (A INT);" % schema, False)
_internal_run_query("DROP TABLE %s.__madlib_test_table;" % schema, False)
schema_writable = True
except:
schema_writable = False
# CASE #1: Target schema exists with MADlib objects:
if schema_writable and madlib_exists:
# work-around before UDT is available in HAWQ
if portid == 'hawq':
            hawq_overwrite_msg = (
                "***************************************************************************\n"
                "* Schema MADLIB already exists\n"
                "* MADlib objects will be overwritten to the 'MADLIB' schema\n"
                "* It may drop any database objects (tables, views, etc.) that depend on 'MADLIB' SCHEMA\n"
                "***************************************************************************\n"
                "Would you like to continue? [Y/N]")
info_(this, hawq_overwrite_msg)
go = raw_input('>>> ').upper()
while go not in ('Y', 'YES', 'N', 'NO'):
go = raw_input('Yes or No >>> ').upper()
if go in ('N', 'NO'):
info_(this, 'Installation stopped.', True)
return
# Rolling back in HAWQ will drop catalog functions. For exception, we
# simply push the exception to the caller to terminate the install
_db_create_objects(schema, None, testcase=testcase, hawq_debug=True)
else:
            schema_overwrite_msg = (
                "***************************************************************************\n"
                "* Schema {0} already exists\n"
                "* Installer will rename it to {1}\n"
                "***************************************************************************\n"
                "Would you like to continue? [Y/N]".
                format(schema.upper(), temp_schema.upper()))
info_(this, schema_overwrite_msg)
go = raw_input('>>> ').upper()
while go not in ('Y', 'YES', 'N', 'NO'):
go = raw_input('Yes or No >>> ').upper()
if go in ('N', 'NO'):
info_(this, 'Installation stopped.', True)
return
# Rename MADlib schema
_db_rename_schema(schema, temp_schema)
# Create MADlib schema
try:
_db_create_schema(schema)
except:
_db_rollback(schema, temp_schema)
# Create MADlib objects
try:
_db_create_objects(schema, temp_schema, testcase=testcase)
except:
_db_rollback(schema, temp_schema)
# CASE #2: Target schema exists w/o MADlib objects:
# For HAWQ, after the DB initialization, there is no
# madlib.migrationhistory table, thus madlib_exists is False
elif schema_writable and not madlib_exists:
# Create MADlib objects
try:
_db_create_objects(schema, None, testcase=testcase)
except:
error_(this, "Building database objects failed. "
"Before retrying: drop %s schema OR install MADlib into "
"a different schema." % schema.upper(), True)
#
# CASE #3: Target schema does not exist:
#
elif not schema_writable:
if portid == 'hawq' and not is_hawq2:
# Rolling back in HAWQ will drop catalog functions. For exception, we
# simply push the exception to the caller to terminate the install
raise Exception("MADLIB schema is required for HAWQ")
info_(this, "> Schema %s does not exist" % schema.upper(), verbose)
# Create MADlib schema
try:
_db_create_schema(schema)
except:
_db_rollback(schema, None)
# Create MADlib objects
try:
_db_create_objects(schema, None, testcase=testcase)
except:
_db_rollback(schema, None)
info_(this, "MADlib %s installed successfully in %s schema." % (str(rev), schema.upper()))
# ------------------------------------------------------------------------------
def _db_upgrade(schema, dbrev):
"""
Upgrade MADlib
@param schema MADlib schema name
@param dbrev DB-level MADlib version
"""
if is_rev_gte(get_rev_num(dbrev), get_rev_num(rev)):
info_(this, "Current MADlib version already up to date.", True)
return
if is_rev_gte(get_rev_num('1.9.1'), get_rev_num(dbrev)):
error_(this, """
MADlib versions prior to v1.10 are not supported for upgrade.
Please try upgrading to v1.10 and then upgrade to this version.
""", True)
return
info_(this, "Upgrading MADlib into %s schema..." % schema.upper(), True)
info_(this, "\tDetecting dependencies...", True)
info_(this, "\tLoading change list...", True)
ch = uu.ChangeHandler(schema, portid, con_args, maddir, dbrev, is_hawq2)
info_(this, "\tDetecting table dependencies...", True)
td = uu.TableDependency(schema, portid, con_args)
info_(this, "\tDetecting view dependencies...", True)
vd = uu.ViewDependency(schema, portid, con_args)
abort = False
if td.has_dependency():
info_(this, "*" * 50, True)
info_(this, "\tFollowing user tables/indexes are dependent on MADlib objects:", True)
info_(this, td.get_dependency_str(), True)
info_(this, "*" * 50, True)
cd_udt = [udt for udt in td.get_depended_udt() if udt in ch.udt]
if len(cd_udt) > 0:
error_(this, """
User has objects dependent on following updated MADlib types!
{0}
These objects need to be dropped before upgrading.
""".format('\n\t\t\t'.join(cd_udt)), False)
# we add special handling for 'linregr_result'
if 'linregr_result' in cd_udt:
info_(this, """Dependency on 'linregr_result' could be due to objects
created from the output of the aggregate 'linregr'.
Please refer to the Linear Regression documentation
<http://madlib.apache.org/docs/latest/group__grp__linreg.html#warning>
for the recommended solution.
""", False)
abort = True
c_udoc = ch.get_udoc_oids()
d_udoc = td.get_depended_udoc_oids()
cd_udoc = [udoc for udoc in d_udoc if udoc in c_udoc]
if len(cd_udoc) > 0:
error_(this, """
User has objects dependent on the following updated MADlib operator classes!
oid={0}
These objects need to be dropped before upgrading.
""".format('\n\t\t\t'.join(cd_udoc)), False)
abort = True
if vd.has_dependency():
info_(this, "*" * 50, True)
info_(this, "\tFollowing user views are dependent on MADlib objects:", True)
info_(this, vd.get_dependency_graph_str(), True)
info_(this, "*" * 50, True)
c_udf = ch.get_udf_signature()
d_udf = vd.get_depended_func_signature('UDF')
cd_udf = [udf for udf in d_udf if udf in c_udf]
if len(cd_udf) > 0:
error_(this, """
User has objects dependent on following updated MADlib functions!
{0}
These objects will fail to work with the updated functions and
need to be dropped before starting upgrade again.
""".format('\n\t\t\t\t\t'.join(cd_udf)), False)
abort = True
c_uda = ch.get_uda_signature()
d_uda = vd.get_depended_func_signature('UDA')
cd_uda = [uda for uda in d_uda if uda in c_uda]
if len(cd_uda) > 0:
error_(this, """
User has objects dependent on following updated MADlib aggregates!
{0}
These objects will fail to work with the new aggregates and
need to be dropped before starting upgrade again.
""".format('\n\t\t\t\t\t'.join(cd_uda)), False)
abort = True
c_udo = ch.get_udo_oids()
d_udo = vd.get_depended_opr_oids()
cd_udo = [udo for udo in d_udo if udo in c_udo]
if len(cd_udo) > 0:
error_(this, """
User has objects dependent on following updated MADlib operators!
oid={0}
These objects will fail to work with the new operators and
need to be dropped before starting upgrade again.
""".format('\n\t\t\t\t\t'.join(cd_udo)), False)
abort = True
if abort:
error_(this, """------- Upgrade aborted. -------
Backup and drop all objects that depend on MADlib before trying upgrade again.
Use madpack reinstall to automatically drop these objects only if appropriate.""", True)
else:
info_(this, "No dependency problem found, continuing to upgrade ...", True)
info_(this, "\tReading existing UDAs/UDTs...", False)
sc = uu.ScriptCleaner(schema, portid, con_args, ch)
info_(this, "Script Cleaner initialized ...", False)
ch.drop_changed_uda()
ch.drop_changed_udoc()
ch.drop_changed_udo()
ch.drop_changed_udc()
ch.drop_changed_udf()
ch.drop_changed_udt() # assume dependent udf for udt does not change
ch.drop_traininginfo_4dt() # used types: oid, text, integer, float
_db_create_objects(schema, None, True, sc)
info_(this, "MADlib %s upgraded successfully in %s schema." % (str(rev), schema.upper()), True)
# ------------------------------------------------------------------------------
def _db_rename_schema(from_schema, to_schema):
"""
Rename schema
@param from_schema name of the schema to rename
@param to_schema new name for the schema
"""
info_(this, "> Renaming schema %s to %s" % (from_schema.upper(), to_schema.upper()), True)
try:
_internal_run_query("ALTER SCHEMA %s RENAME TO %s;" % (from_schema, to_schema), True)
except:
error_(this, 'Cannot rename schema. Stopping installation...', False)
raise Exception
# ------------------------------------------------------------------------------
def _db_create_schema(schema):
"""
Create schema
    @param schema name of the schema to create
"""
info_(this, "> Creating %s schema" % schema.upper(), True)
try:
_internal_run_query("CREATE SCHEMA %s;" % schema, True)
except:
info_(this, 'Cannot create new schema. Rolling back installation...', True)
pass
# ------------------------------------------------------------------------------
def _db_create_objects(schema, old_schema, upgrade=False, sc=None, testcase="",
hawq_debug=False):
"""
Create MADlib DB objects in the schema
@param schema Name of the target schema
@param sc ScriptCleaner object
@param testcase Command-line args for modules to install
@param hawq_debug
"""
if not upgrade and not hawq_debug:
# Create MigrationHistory table
try:
info_(this, "> Creating %s.MigrationHistory table" % schema.upper(), True)
_internal_run_query("DROP TABLE IF EXISTS %s.migrationhistory;" % schema, True)
sql = """CREATE TABLE %s.migrationhistory
(id serial, version varchar(255),
applied timestamp default current_timestamp);""" % schema
_internal_run_query(sql, True)
except:
            error_(this, "Cannot create MigrationHistory table", False)
raise Exception
# Copy MigrationHistory table for record keeping purposes
if old_schema:
try:
info_(this, "> Saving data from %s.MigrationHistory table" % old_schema.upper(), True)
sql = """INSERT INTO %s.migrationhistory (version, applied)
SELECT version, applied FROM %s.migrationhistory
ORDER BY id;""" % (schema, old_schema)
_internal_run_query(sql, True)
except:
error_(this, "Cannot copy MigrationHistory table", False)
raise Exception
# Stamp the DB installation
try:
info_(this, "> Writing version info in MigrationHistory table", True)
_internal_run_query("INSERT INTO %s.migrationhistory(version) "
"VALUES('%s')" % (schema, str(rev)), True)
except:
error_(this, "Cannot insert data into %s.migrationhistory table" % schema, False)
raise Exception
# Run migration SQLs
if upgrade:
info_(this, "> Creating/Updating objects for modules:", True)
else:
info_(this, "> Creating objects for modules:", True)
caseset = (set([test.strip() for test in testcase.split(',')])
if testcase != "" else set())
modset = {}
for case in caseset:
if case.find('/') > -1:
[mod, algo] = case.split('/')
if mod not in modset:
modset[mod] = []
if algo not in modset[mod]:
modset[mod].append(algo)
else:
modset[case] = []
# Loop through all modules/modules
# portspecs is a global variable
for moduleinfo in portspecs['modules']:
# Get the module name
module = moduleinfo['name']
# Skip if doesn't meet specified modules
if modset is not None and len(modset) > 0 and module not in modset:
continue
info_(this, "> - %s" % module, True)
# Find the Python module dir (platform specific or generic)
if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/modules/" + module):
maddir_mod_py = maddir + "/ports/" + portid + "/" + dbver + "/modules"
else:
maddir_mod_py = maddir + "/modules"
# Find the SQL module dir (platform specific or generic)
if os.path.isdir(maddir + "/ports/" + portid + "/modules/" + module):
maddir_mod_sql = maddir + "/ports/" + portid + "/modules"
elif os.path.isdir(maddir + "/modules/" + module):
maddir_mod_sql = maddir + "/modules"
else:
# This was a platform-specific module, for which no default exists.
# We can just skip this module.
continue
# Make a temp dir for log files
cur_tmpdir = tmpdir + "/" + module
_make_dir(cur_tmpdir)
# Loop through all SQL files for this module
mask = maddir_mod_sql + '/' + module + '/*.sql_in'
sql_files = glob.glob(mask)
if not sql_files:
error_(this, "No files found in: %s" % mask, True)
# Execute all SQL files for the module
for sqlfile in sql_files:
algoname = os.path.basename(sqlfile).split('.')[0]
if portid == 'hawq' and not is_hawq2 and algoname in ('svec'):
continue
# run only algo specified
if module in modset and len(modset[module]) > 0 \
and algoname not in modset[module]:
continue
# Set file names
tmpfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.tmp'
logfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.log'
retval = _run_sql_file(schema, maddir_mod_py, module, sqlfile,
tmpfile, logfile, None, upgrade,
sc)
# Check the exit status
if retval != 0:
                error_(this, "Failed executing %s" % tmpfile, False)
error_(this, "Check the log at %s" % logfile, False)
raise Exception
# ------------------------------------------------------------------------------
def _db_rollback(drop_schema, keep_schema):
"""
Rollback installation
@param drop_schema name of the schema to drop
@param keep_schema name of the schema to rename and keep
"""
info_(this, "Rolling back the installation...", True)
if not drop_schema:
error_(this, 'No schema name to drop. Stopping rollback...', True)
# Drop the current schema
info_(this, "> Dropping schema %s" % drop_schema.upper(), verbose)
try:
_internal_run_query("DROP SCHEMA %s CASCADE;" % (drop_schema), True)
except:
error_(this, "Cannot drop schema %s. Stopping rollback..." % drop_schema.upper(), True)
# Rename old to current schema
if keep_schema:
_db_rename_schema(keep_schema, drop_schema)
info_(this, "Rollback finished successfully.", True)
raise Exception
# ------------------------------------------------------------------------------
def unescape(string):
"""
Unescape separation characters in connection strings, i.e., remove first
backslash from "\/", "\@", "\:", and "\\".
"""
if string is None:
return None
else:
return re.sub(r'\\(?P<char>[/@:\\])', '\g<char>', string)
# ------------------------------------------------------------------------------
def parseConnectionStr(connectionStr):
"""
@brief Parse connection strings of the form
<tt>[username[/password]@][hostname][:port][/database]</tt>
Separation characters (/@:) and the backslash (\) need to be escaped.
@returns A tuple (username, password, hostname, port, database). Field not
specified will be None.
"""
match = re.search(
r'((?P<user>([^/@:\\]|\\/|\\@|\\:|\\\\)+)' +
r'(/(?P<password>([^/@:\\]|\\/|\\@|\\:|\\\\)*))?@)?' +
r'(?P<host>([^/@:\\]|\\/|\\@|\\:|\\\\)+)?' +
r'(:(?P<port>[0-9]+))?' +
r'(/(?P<database>([^/@:\\]|\\/|\\@|\\:|\\\\)+))?', connectionStr)
return (
unescape(match.group('user')),
unescape(match.group('password')),
unescape(match.group('host')),
match.group('port'),
unescape(match.group('database')))
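# Illustrative sketch (not called by madpack): how parseConnectionStr() and
# unescape() behave. The sample connection strings below are made-up values.
def _example_parse_connection_str():
    assert parseConnectionStr("gpadmin/secret@mdw:5432/testdb") == \
        ('gpadmin', 'secret', 'mdw', '5432', 'testdb')
    assert parseConnectionStr("alice@localhost/db") == \
        ('alice', None, 'localhost', None, 'db')
    # separation characters inside a field must be escaped with a backslash
    assert parseConnectionStr(r"user\@corp@dbhost")[0] == "user@corp"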
# ------------------------------------------------------------------------------
def parse_arguments():
parser = argparse.ArgumentParser(
prog="madpack",
description='MADlib package manager (' + str(rev) + ')',
argument_default=False,
formatter_class=argparse.RawTextHelpFormatter,
epilog="""Example:
$ madpack install -s madlib -p greenplum -c gpadmin@mdw:5432/testdb
This will install MADlib objects into a Greenplum database called TESTDB
running on server MDW:5432. Installer will try to login as GPADMIN
and will prompt for password. The target schema will be MADLIB.
""")
help_msg = """One of the following options:
install : run sql scripts to load into DB
upgrade : run sql scripts to upgrade
uninstall : run sql scripts to uninstall from DB
reinstall : performs uninstall and install
version : compare and print MADlib version (binaries vs database objects)
install-check : test all installed modules
(uninstall is currently unavailable for the HAWQ port)"""
choice_list = ['install', 'update', 'upgrade', 'uninstall',
'reinstall', 'version', 'install-check']
parser.add_argument('command', metavar='COMMAND', nargs=1,
choices=choice_list, help=help_msg)
parser.add_argument(
'-c', '--conn', metavar='CONNSTR', nargs=1, dest='connstr', default=None,
help="""Connection string of the following syntax:
[user[/password]@][host][:port][/database]
    If not provided default values will be derived for PostgreSQL and Greenplum:
- user: PGUSER or USER env variable or OS username
- pass: PGPASSWORD env variable or runtime prompt
- host: PGHOST env variable or 'localhost'
- port: PGPORT env variable or '5432'
- db: PGDATABASE env variable or OS username""")
parser.add_argument('-s', '--schema', nargs=1, dest='schema',
metavar='SCHEMA', default='madlib',
help="Target schema for the database objects.")
parser.add_argument('-p', '--platform', nargs=1, dest='platform',
metavar='PLATFORM', choices=portid_list,
help="Target database platform, current choices: " + str(portid_list))
parser.add_argument('-v', '--verbose', dest='verbose',
action="store_true", help="Verbose mode.")
parser.add_argument('-l', '--keeplogs', dest='keeplogs', default=False,
action="store_true", help="Do not remove installation log files.")
parser.add_argument('-d', '--tmpdir', dest='tmpdir', default='/tmp/',
help="Temporary directory location for installation log files.")
parser.add_argument('-t', '--testcase', dest='testcase', default="",
help="Module names to test, comma separated. Effective only for install-check.")
# Get the arguments
return parser.parse_args()
def main(argv):
args = parse_arguments()
global verbose
verbose = args.verbose
info_(this, "Arguments: " + str(args), verbose)
global keeplogs
keeplogs = args.keeplogs
global tmpdir
try:
tmpdir = tempfile.mkdtemp('', 'madlib.', args.tmpdir)
except OSError, e:
tmpdir = e.filename
error_(this, "cannot create temporary directory: '%s'." % tmpdir, True)
# Parse SCHEMA
if len(args.schema[0]) > 1:
schema = args.schema[0].lower()
else:
schema = args.schema.lower()
# Parse DB Platform (== PortID) and compare with Ports.yml
global portid
if args.platform:
try:
# Get the DB platform name == DB port id
portid = args.platform[0].lower()
ports[portid]
except:
portid = None
error_(this, "Can not find specs for port %s" % (args.platform[0]), True)
else:
portid = None
# Parse CONNSTR (only if PLATFORM and DBAPI2 are defined)
if portid:
connStr = "" if args.connstr is None else args.connstr[0]
(c_user, c_pass, c_host, c_port, c_db) = parseConnectionStr(connStr)
# Find the default values for PG and GP
if portid in SUPPORTED_PORTS:
if c_user is None:
c_user = os.environ.get('PGUSER', getpass.getuser())
if c_pass is None:
c_pass = os.environ.get('PGPASSWORD', None)
if c_host is None:
c_host = os.environ.get('PGHOST', 'localhost')
if c_port is None:
c_port = os.environ.get('PGPORT', '5432')
if c_db is None:
c_db = os.environ.get('PGDATABASE', c_user)
# Set connection variables
global con_args
con_args['host'] = c_host + ':' + c_port
con_args['database'] = c_db
con_args['user'] = c_user
if c_pass is not None:
con_args['password'] = c_pass
# Try connecting to the database
info_(this, "Testing database connection...", verbose)
try:
# check for password only if required
_internal_run_query("SELECT 1", False)
except EnvironmentError:
con_args['password'] = getpass.getpass("Password for user %s: " % c_user)
_internal_run_query("SELECT 1", False)
except:
error_(this, 'Failed to connect to database', True)
# Get DB version
global dbver
dbver = get_dbver(con_args, portid)
global is_hawq2
if portid == "hawq" and is_rev_gte(get_rev_num(dbver), get_rev_num('2.0')):
is_hawq2 = True
else:
is_hawq2 = False
# HAWQ < 2.0 has hard-coded schema name 'madlib'
if portid == 'hawq' and not is_hawq2 and schema.lower() != 'madlib':
error_(this, "*** Installation is currently restricted only to 'madlib' schema ***", True)
# update maddir to use a relative path if available
global maddir
maddir = _get_relative_maddir(maddir, portid)
# Get MADlib version in DB
dbrev = get_madlib_dbrev(con_args, schema)
portdir = os.path.join(maddir, "ports", portid)
supportedVersions = [dirItem for dirItem in os.listdir(portdir)
if os.path.isdir(os.path.join(portdir, dirItem)) and
re.match("^\d+", dirItem)]
if dbver is None:
dbver = ".".join(
map(str, max([versionStr.split('.')
for versionStr in supportedVersions])))
info_(this, "Could not parse version string reported by {DBMS}. Will "
"default to newest supported version of {DBMS} "
"({version}).".format(DBMS=ports[portid]['name'],
version=dbver), True)
else:
info_(this, "Detected %s version %s." % (ports[portid]['name'], dbver),
True)
dbver_split = get_rev_num(dbver)
if portid == "hawq":
# HAWQ (starting 2.0) uses semantic versioning. Hence,
# only need first digit for major version.
if is_rev_gte(dbver_split, get_rev_num('2.0')):
is_hawq2 = True
dbver = str(dbver_split[0])
elif portid == 'greenplum':
if is_rev_gte(dbver_split, get_rev_num('5.0')):
# GPDB (starting 5.0) uses semantic versioning. Hence, only
# need first digit for major version.
dbver = str(dbver_split[0])
elif is_rev_gte(dbver_split, get_rev_num('4.3.5')):
# Due to the ABI incompatibility between 4.3.4 and 4.3.5,
# MADlib treats 4.3.5+ as DB version 4.3ORCA which is
# different from 4.3. The name is suffixed with ORCA since
# optimizer (ORCA) is 'on' by default in 4.3.5+
dbver = '4.3ORCA'
else:
# only need the first two digits for <= 4.3.4
dbver = '.'.join(map(str, dbver_split[:2]))
elif portid == 'postgres':
if is_rev_gte(dbver_split, get_rev_num('10.0')):
# Postgres starting 10.0 uses semantic versioning. Hence,
# only need first digit for major version.
dbver = str(dbver_split[0])
if not os.path.isdir(os.path.join(portdir, dbver)):
error_(this, "This version is not among the %s versions for which "
"MADlib support files have been installed (%s)." %
(ports[portid]['name'], ", ".join(supportedVersions)), True)
# Validate that db platform is correct
if not _check_db_port(portid):
error_(this, "Invalid database platform specified.", True)
# Adjust MADlib directories for this port (if they exist)
global maddir_conf
if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/config"):
maddir_conf = maddir + "/ports/" + portid + "/" + dbver + "/config"
else:
maddir_conf = maddir + "/config"
global maddir_lib
if os.path.isfile(maddir + "/ports/" + portid + "/" + dbver +
"/lib/libmadlib.so"):
maddir_lib = maddir + "/ports/" + portid + "/" + dbver + \
"/lib/libmadlib.so"
else:
maddir_lib = maddir + "/lib/libmadlib.so"
# Get the list of modules for this port
global portspecs
portspecs = configyml.get_modules(maddir_conf)
else:
con_args = None
dbrev = None
# Parse COMMAND argument and compare with Ports.yml
# Debugging...
# print "OS rev: " + str(rev) + " > " + str(get_rev_num(rev))
# print "DB rev: " + str(dbrev) + " > " + str(get_rev_num(dbrev))
# Make sure we have the necessary parameters to continue
if args.command[0] != 'version':
if not portid:
error_(this, "Missing -p/--platform parameter.", True)
if not con_args:
error_(this, "Unknown problem with database connection string: %s" % con_args, True)
# COMMAND: version
if args.command[0] == 'version':
_print_revs(rev, dbrev, con_args, schema)
# COMMAND: uninstall/reinstall
if args.command[0] in ('uninstall',) and (portid == 'hawq' and not is_hawq2):
error_(this, "madpack uninstall is currently not available for HAWQ", True)
if args.command[0] in ('uninstall', 'reinstall') and (portid != 'hawq' or is_hawq2):
if get_rev_num(dbrev) == [0]:
info_(this, "Nothing to uninstall. No version found in schema %s." % schema.upper(), True)
return
# Find any potential data to lose
affected_objects = _internal_run_query("""
SELECT
n1.nspname AS schema,
relname AS relation,
attname AS column,
typname AS type
FROM
pg_attribute a,
pg_class c,
pg_type t,
pg_namespace n,
pg_namespace n1
WHERE
n.nspname = '%s'
AND t.typnamespace = n.oid
AND a.atttypid = t.oid
AND c.oid = a.attrelid
AND c.relnamespace = n1.oid
AND c.relkind = 'r'
ORDER BY
n1.nspname, relname, attname, typname""" % schema.lower(), True)
info_(this, "*** Uninstalling MADlib ***", True)
info_(this, "***********************************************************************************", True)
info_(this, "* Schema %s and all database objects depending on it will be dropped!" % schema.upper(), True)
if affected_objects:
info_(this, "* If you continue the following data will be lost (schema : table.column : type):", True)
for ao in affected_objects:
info_(this, '* - ' + ao['schema'] + ' : ' + ao['relation'] + '.' +
ao['column'] + ' : ' + ao['type'], True)
info_(this, "***********************************************************************************", True)
info_(this, "Would you like to continue? [Y/N]", True)
go = raw_input('>>> ').upper()
while go != 'Y' and go != 'N':
go = raw_input('Yes or No >>> ').upper()
# 2) Do the uninstall/drop
if go == 'N':
info_(this, 'No problem. Nothing dropped.', True)
return
elif go == 'Y':
info_(this, "> dropping schema %s" % schema.upper(), verbose)
try:
_internal_run_query("DROP SCHEMA %s CASCADE;" % (schema), True)
except:
error_(this, "Cannot drop schema %s." % schema.upper(), True)
info_(this, 'Schema %s (and all dependent objects) has been dropped.' % schema.upper(), True)
info_(this, 'MADlib uninstalled successfully.', True)
else:
return
# COMMAND: install/reinstall
if args.command[0] in ('install', 'reinstall'):
# Refresh MADlib version in DB, None for GP/PG
if args.command[0] == 'reinstall':
print "Setting MADlib database version to be None for reinstall"
dbrev = None
info_(this, "*** Installing MADlib ***", True)
# 1) Compare OS and DB versions.
# noop if OS <= DB.
_print_revs(rev, dbrev, con_args, schema)
if is_rev_gte(get_rev_num(dbrev), get_rev_num(rev)):
info_(this, "Current MADlib version already up to date.", True)
return
# proceed to create objects if nothing installed in DB or for HAWQ < 2.0
elif dbrev is None or (portid == 'hawq' and not is_hawq2):
pass
# error and refer to upgrade if OS > DB
else:
error_(this, """Aborting installation: existing MADlib version detected in {0} schema
To upgrade the {0} schema to MADlib v{1} please run the following command:
madpack upgrade -s {0} -p {2} [-c ...]
""".format(schema, rev, portid), True)
# 2) Run installation
try:
_plpy_check(py_min_ver)
_db_install(schema, dbrev, args.testcase)
except:
error_(this, "MADlib installation failed.", True)
# COMMAND: upgrade
if args.command[0] in ('upgrade', 'update'):
info_(this, "*** Upgrading MADlib ***", True)
dbrev = get_madlib_dbrev(con_args, schema)
# 1) Check DB version. If None, nothing to upgrade.
if not dbrev:
info_(this, "MADlib is not installed in {schema} schema and there "
"is nothing to upgrade. Please use install "
"instead.".format(schema=schema.upper()),
True)
return
# 2) Compare OS and DB versions. Continue if OS > DB.
_print_revs(rev, dbrev, con_args, schema)
if is_rev_gte(get_rev_num(dbrev), get_rev_num(rev)):
info_(this, "Current MADlib version is already up-to-date.", True)
return
if float('.'.join(dbrev.split('.')[0:2])) < 1.0:
info_(this, "The version gap is too large, upgrade is supported only for "
"packages greater than or equal to v1.0.", True)
return
# 3) Run upgrade
try:
_plpy_check(py_min_ver)
_db_upgrade(schema, dbrev)
except Exception as e:
# Uncomment the following lines when debugging
print "Exception: " + str(e)
print sys.exc_info()
traceback.print_tb(sys.exc_info()[2])
error_(this, "MADlib upgrade failed.", True)
# COMMAND: install-check
if args.command[0] == 'install-check':
# 1) Compare OS and DB versions. Continue if OS = DB.
if get_rev_num(dbrev) != get_rev_num(rev):
_print_revs(rev, dbrev, con_args, schema)
info_(this, "Versions do not match. Install-check stopped.", True)
return
# Create install-check user
test_user = ('madlib_' +
rev.replace('.', '').replace('-', '_') +
'_installcheck')
try:
_internal_run_query("DROP USER IF EXISTS %s;" % (test_user), False)
except:
_internal_run_query("DROP OWNED BY %s CASCADE;" % (test_user), True)
_internal_run_query("DROP USER IF EXISTS %s;" % (test_user), True)
_internal_run_query("CREATE USER %s;" % (test_user), True)
_internal_run_query("GRANT USAGE ON SCHEMA %s TO %s;" % (schema, test_user), True)
# 2) Run test SQLs
info_(this, "> Running test scripts for:", verbose)
caseset = (set([test.strip() for test in args.testcase.split(',')])
if args.testcase != "" else set())
modset = {}
for case in caseset:
if case.find('/') > -1:
[mod, algo] = case.split('/')
if mod not in modset:
modset[mod] = []
if algo not in modset[mod]:
modset[mod].append(algo)
else:
modset[case] = []
# Loop through all modules
for moduleinfo in portspecs['modules']:
# Get module name
module = moduleinfo['name']
# Skip if doesn't meet specified modules
if modset is not None and len(modset) > 0 and module not in modset:
continue
# JIRA: MADLIB-1078 fix
# Skip pmml during install-check (when run without the -t option).
# We can still run install-check on pmml with '-t' option.
if not modset and module in ['pmml']:
continue
info_(this, "> - %s" % module, verbose)
# Make a temp dir for this module (if doesn't exist)
cur_tmpdir = tmpdir + '/' + module + '/test' # tmpdir is a global variable
_make_dir(cur_tmpdir)
# Find the Python module dir (platform specific or generic)
if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/modules/" + module):
maddir_mod_py = maddir + "/ports/" + portid + "/" + dbver + "/modules"
else:
maddir_mod_py = maddir + "/modules"
# Find the SQL module dir (platform specific or generic)
if os.path.isdir(maddir + "/ports/" + portid + "/modules/" + module):
maddir_mod_sql = maddir + "/ports/" + portid + "/modules"
else:
maddir_mod_sql = maddir + "/modules"
# Prepare test schema
test_schema = "madlib_installcheck_%s" % (module)
_internal_run_query("DROP SCHEMA IF EXISTS %s CASCADE; CREATE SCHEMA %s;" %
(test_schema, test_schema), True)
_internal_run_query("GRANT ALL ON SCHEMA %s TO %s;" %
(test_schema, test_user), True)
# Switch to test user and prepare the search_path
pre_sql = '-- Switch to test user:\n' \
'SET ROLE %s;\n' \
'-- Set SEARCH_PATH for install-check:\n' \
'SET search_path=%s,%s;\n' \
% (test_user, test_schema, schema)
# Loop through all test SQL files for this module
sql_files = maddir_mod_sql + '/' + module + '/test/*.sql_in'
for sqlfile in sorted(glob.glob(sql_files), reverse=True):
# work-around for HAWQ
algoname = os.path.basename(sqlfile).split('.')[0]
# run only algo specified
if module in modset and len(modset[module]) > 0 \
and algoname not in modset[module]:
continue
# Set file names
tmpfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.tmp'
logfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.log'
# If there is no problem with the SQL file
milliseconds = 0
# Run the SQL
run_start = datetime.datetime.now()
retval = _run_sql_file(schema, maddir_mod_py, module,
sqlfile, tmpfile, logfile, pre_sql)
# Runtime evaluation
run_end = datetime.datetime.now()
milliseconds = round((run_end - run_start).seconds * 1000 +
(run_end - run_start).microseconds / 1000)
# Check the exit status
if retval != 0:
result = 'FAIL'
keeplogs = True
# Since every single statement in the test file gets logged,
# an empty log file indicates an empty or a failed test
elif os.path.isfile(logfile) and os.path.getsize(logfile) > 0:
result = 'PASS'
# Otherwise
else:
result = 'ERROR'
# Output result
print "TEST CASE RESULT|Module: " + module + \
"|" + os.path.basename(sqlfile) + "|" + result + \
"|Time: %d milliseconds" % (milliseconds)
if result == 'FAIL':
error_(this, "Failed executing %s" % tmpfile, False)
error_(this, "Check the log at %s" % logfile, False)
# Cleanup test schema for the module
_internal_run_query("DROP SCHEMA IF EXISTS %s CASCADE;" % (test_schema), True)
# Drop install-check user
_internal_run_query("DROP OWNED BY %s CASCADE;" % (test_user), True)
_internal_run_query("DROP USER %s;" % (test_user), True)
# ------------------------------------------------------------------------------
# Start Here
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Run main
main(sys.argv[1:])
# Optional log files cleanup
# keeplogs and tmpdir are global variables
if not keeplogs:
shutil.rmtree(tmpdir)
else:
print "INFO: Log files saved in " + tmpdir
|
# -*- coding: utf-8 -*-
"""
Provides the ownbot User class.
"""
from ownbot.usermanager import UserManager
class User(object):
"""Represents a telegram user.
Args:
name (str): The user's unique telegram username.
user_id (str): The user's unique telegram id.
group (Optional[str]): The user's group.
"""
def __init__(self, name, user_id, group=None):
self.__name = name
self.__id = user_id
self.__group = group
self.__usermanager = UserManager()
def save(self):
"""Saves the user's data.
        Saves the user's data to the configuration
file.
Returns:
bool: True if the user was saved, otherwise False
"""
if not self.__group:
return False
self.__usermanager.add_user(self.__name,
self.__group,
user_id=self.__id)
return True
def has_access(self, group):
"""Checks if the user is in given group.
Returns True if given user has access rights
to the given group.
Args:
group (str): The group's name.
Returns:
bool: True if user is in the given group, otherwise False.
"""
is_in_group = self.__usermanager.user_is_in_group(group,
user_id=self.__id)
is_admin = self.__usermanager.user_is_in_group("admin",
user_id=self.__id)
if is_in_group or is_admin:
self.save()
return True
is_admin = self.__usermanager.verify_user(self.__id, self.__name,
"admin")
is_in_group = self.__usermanager.verify_user(self.__id, self.__name,
group)
return is_admin or is_in_group
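# Illustrative usage sketch: the username, id and group below are made-up
# values, and UserManager() reads the bot's own configuration file, so this
# is only a sketch of the intended call pattern.
if __name__ == "__main__":
    user = User("john_doe", "1234567", group="admin")
    if user.save():
        print("User saved to the configuration file.")
    print("Has access to group 'admin': {}".format(user.has_access("admin")))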
|
import sqlite3
# Connect to sqlite database
sqliteDb = "../results/results.sqlite"
db = sqlite3.connect(sqliteDb)
try:
db.enable_load_extension(True)
db.load_extension("mod_spatialite")
cur = db.cursor()
# Calculate mean monthly flow rate for last 5 years of data
cur.execute("DROP TABLE IF EXISTS monthlyFlowRates;")
cur.execute("""CREATE TABLE monthlyFlowRates AS
SELECT r.id AS riverId,
STRFTIME("%m", gmf.month||"-01") AS month,
AVG(gmf.flow) * r.upstreamLengthRatio AS flow,
AVG(gmf.flow) * r.upstreamLengthRatio * 8.36
AS heatMW,
ST_Length(r.geometry) * 0.02 AS limitMW
FROM nrfaStations s, nrfaData d, nrfaGmf gmf, riverEdges r
WHERE d.station = s.id
AND gmf.station = s.id
AND s.riverId = r.nearestGaugedEdge
AND DATE(first||"-01") <= DATE("2008-10-01")
AND last = "2013-09"
AND DATE(gmf.month||"-01")
BETWEEN DATE("2008-10-01")
AND DATE("2013-09-01")
GROUP BY r.id, STRFTIME("%m", gmf.month||"-01");""")
# Calculate annual heat production in GWh per year
cur.execute("DROP TABLE IF EXISTS annualHeat;")
cur.execute("SELECT DisableSpatialIndex('annualHeat', 'geometry');")
cur.execute("""CREATE TABLE annualHeat (id INTEGER PRIMARY KEY AUTOINCREMENT,
riverId TEXT,
riverCode INTEGER,
GWhPerYear REAL);""")
cur.execute("""SELECT AddGeometryColumn('annualHeat',
'geometry',
27700,
'LINESTRING');""")
cur.execute("""INSERT INTO annualHeat (riverId, riverCode, GWhPerYear, geometry)
SELECT r.id, r.code, SUM(heatMW * 0.73), r.geometry
FROM riverEdges r, monthlyFlowRates mf, wales w
WHERE r.id = mf.riverId
AND ST_INTERSECTS(r.geometry, w.geometry)
AND r.ROWID IN
(SELECT ROWID
FROM SpatialIndex
WHERE f_table_name = 'riverEdges'
AND search_frame = w.geometry)
GROUP BY r.id;""")
cur.execute("SELECT CreateSpatialIndex('annualHeat', 'geometry');")
# Calculate annual heat production for lakes
cur.execute("DROP TABLE IF EXISTS annualHeatLakes;")
cur.execute("""CREATE TABLE annualHeatLakes (
id INTEGER PRIMARY KEY AUTOINCREMENT,
identifier TEXT,
code INTEGER,
name TEXT,
GWhPerYear REAL);""")
cur.execute("""SELECT AddGeometryColumn('annualHeatLakes',
'geometry',
27700,
'POLYGON');""")
cur.execute("""INSERT INTO annualHeatLakes (identifier, code, name,
GWhPerYear, geometry)
SELECT l.identifier, l.code, l.name, MAX(h.GWhPerYear),
l.geometry
FROM osLakes l, annualheat h
WHERE h.riverCode = 6232
AND ST_Intersects(l.geometry, h.geometry)
AND l.ROWID IN
(SELECT ROWID
FROM SpatialIndex
WHERE f_table_name = 'osLakes'
AND search_frame = h.geometry)
GROUP BY l.id;""")
cur.execute("SELECT CreateSpatialIndex('annualHeatLakes', 'geometry');")
finally:
# Commit changes and close database
db.commit()
db.close()
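# Illustrative follow-up (sketch): once the script has run, the results can be
# inspected directly from the created tables, e.g.
#   SELECT SUM(GWhPerYear) FROM annualHeat;
#   SELECT name, GWhPerYear FROM annualHeatLakes ORDER BY GWhPerYear DESC;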
|
import pymel.core as pm
import lib.transform
import lib.attribute
class Handle(object):
"""Control class for handle objects."""
def __init__(self, handle):
if pm.nodeType(handle) == "transform":
handle = handle.getShape()
self.node = handle
@classmethod
def create(cls, guideNode, name=None):
"""Create a new Handle object."""
handle = pm.createNode("guideHandle")
transform = handle.getParent()
if name:
transform.rename(name)
transform.worldMatrix[0].connect(handle.handleMatrix)
transform.scaleY.connect(transform.scaleX)
transform.scaleY.connect(transform.scaleZ)
pm.aliasAttr("radius", transform.scaleY)
lib.transform.lockHideTransforms(transform,
translate="",
rotate="",
scale="xz")
connectGuideToHandle(guideNode, handle)
pm.select(transform)
return cls(handle)
def __repr__(self):
return "{}.{}({!r})".format(__name__,
self.__class__.__name__,
self.transform.name())
def __eq__(self, other):
return isinstance(other, self.__class__) and other.node == self.node
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.node)
@property
def name(self):
return self.transform.name()
@name.setter
def name(self, name):
self.transform.rename(name)
@property
def transform(self):
return self.node.getParent()
@property
def guideNode(self):
guide = self.node.guide.listConnections(shapes=True)
if guide:
return guide[0]
return None
@property
def forgeID(self):
return self.node.forgeID.get()
@property
def rotateOrder(self):
return self.node.jointRotateOrder.get()
@rotateOrder.setter
def rotateOrder(self, order):
self.node.jointRotateOrder.set(order)
def parent(self):
"""Returns the parent Handle."""
parentNode = self.node.parentHandle.listConnections(shapes=True)
if parentNode:
return self.__class__(parentNode[0])
return None
def setParent(self, other):
"""Set the parent Handle."""
parentHandle = self.parent()
if parentHandle:
pm.disconnectAttr(self.node.parentHandle)
parentHandle.removeChild(self)
if other is not None:
other.node.message.connect(self.node.parentHandle)
other.transform.worldMatrix[0].connect(self.node.parentHandleMatrix)
other.addChild(self)
def childCount(self):
"""Returns the number of child Handles."""
return len(self.node.childHandle.listConnections())
def addChild(self, child):
"""Add a Handle as a child of this Handle."""
idx = lib.attribute.firstOpenIndex(self.node.childHandleMatrix)
child.node.message.connect(self.node.childHandle[idx])
child.transform.worldMatrix[0].connect(self.node.childHandleMatrix[idx])
# If the child is part of the same guide, this handle should orient towards it
# (If not part of the same guide, it may or may not,
# depending on how the child guide is connected)
if child.guideNode == self.guideNode:
self.setOrientTarget(child)
def removeChild(self, child):
"""Remove the Handle as a child of this Handle."""
children = self.children()
children.remove(child)
for subAttr in self.node.childHandle:
pm.removeMultiInstance(subAttr, b=True)
for subAttr in self.node.childHandleMatrix:
pm.removeMultiInstance(subAttr, b=True)
orientTarget = self.orientTarget()
self.setOrientTarget(None)
for handle in children:
self.addChild(handle)
if orientTarget != child:
self.setOrientTarget(orientTarget)
pm.disconnectAttr(child.node.parentHandle)
pm.disconnectAttr(child.node.parentHandleMatrix)
def children(self):
"""Return a list of child Handles."""
return map(self.__class__,
self.node.childHandle.listConnections(shapes=True))
def orientTarget(self):
"""Returns the Handle that this Handle will orient towards."""
target = self.node.orientTarget.listConnections(shapes=True)
if target:
return self.__class__(target[0])
return None
def setOrientTarget(self, target):
"""Set the Handle that this Handle will orient towards."""
if target == self.orientTarget():
return
if target and target not in self.children():
raise RuntimeError(
"Cannot set {} as the orient target, as it is not a child of {}"
.format(target, self))
pm.disconnectAttr(self.node.orientTarget)
pm.disconnectAttr(self.node.orientTargetMatrix)
if target:
target.node.message.connect(self.node.orientTarget)
target.transform.worldMatrix[0].connect(self.node.orientTargetMatrix)
def jointMatrix(self):
return self.node.jointMatrix.get()
def buildJoint(self):
pm.select(clear=True)
jnt = pm.joint(name=self.name)
jnt.rotateOrder.set(self.rotateOrder)
#jnt.side.set(self.side)
matrix = self.jointMatrix()
pm.xform(jnt, matrix=matrix)
return jnt
def connectGuideToHandle(guideNode, handle):
guideNode.message.connect(handle.guide)
guideNode.provideAimVector.connect(handle.useGuideAim)
guideNode.aimVector.connect(handle.aimVector)
guideNode.upVector.connect(handle.upVector)
guideNode.aimAxis.connect(handle.aimAxis)
guideNode.upAxis.connect(handle.upAxis)
guideNode.handleColor.connect(handle.handleColor)
def isHandleType(obj):
if pm.nodeType(obj) == "transform":
obj = obj.getShape()
return obj is not None and pm.nodeType(obj) == "guideHandle"
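# Illustrative usage sketch (requires a running Maya session with the guideHandle
# plugin loaded). The "guide" node type below is an assumption for illustration;
# only Handle.create() and the handle attributes above come from this module.
#
#   guide = pm.createNode("guide")                # hypothetical guide node
#   root = Handle.create(guide, name="spine_01")
#   child = Handle.create(guide, name="spine_02")
#   child.setParent(root)                         # root now orients towards child
#   jnt = root.buildJoint()                       # bake the handle into a joint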
|
import dataclasses
import dis
import inspect
import itertools
from typing import (
Any,
Callable,
List,
)
def all_slots(class_obj: Any) -> List[str]:
slots = itertools.chain.from_iterable(
getattr(cls, '__slots__', ()) for cls in inspect.getmro(class_obj))
return sorted(set(slots))
def slotted_fields(
_cls=None,
*,
with_dict: bool = False,
with_weakref: bool = False,
):
def decorator(cls):
if '__slots__' in cls.__dict__:
msg = f'{cls.__name__} already has __slots__'
raise TypeError(msg)
cls_dict = dict(cls.__dict__)
field_names = list(f.name for f in dataclasses.fields(cls))
if with_dict:
# TODO: check if any mro class has dict???
# https://github.com/cjrh/autoslot/blob/master/autoslot.py
field_names.append('__dict__')
if with_weakref:
field_names.append('__weakref__')
cls_dict['__slots__'] = tuple(field_names)
for name in field_names:
cls_dict.pop(name, None)
cls_dict.pop('__dict__', None)
cls_dict.pop('__weakref__', None)
cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
qualname = getattr(cls, '__qualname__', None)
if qualname is not None:
cls.__qualname__ = qualname
return cls
return decorator if _cls is None else decorator(_cls)
def assigned_attributes(method: Callable) -> List[str]:
bytecode = dis.Bytecode(method)
it1, it2 = itertools.tee(bytecode)
next(it2, None)
self_var = next(iter(method.__code__.co_varnames))
attrs = set()
for first, second in zip(it1, it2):
        if (first.argval == self_var and
                (first.opname, second.opname) == ('LOAD_FAST', 'STORE_ATTR')):
attrs.add(second.argval)
return sorted(attrs)
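# Illustrative usage sketch: Point is a made-up demo class, not part of this
# module's API. Run this file directly to exercise it.
if __name__ == '__main__':
    @slotted_fields
    @dataclasses.dataclass
    class Point:
        x: int
        y: int

    p = Point(1, 2)
    assert all_slots(Point) == ['x', 'y']
    assert not hasattr(p, '__dict__')  # instances no longer carry a per-object dict
    # On CPython versions where `self.x = x` compiles to a LOAD_FAST/STORE_ATTR
    # pair, assigned_attributes(Point.__init__) is expected to return ['x', 'y'].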
|
import pandas as pd
import os
from dags.data.postgredb import DataWareHouse
from dags.data.mongodb import DataLake
import psycopg2.errors
from datetime import datetime
if not os.path.exists(os.getcwd()+ "/local_tools/csv/"):
os.mkdir(os.getcwd()+ "/local_tools/csv/")
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
DELIMITER = ";"
def extract_distinct_shop(last_run) -> list:
DL = DataLake(role='read_and_write')
return list(DL.products.find({'fetched_time': {'$gte': last_run}}, {"_id": 0}).sort([('fetched_time', -1), ('updated_at', -1)]).distinct('shop_id'))
def extract_product_from_shop(shop_id: int, last_run) -> list:
DL = DataLake(role='read_and_write')
    return list(DL.products.find({'fetched_time': {'$gte': last_run}, 'shop_id': shop_id}, {"_id": 0}).sort([('fetched_time', -1)]))
def extract_product_from_shops(shop_ids: list, last_run) -> list:
DL = DataLake(role='read_and_write')
return list(DL.products.find({'fetched_time': {'$gte': last_run}, 'shop_id': {'$in': shop_ids}}, {"_id": 0}).sort([('fetched_time', -1)]))
def extract(last_run: float) -> list:
DL = DataLake(role='read_and_write')
return list(DL.products.find({'fetched_time': {'$gte': last_run}}, {"_id": 0}).sort([('fetched_time', -1), ('updated_at', -1)]))
def transform(extracted_product: list, sub_name: str="") -> list:
path = os.getcwd()+ "/local_tools/csv/"
INDEXING = False
# Data pre-processing
df = pd.DataFrame(extracted_product)
df["product_price"] = df["product_price"].div(100000)
    # astype(str) returns a copy, so the result must be assigned back,
    # otherwise the duplicate rows are never actually dropped
    df = df.astype(str).drop_duplicates(keep='first')
df.replace(r';', ',', regex=True, inplace=True)
df.replace(r'\n', ' ', regex=True, inplace=True)
def transform_general(keys: list, table_name: str, sub_name: str=sub_name, normalize_key: dict={}, strip_key: list=[], expand: dict={}, expand_inplace: bool=False, replace_column_value: list=[], special_key: str="") -> dict:
file_name = f"{table_name}{sub_name}.csv"
file_path = path + file_name
# data proccessing
data = df.filter(items=keys).astype(str).drop_duplicates()
for key in strip_key:
data[key].replace({r'\s+$': '', r'^\s+': ''}, regex=True, inplace=True)
data[key].replace(' ', '', regex=True, inplace=True)
if normalize_key:
for key, value in normalize_key.items():
try:
if value is int:
data[key] = data[key].apply(lambda x: pd.Series(int(float(x)) if x.lower() != "nan" else 0))
elif value is float:
data[key] = data[key].apply(lambda x: pd.Series(float(x) if x.lower() != "nan" else 0))
elif value is str:
data[key] = data[key].apply(lambda x: pd.Series(str(x)))
elif value is bool:
data[key] = data[key].apply(lambda x: pd.Series(bool(x)) if x else False)
except Exception as e:
print(e)
if replace_column_value:
def transform_column(x):
return x.replace(value['old_value'], value['new_value'])
for column in replace_column_value:
for column_name, values in column.items():
for value in values:
data[column_name] = data[column_name].apply(lambda x: pd.Series(transform_column(x)))
if expand:
keys.extend(expand['new_key'])
if table_name == "product_time":
def to_date(x) -> datetime:
return datetime.fromtimestamp(float(x))
old_cols = data[expand['old_key']]
data['day'] = old_cols.apply(lambda x: pd.Series(to_date(x).day))
data['month'] = old_cols.apply(lambda x: pd.Series(to_date(x).month))
data['year'] = old_cols.apply(lambda x: pd.Series(to_date(x).year))
data['datetime'] = old_cols.apply(lambda x: pd.Series(to_date(x)))
if expand_inplace:
data.drop(columns=expand['old_key'], inplace=expand_inplace)
keys.remove(expand['old_key'])
if special_key != "":
if special_key == "product_brand":
data[special_key] = data[special_key].apply(lambda x: pd.Series(x if x else "No Brand"))
data.to_csv(file_path, index=INDEXING, sep=DELIMITER)
return {'file_path': file_path, 'table_name': table_name, 'keys': keys}
shop = transform_general(
keys = ["shop_id", "fetched_time", "shop_location", "shopee_verified", "is_official_shop"],
table_name = "shop",
replace_column_value = [
{'shopee_verified': [
{"old_value": "None", "new_value": 'False'}
]},
{'is_official_shop': [
{"old_value": "None", "new_value": 'False'},
{"old_value": "nan", "new_value": 'False'},
]},
]
)
product = transform_general(
keys = ["product_id", "fetched_time", "product_name", "product_image", "product_link", "updated_at", "shop_id"],
table_name = "product",
strip_key = ["product_image", "product_link"],
replace_column_value = [
{"product_link": [
{"old_value": "%", "new_value": ""}
]},
{"product_image": [
{"old_value": "%", "new_value": ""}
]}
]
)
product_brand = transform_general(
keys = ["product_id", "fetched_time", "product_brand", "category_id", "label_ids"],
table_name = "product_brand",
replace_column_value = [
{"label_ids": [
{"old_value": "[", "new_value": "{"},
{"old_value": "]", "new_value": "}"},
{"old_value": "nan", "new_value": "{}"},
{"old_value": "None", "new_value": "{}"},
]
},
{"product_brand": [
{"old_value": "None", "new_value": "No Brand"},
{"old_value": "Nan", "new_value": "No Brand"}
]}
],
special_key = "product_brand"
)
product_price = transform_general(
keys = ["product_id", "fetched_time", "product_price", "product_discount", "currency", "is_freeship", "is_on_flash_sale"],
table_name= 'product_price',
replace_column_value = [
{"is_freeship": [
{"old_value": "nan", "new_value": "False"},
{"old_value": "None", "new_value": "False"},
]
},
{"is_on_flash_sale": [
{"old_value": "nan", "new_value": "False"},
{"old_value": "None", "new_value": "False"},
]
},
]
)
product_rating = transform_general(
keys = ["product_id", "fetched_time", "rating_star", "rating_count" , "rating_with_context", "rating_with_image"],
table_name = "product_rating",
replace_column_value = [
{"rating_count": [
{"old_value": "[", "new_value": "{"},
{"old_value": "]", "new_value": "}"}
]
}
],
normalize_key={"rating_with_context": int, "rating_with_image": int, "rating_star": float}
)
product_feedback = transform_general(
keys = ["product_id", "fetched_time", "feedback_count", "liked_count", "view_count"],
table_name = "product_feedback",
normalize_key={"liked_count": int, "view_count": int}
)
product_quantity = transform_general(
keys = ["product_id", "fetched_time", "sold", 'stock'],
table_name = "product_quantity",
normalize_key={"sold": int, "stock": int}
)
product_time = transform_general(
keys = ["product_id", "fetched_time"],
table_name = "product_time",
expand = {
"old_key": "fetched_time",
"new_key": ["day", "month", "year", "datetime"]
},
expand_inplace = False
)
return [shop, product, product_brand, product_price, product_rating, product_feedback, product_quantity, product_time]
def load(transformed_data):
DWH = DataWareHouse(role='admin')
try:
for data in transformed_data:
try:
keys = data['keys']
file_path = data['file_path']
table_name = data['table_name']
DWH.copy_data_by_csv(file_path=file_path, table_name=table_name, keys=keys, delimiter=DELIMITER)
except psycopg2.errors.UniqueViolation as e:
logger.error(e)
continue
else:
# os.remove(file_path)
pass
finally:
os.remove(file_path)
pass
return len(transformed_data)
except Exception as e:
print(e)
return 0
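# Illustrative usage sketch: a single ETL pass over everything fetched since
# `last_run` (a unix timestamp); start() below does the same work per batch of
# shops and in parallel.
#
#   last_run = 0.0                      # example value: process everything
#   load(transform(extract(last_run)))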
def drop_index():
DataWareHouse(role='admin').drop_index()
def create_view_and_index():
DWH = DataWareHouse(role='admin')
DWH.create_view()
DWH.create_index()
import concurrent.futures
def start(offset: int=0, offset_high=None, limit: int=50):
with open('local_tools/last_run.txt') as f:
last_run = float(f.read())
shop_ids = extract_distinct_shop(last_run)
def etl(shop_ids, sub_name):
products = extract_product_from_shops(shop_ids, last_run)
print("number of product", len(products))
transformed = transform(products, sub_name)
loading = load(transformed)
# print("Loading: ", loading)
return loading
a = len(shop_ids)
if offset_high is not None:
a = offset_high
print("Number of shops: ", a)
drop_index()
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for i in range(offset, a, limit):
shops = shop_ids[i:i+limit]
print(f"{i}. Number of shop: ", len(shops))
futures.append(executor.submit(etl, shops, i))
count = 0
for future in concurrent.futures.as_completed(futures):
count += future.result()
create_view_and_index()
logger.info(count)
if __name__ == "__main__":
    start(offset=700, limit=50)
|
numeros = []
while True:
numero = int(input('Digite um número: '))
if numero not in numeros:
numeros.append(numero)
        print('Número incluído na lista.')
else:
print('Número já informado. Não foi adicionado.')
op=str(input('Deseja continuar ? S/N' )).strip().upper()[0]
while op not in 'SsNn':
        op = str(input('Opção inválida. Informe se deseja continuar? S/N ')).strip().upper()[0]
if op =='N':
break
numeros.sort()
print(f'Números informados {numeros}')
|
import json
import logging
from datetime import datetime
import requests
from uzemszunet.utils import convert_dotnet_date
from uzemszunet.exceptions import DownloadError
URL = 'https://elmuemasz.hu/elmu/ZavartatasTerkep/GetZavartatasok?vallalat=ELMUEMASZ'
logger = logging.getLogger('uzemszunet')
class Emasz:
def __init__(
self,
telepulesek,
notification_days,
url=URL,
forras_mentese=False,
helyi_forras=False
):
self.url = url
self.have_error = False
self.ses = requests.session()
self.ses.headers.update(
{
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
}
)
self.telepulesek = telepulesek
self.notification_days = notification_days
self.forras_mentese = forras_mentese
self.helyi_forras = helyi_forras
        # JSON downloaded via the session.
self.json = []
def process_cim(self, cim):
"""
        Processes the dict of a single address (cím).
        :param cim: address entry coming from the Émász JSON.
"""
if cim["Telepules"] not in self.telepulesek:
return []
uzemszunetek = []
now = datetime.now().date()
for datum in cim["Datumok"]:
            # ! convert_dotnet_date returns a datetime object!
datum['From'] = convert_dotnet_date(datum['From'])
datum['To'] = convert_dotnet_date(datum['To'])
diff = (datum['From'].date() - now).days
            # Add to the results
if diff in self.notification_days:
uzemszunetek.append(
{
"telepules": cim["Telepules"],
"datum_tol": datum['From'],
"datum_ig": datum['To'],
"utca": cim["Cim"],
"szolgaltato": "Émász",
"terulet": "",
"megjegyzes": "",
}
)
return uzemszunetek
def parse_json(self):
uzemszunetek = []
        if self.have_error or self.json is None or len(self.json) == 0:
logger.error(
'Nem sikerült az üzemszüneteket letölteni, nincs mit értelmezni.'
)
return []
for uzemszunet in self.json['zavartatasok']:
try:
                # Only take planned outages into account
if uzemszunet['Tervezett'] is False:
continue
                # Handle the addresses
for cim in uzemszunet['Cimek']:
uzemszunetek += self.process_cim(cim)
except Exception as e:
logger.exception('Hiba történt:')
self.have_error = True
return uzemszunetek
def get_uzemszunetek(self):
try:
r = self.ses.get(self.url)
r.raise_for_status()
if self.forras_mentese:
with open('emasz.json', 'w') as f:
json.dump(r.json(), f, indent=4)
return r.json()
except requests.exceptions.RequestException as re:
logger.error(
"Probléma az Émász forrás letöltésével:" + str(
re.response.status_code)
)
self.have_error = True
raise DownloadError(
"Probléma az ÉMÁSZ fájl letöltése közben",
re.response.status_code,
                re.response.reason
)
    def run(self):
        self.have_error = False
        try:
            if self.helyi_forras:
                with open('emasz.json', 'r') as f:
                    self.json = json.load(f)
            else:
                self.json = self.get_uzemszunetek()
            return self.parse_json()
        except (DownloadError, OSError, ValueError):
            self.have_error = True
            logger.exception('Hiba történt:')
            return []
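
# A minimal usage sketch, assuming the "uzemszunet" package is installed and
# that "Budapest" appears in the feed; the settlement name and the day
# offsets below are illustrative, not taken from this module.
if __name__ == "__main__":
    emasz = Emasz(telepulesek=["Budapest"], notification_days=[0, 1])
    for uzemszunet in emasz.run():
        print(uzemszunet["telepules"], uzemszunet["datum_tol"], uzemszunet["datum_ig"])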
|
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.calculus.infotheory.entropy.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from ..calculusexc import EmptyError, InsideIntervalError
from .utilit import log2
from .utilit import MAX_NGRAM
from .utilit import symbols_to_items
# ----------------------------------------------------------------------------
class sppasEntropy(object):
"""Entropy estimation.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: [email protected]
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 Brigitte Bigi
Entropy is a measure of unpredictability of information content.
Entropy is one of several ways to measure diversity.
If we want to look at the entropy on a large series, we could also compute
the entropy for windows to measure the evenness or uncertainties.
By looking at the definition, one could predict the areas that have a
lot of variance would result in a higher entropy and the areas that have
lower variance would result in lower entropy.
"""
def __init__(self, symbols, n=1):
"""Create a sppasEntropy instance with a list of symbols.
:param symbols: (list) a vector of symbols of any type.
:param n: (int) n value for n-gram estimation. n ranges 1..MAX_NGRAM
"""
self._symbols = list()
self._ngram = 1
self.set_symbols(symbols)
self.set_ngram(n)
# -----------------------------------------------------------------------
def set_symbols(self, symbols):
"""Set the list of symbols.
:param symbols: (list) a vector of symbols of any type.
"""
if len(symbols) == 0:
raise EmptyError
self._symbols = symbols
# -----------------------------------------------------------------------
def set_ngram(self, n):
"""Set the n value of n-grams.
:param n: (int) n value for n-gram estimation. n ranges 1..8
"""
n = int(n)
if 0 < n <= MAX_NGRAM:
self._ngram = n
else:
raise InsideIntervalError(n, 1, MAX_NGRAM)
# -----------------------------------------------------------------------
def eval(self):
"""Estimate the Shannon entropy of a vector of symbols.
Shannon's entropy measures the information contained in a message as
opposed to the portion of the message that is determined
(or predictable).
:returns: (float) entropy value
"""
if len(self._symbols) == 0:
raise EmptyError
exr = symbols_to_items(self._symbols, self._ngram)
total = len(self._symbols) - self._ngram + 1
result = 0.
for symbol, occurrences in exr.items():
probability = 1.0 * occurrences / total
self_information = log2(1.0 / probability)
result += (probability * self_information)
return result
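
# ----------------------------------------------------------------------------

# A minimal usage sketch (illustrative, not part of the original SPPAS code):
# the unigram entropy of an evenly distributed binary sequence is ~1 bit.
if __name__ == "__main__":
    _symbols = ["H", "T", "H", "T", "H", "T", "H", "T"]
    _entropy = sppasEntropy(_symbols, n=1)
    print(_entropy.eval())   # expected: close to 1.0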
|
from abc import ABCMeta, abstractmethod
# Definition of methods to build complex objects
class JourneyBuilder(metaclass=ABCMeta):
@staticmethod
@abstractmethod
def transportType(transport):
"Transport type"
@staticmethod
@abstractmethod
def originLocation(origin):
"Origin location"
@staticmethod
@abstractmethod
def destinyLocation(destiny):
"Destiny location"
@staticmethod
@abstractmethod
def arrival(arrival):
"Arrival time"
@staticmethod
@abstractmethod
def getInformation():
"Return the whole journey information"
|
from calc import get_expression, parse_args, evaluate_expression
def test_parses_arguments_simple():
expr = "2 2"
arg1, arg2 = parse_args(expr)
assert (arg1, arg2) == ("2", "2")
def test_gets_first_argument():
expr = "(add 2 (multiply 4 5)) 43"
assert get_expression(expr) == "(add 2 (multiply 4 5))"
def test_gets_first_argument_complex():
expr = "(add 2 (multiply 4 5)) (multiply 9 (add 4 5))"
assert get_expression(expr) == "(add 2 (multiply 4 5))"
def test_returns_expression_unmodified():
expr = "(add 2 (multiply 4 5))"
assert get_expression(expr) == "(add 2 (multiply 4 5))"
def test_returns_single_simple():
expr = "8989"
assert get_expression(expr) == "8989"
def test_returns_single_value_argument():
expr = "8989 (add 2 (multiply 4 5))"
assert get_expression(expr) == "8989"
def test_parses_arguments_complex():
expected_arg1 = "(add 2 (multiply 4 5))"
expected_arg2 = "(multiply 9 (add 4 5))"
expr = "{} {}".format(expected_arg1, expected_arg2)
arg1, arg2 = parse_args(expr)
assert (arg1, arg2) == (expected_arg1, expected_arg2)
def test_evaluates_single_value():
expr = "123"
assert evaluate_expression(expr) == 123
def test_evaluates_simple_expression():
expr = "(add 2 4)"
assert evaluate_expression(expr) == 6
def test_evaluates_nested_expression():
expr = "(add 1 (multiply (add 2 1) 3))"
assert evaluate_expression(expr) == 10
def test_evaluates_complex_expression():
expr = "(multiply 3 (multiply (multiply 3 3) 3))"
assert evaluate_expression(expr) == 81
def test_evaluates_complex2_expression():
expr = "(multiply (add 5 5) (add 5 5))"
assert evaluate_expression(expr) == 100
def test_evaluates_exponentation():
expr = "(exponent 2 (add 2 3))"
assert evaluate_expression(expr) == 32 |
import re
from typing import List
from CookLangPy.ingredient import Ingredient
from CookLangPy.cookware import Cookware
from CookLangPy.timer import Timer
replaceSpecialReg = re.compile(r"(([#@])(?:[^#@\n{}]+{\S*}|\w+))")
stepOutReg = re.compile(r"(\$[CIT])(\d+)")
blockCommentReg = re.compile(r".*\[-(.*)-\]")
timerReg = re.compile(r"(~.*{\d+(?:\.\d+)?%(?:hour|minute|second)s?})")
class Step():
def __init__(self) -> None:
self.ingredients : List[Ingredient] = []
self.cookware : List[Cookware] = []
self.timers : List[Timer] = []
self.__text : str = ""
self.comment : str = ""
    @staticmethod
    def parse(text: str) -> 'Step':
s = Step()
if "--" in text:
split = text.split("--")
text = split[0]
s.comment = "".join(split[1:])
m = blockCommentReg.match(text)
if bool(m):
s.comment = m.group(1)
s.setText(text)
return s
def setText(self, text:str):
for match in replaceSpecialReg.findall(text):
if match[1] == "#":
self.cookware.append(Cookware.parse(match[0])[0])
text = replaceSpecialReg.sub(r"$C{0}".format(len(self.cookware)-1), text, 1)
elif match[1] == "@":
self.ingredients.append(Ingredient.parse(match[0])[0])
text = replaceSpecialReg.sub(r"$I{0}".format(len(self.ingredients)-1), text, 1)
for match in timerReg.findall(text):
self.timers.append(Timer.parse(match)[0])
text = timerReg.sub(r"$T{0}".format(len(self.timers)-1), text, 1)
self.__text = text
def __str__(self) -> str:
out = self.__text
for match in stepOutReg.findall(self.__text):
if match[0] == "$C":
out= stepOutReg.sub(str(self.cookware[int(match[1])]), out, 1)
elif match[0] == "$I":
out = stepOutReg.sub(str(self.ingredients[int(match[1])]), out, 1)
elif match[0] == "$T":
out = stepOutReg.sub(str(self.timers[int(match[1])]), out, 1)
return out
def fileOut(self) -> str:
out = self.__text
for match in stepOutReg.findall(self.__text):
if match[0] == "$C":
out= stepOutReg.sub((self.cookware[int(match[1])].fileOut()), out, 1)
elif match[0] == "$I":
out = stepOutReg.sub((self.ingredients[int(match[1])].fileOut()), out, 1)
elif match[0] == "$T":
out = stepOutReg.sub((self.timers[int(match[1])].fileOut()), out, 1)
return out |
class GetESIDataError(Exception):
pass
class GetESIDataNotFound(GetESIDataError):
pass
class InvTypesNotFound(Exception):
pass |
# Copyright (c) Hikvision Research Institute. All rights reserved.
import warnings

import mmcv
import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon, Circle
from mmdet.core.visualization import imshow_det_bboxes, color_val_matplotlib
from mmdet.core import bbox_mapping_back, multiclass_nms
from mmdet.models.detectors.single_stage import SingleStageDetector
from mmdet.models.detectors.detr import DETR
from opera.core.keypoint import bbox_kpt2result, kpt_mapping_back
from ..builder import DETECTORS
@DETECTORS.register_module()
class PETR(DETR):
"""Implementation of `End-to-End Multi-Person Pose Estimation with
Transformers`"""
def __init__(self, *args, **kwargs):
super(DETR, self).__init__(*args, **kwargs)
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_keypoints,
gt_areas,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_keypoints (list[Tensor]): Each item are the truth keypoints for
each image in [p^{1}_x, p^{1}_y, p^{1}_v, ..., p^{K}_x,
p^{K}_y, p^{K}_v] format.
gt_areas (list[Tensor]): mask areas corresponding to each box.
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_keypoints,
gt_areas, gt_bboxes_ignore)
return losses
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`.
"""
warnings.warn('Warning! MultiheadAttention in DETR does not '
'support flops computation! Do not use the '
'results in your papers!')
batch_size, _, height, width = img.shape
dummy_img_metas = [
dict(
batch_input_shape=(height, width),
img_shape=(height, width, 3),
scale_factor=(1., 1., 1., 1.)) for _ in range(batch_size)
]
x = self.extract_feat(img)
outs = self.bbox_head(x, img_metas=dummy_img_metas)
bbox_list = self.bbox_head.get_bboxes(
*outs, dummy_img_metas, rescale=True)
return bbox_list
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images.
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
batch_size = len(img_metas)
assert batch_size == 1, 'Currently only batch_size 1 for inference ' \
f'mode is supported. Found batch_size {batch_size}.'
feat = self.extract_feat(img)
results_list = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox_kpt2result(det_bboxes, det_labels, det_kpts,
self.bbox_head.num_classes)
for det_bboxes, det_labels, det_kpts in results_list
]
return bbox_results
def merge_aug_results(self, aug_bboxes, aug_kpts, aug_scores, img_metas):
"""Merge augmented detection bboxes and keypoints.
Args:
aug_bboxes (list[Tensor]): shape (n, 4*#class).
aug_kpts (list[Tensor] or None): shape (n, 17, 2).
img_metas (list): meta information.
Returns:
tuple: (bboxes, kpts, scores).
"""
recovered_bboxes = []
recovered_kpts = []
for bboxes, kpts, img_info in zip(aug_bboxes, aug_kpts, img_metas):
img_shape = img_info[0]['img_shape']
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
flip_direction = img_info[0]['flip_direction']
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
flip_direction)
kpts = kpt_mapping_back(kpts, img_shape, scale_factor, flip,
self.test_cfg['flip_pairs'],
flip_direction)
recovered_bboxes.append(bboxes)
recovered_kpts.append(kpts)
bboxes = torch.cat(recovered_bboxes, dim=0)
kpts = torch.cat(recovered_kpts, dim=0)
if aug_scores is None:
return bboxes, kpts
else:
scores = torch.cat(aug_scores, dim=0)
return bboxes, kpts, scores
def aug_test(self, imgs, img_metas, rescale=False):
feats = self.extract_feats(imgs)
aug_bboxes = []
aug_scores = []
aug_kpts = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
outs = self.bbox_head(x, img_meta)
bbox_list = self.bbox_head.get_bboxes(
*outs, img_meta, rescale=rescale)
for det_bboxes, det_labels, det_kpts in bbox_list:
aug_bboxes.append(det_bboxes[:, :4])
aug_scores.append(det_bboxes[:, 4])
aug_kpts.append(det_kpts[..., :2])
merged_bboxes, merged_kpts, merged_scores = self.merge_aug_results(
aug_bboxes, aug_kpts, aug_scores, img_metas)
merged_scores = merged_scores.unsqueeze(1)
padding = merged_scores.new_zeros(merged_scores.shape[0], 1)
merged_scores = torch.cat([merged_scores, padding], dim=-1)
det_bboxes, det_labels, keep_inds = multiclass_nms(
merged_bboxes,
merged_scores,
self.test_cfg.score_thr,
self.test_cfg.nms,
self.test_cfg.max_per_img,
return_inds=True)
det_kpts = merged_kpts[keep_inds]
det_kpts = torch.cat(
(det_kpts, det_kpts.new_ones(det_kpts[..., :1].shape)), dim=2)
bbox_results = [
bbox_kpt2result(det_bboxes, det_labels, det_kpts,
self.bbox_head.num_classes)
]
return bbox_results
def show_result(self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=10,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'.
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'.
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None.
thickness (int): Thickness of lines. Default: 2.
            font_size (int): Font size of texts. Default: 10.
win_name (str): The window name. Default: ''.
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`.
"""
img = mmcv.imread(img)
img = img.copy()
if isinstance(result, tuple):
bbox_result, keypoint_result = result
segm_result = None
else:
bbox_result, segm_result, keypoint_result = result, None, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
# draw segmentation masks
segms = None
if segm_result is not None and len(labels) > 0: # non empty
segms = mmcv.concat_list(segm_result)
if isinstance(segms[0], torch.Tensor):
segms = torch.stack(segms, dim=0).detach().cpu().numpy()
else:
segms = np.stack(segms, axis=0)
# draw keypoints
keypoints = None
if keypoint_result is not None:
keypoints = np.vstack(keypoint_result)
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# draw bounding boxes
img = self.imshow_det_bboxes(
img,
bboxes,
labels,
segms,
keypoints,
class_names=self.CLASSES,
score_thr=score_thr,
bbox_color=bbox_color,
text_color=text_color,
mask_color=mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img
def imshow_det_bboxes(self,
img,
bboxes,
labels,
segms=None,
keypoints=None,
class_names=None,
score_thr=0,
bbox_color='green',
text_color='green',
mask_color=None,
thickness=2,
font_size=8,
win_name='',
show=True,
wait_time=0,
out_file=None):
"""Draw bboxes and class labels (with scores) on an image.
Args:
img (str or ndarray): The image to be displayed.
bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
(n, 5).
labels (ndarray): Labels of bboxes.
segms (ndarray or None): Masks, shaped (n,h,w) or None.
            keypoints (ndarray): Keypoints (with scores), shaped (n, K, 3).
class_names (list[str]): Names of each classes.
score_thr (float): Minimum score of bboxes to be shown. Default: 0.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'.
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'.
mask_color (str or tuple(int) or :obj:`Color`, optional):
Color of masks. The tuple of color should be in BGR order.
Default: None.
thickness (int): Thickness of lines. Default: 2.
            font_size (int): Font size of texts. Default: 8.
show (bool): Whether to show the image. Default: True.
win_name (str): The window name. Default: ''.
wait_time (float): Value of waitKey param. Default: 0.
out_file (str, optional): The filename to write the image.
Default: None.
Returns:
ndarray: The image with bboxes drawn on it.
"""
assert bboxes.ndim == 2, \
f' bboxes ndim should be 2, but its ndim is {bboxes.ndim}.'
assert labels.ndim == 1, \
f' labels ndim should be 1, but its ndim is {labels.ndim}.'
assert bboxes.shape[0] == labels.shape[0], \
'bboxes.shape[0] and labels.shape[0] should have the same length.'
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5, \
f' bboxes.shape[1] should be 4 or 5, but its {bboxes.shape[1]}.'
img = mmcv.imread(img).astype(np.uint8)
if score_thr > 0:
assert bboxes.shape[1] == 5
scores = bboxes[:, -1]
inds = scores > score_thr
bboxes = bboxes[inds, :]
labels = labels[inds]
if segms is not None:
segms = segms[inds, ...]
if keypoints is not None:
keypoints = keypoints[inds, ...]
mask_colors = []
if labels.shape[0] > 0:
if mask_color is None:
# random color
np.random.seed(42)
mask_colors = [
np.random.randint(0, 256, (1, 3), dtype=np.uint8)
for _ in range(max(labels) + 1)
]
else:
# specify color
mask_colors = [
np.array(mmcv.color_val(mask_color)[::-1], dtype=np.uint8)
] * (
max(labels) + 1)
bbox_color = color_val_matplotlib(bbox_color)
# text_color = color_val_matplotlib(text_color)
num_keypoint = keypoints.shape[1]
if num_keypoint == 14:
colors_hp = [(169, 209, 142), (255, 255, 0), (169, 209, 142),
(255, 255, 0), (169, 209, 142), (255, 255, 0),
(0, 176, 240), (252, 176, 243), (0, 176, 240),
(252, 176, 243), (0, 176, 240), (252, 176, 243),
(236, 6, 124), (236, 6, 124)]
elif num_keypoint == 17:
colors_hp = [(236, 6, 124), (236, 6, 124), (236, 6, 124),
(236, 6, 124), (236, 6, 124), (169, 209, 142),
(255, 255, 0), (169, 209, 142), (255, 255, 0),
(169, 209, 142), (255, 255, 0), (0, 176, 240),
(252, 176, 243), (0, 176, 240), (252, 176, 243),
(0, 176, 240), (252, 176, 243)]
else:
raise ValueError(f'unsupported keypoint amount {num_keypoint}')
colors_hp = [color[::-1] for color in colors_hp]
colors_hp = [color_val_matplotlib(color) for color in colors_hp]
if num_keypoint == 14:
edges = [
[0, 2],
[2, 4],
[1, 3],
[3, 5], # arms
[0, 1],
[0, 6],
[1, 7], # body
[6, 8],
[8, 10],
[7, 9],
[9, 11], # legs
[12, 13]
] # neck
ec = [(169, 209, 142),
(169, 209, 142), (255, 255, 0), (255, 255, 0), (255, 102, 0),
(0, 176, 240), (252, 176, 243), (0, 176, 240), (0, 176, 240),
(252, 176, 243), (252, 176, 243), (236, 6, 124)]
elif num_keypoint == 17:
edges = [
[0, 1],
[0, 2],
[1, 3],
[2, 4], # head
[5, 7],
[7, 9],
[6, 8],
[8, 10], # arms
[5, 6],
[5, 11],
[6, 12], # body
[11, 13],
[13, 15],
[12, 14],
[14, 16]
] # legs
ec = [(236, 6, 124), (236, 6, 124), (236, 6, 124), (236, 6, 124),
(169, 209, 142),
(169, 209, 142), (255, 255, 0), (255, 255, 0), (255, 102, 0),
(0, 176, 240), (252, 176, 243), (0, 176, 240), (0, 176, 240),
(252, 176, 243), (252, 176, 243)]
else:
raise ValueError(f'unsupported keypoint amount {num_keypoint}')
ec = [color[::-1] for color in ec]
ec = [color_val_matplotlib(color) for color in ec]
img = mmcv.bgr2rgb(img)
width, height = img.shape[1], img.shape[0]
img = np.ascontiguousarray(img)
EPS = 1e-2
fig = plt.figure(win_name, frameon=False)
plt.title(win_name)
canvas = fig.canvas
dpi = fig.get_dpi()
# add a small EPS to avoid precision lost due to matplotlib's truncation
# (https://github.com/matplotlib/matplotlib/issues/15363)
fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi)
# remove white edges by set subplot margin
plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
ax = plt.gca()
ax.axis('off')
polygons = []
color = []
for i, (bbox, label, kpt) in enumerate(zip(bboxes, labels, keypoints)):
bbox_int = bbox.astype(np.int32)
poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]],
[bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]]
np_poly = np.array(poly).reshape((4, 2))
# polygons.append(Polygon(np_poly))
# color.append(bbox_color)
# label_text = class_names[
# label] if class_names is not None else f'class {label}'
# if len(bbox) > 4:
# label_text += f'|{bbox[-1]:.02f}'
# get left-top corner of all keypoints
bbox_int[0] = np.floor(kpt[:, 0].min()).astype(np.int32)
bbox_int[1] = np.floor(kpt[:, 1].min() - 30).astype(np.int32)
label_text = f'{bbox[-1]:.02f}'
# ax.text(
# bbox_int[0],
# bbox_int[1],
# f'{label_text}',
# bbox={
# 'facecolor': 'black',
# 'alpha': 0.8,
# 'pad': 0.7,
# 'edgecolor': 'none'
# },
# color=text_color,
# fontsize=font_size,
# verticalalignment='top',
# horizontalalignment='left')
for j in range(kpt.shape[0]):
ax.add_patch(
Circle(
xy=(kpt[j, 0], kpt[j, 1]),
radius=2,
color=colors_hp[j]))
for j, e in enumerate(edges):
poly = [[kpt[e[0], 0], kpt[e[0], 1]],
[kpt[e[1], 0], kpt[e[1], 1]]]
np_poly = np.array(poly).reshape((2, 2))
polygons.append(Polygon(np_poly))
color.append(ec[j])
if segms is not None:
color_mask = mask_colors[labels[i]]
mask = segms[i].astype(bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
plt.imshow(img)
p = PatchCollection(
polygons, facecolor='none', edgecolors=color, linewidths=thickness)
ax.add_collection(p)
stream, _ = canvas.print_to_buffer()
buffer = np.frombuffer(stream, dtype='uint8')
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
img = rgb.astype('uint8')
img = mmcv.rgb2bgr(img)
if show:
            # We do not use cv2 for display because in some cases, opencv will
            # conflict with Qt, it will output a warning: Current thread
            # is not the object's thread. You can refer to
# https://github.com/opencv/opencv-python/issues/46 for details
if wait_time == 0:
plt.show()
else:
plt.show(block=False)
plt.pause(wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
plt.close()
return img
|
# -*- coding: utf-8 -*-
"""
Spyder editor
This is a temporary script.
"""
from ctypes import *
from ctypes.wintypes import *
import win32file
import win32pipe
from pywin32_testutil import str2bytes
import numpy as np
import struct
import time
global lpszPipename
lpszPipename = u'\\\\.\\pipe\\scTDCserver'
class SCTDC_DAQ():
def __init__(self):
self.pipename = u'\\\\.\\pipe\\scTDCserver'
self.hpipe=[]
def __del__(self):
        # try:
# win32pipe.DisconnectNamedPipe(self.hpipe)
#except:
# print 'error Disconnecting pipe handle'
try:
win32file.CloseHandle(self.hpipe)
except:
            print('error Releasing pipe handle')
def DisconnectSCTDC(self):
try:
win32file.CloseHandle(self.hpipe)
except:
            print('error Releasing pipe handle')
def ConnectSCTDC(self):
self.hpipe=win32file.CreateFile(self.pipename,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.OPEN_EXISTING,
0,
None)
if (self.hpipe == win32file.INVALID_HANDLE_VALUE):
            print("Create File failed")
return -1
def SCTDCStart(self,acqtime):
BUFFSIZE=1024*1024
target_buffer=POINTER(c_char*BUFFSIZE)()
target_buffer.contents=(c_char*BUFFSIZE)()
read_buffer=POINTER(c_char*BUFFSIZE)()
read_buffer.contents=(c_char*BUFFSIZE)()
mybuffer=win32file.AllocateReadBuffer(1024*1024)
win32pipe.SetNamedPipeHandleState(self.hpipe,win32pipe.PIPE_READMODE_MESSAGE, None, None)
success=win32pipe.TransactNamedPipe(self.hpipe,
str2bytes("START %i"%(acqtime)),
mybuffer,
None)
        # win32pipe.DisconnectNamedPipe(self.hpipe)
#win32file.CloseHandle(self.hpipe)
return success
def SCTDCRead(self,ROI):
mybuffer=win32file.AllocateReadBuffer(1024*1024)
data_pumped = []
data_unpumped = []
ROIlow=ROI[0]
ROIhigh=ROI[1]
#self.ConnectSCTDC()
win32pipe.SetNamedPipeHandleState(self.hpipe,win32pipe.PIPE_READMODE_MESSAGE, None, None)
result = win32pipe.TransactNamedPipe(self.hpipe,
str2bytes("READ"),
mybuffer,
None)
start=time.time()
databytes = np.reshape(np.array([result[1][i] for i in range(len(result[1]))]),(-1,4))
dataint = np.array([struct.unpack('I',databytes[i,:])[0] for i in range(databytes.shape[0])])
#dataint = np.array([struct.unpack('I',databytes[i,:])[0] & 0x07FFFFFF for i in range(databytes.shape[0])])
for d in dataint:
val = d & 0x07FFFFFF
if ((val >= ROIlow) and (val <= ROIhigh)):
if (((d & 0x08000000) >> 27 ) == 1 ):
data_pumped.append(val)
else:
data_unpumped.append(val)
stop=time.time()-start
        print(stop)
return data_unpumped, data_pumped
|
"""Backend for the ground station.
Manages satellites, computes passes and stores observations.
"""
import os
from flask import Flask
def create_app(config=None):
"""Perform set up for the backend."""
app = Flask(__name__, instance_path="/app/data", instance_relative_config=True)
app.config["SATS_DB"] = os.path.join(app.instance_path, "sats.db")
app.config["PASSES_DB"] = os.path.join(app.instance_path, "passes.db")
app.config["TIMEZONE"] = "Australia/Melbourne"
if config is None:
app.config.from_pyfile("config.py", silent=True)
else:
app.config.from_mapping(config)
try:
os.makedirs(app.instance_path)
except OSError:
pass
@app.route("/")
def index():
return "<h1>Hello world!</h1>"
from . import database
database.init_app(app)
from .api import api
api.init_app(app)
return app
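
# A minimal way to run the factory directly (illustrative); in production a
# WSGI server such as gunicorn would call create_app() instead. Host and port
# below are assumptions, not taken from this module.
if __name__ == "__main__":
    create_app().run(host="0.0.0.0", port=5000)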
|
import uuid
from dotenv import load_dotenv
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine import ValidationError
from cassandra.cqlengine.models import Model
from flask import Flask, jsonify, request
from todos import Todos, session, KEYSPACE
from flask_cors import CORS
load_dotenv()
# setup flask
app = Flask(__name__)
CORS(app, resources={r"/api/*": {"origins": "*"}})
@app.route("/api/v1/<user_id>/todos", methods=["GET"])
def get_todos(user_id):
res = [dict(x) for x in Todos.filter(user_id=user_id)]
return jsonify(res)
@app.route("/api/v1/<user_id>/todos", methods=["DELETE"])
def delete_todos(user_id):
session.execute(
f"TRUNCATE TABLE {KEYSPACE}.todoitems")
sync_table(Todos)
return jsonify({"success": True})
@app.route("/api/v1/<user_id>/todos", methods=["POST"])
def create_todo(user_id):
try:
request_json = request.get_json(force=True)
item_id = uuid.uuid1()
request_json["item_id"] = item_id
request_json["url"] = f"https://{request.headers.get('X-Forwarded-Host', 'localhost')}/api/v1/{user_id}/todos/{item_id}"
new_todo = Todos.create(user_id=user_id, **request_json)
return jsonify(dict(new_todo))
except ValidationError as e:
return jsonify({"error": str(e)}), 400
@app.route("/api/v1/<user_id>/todos/<item_id>", methods=["GET"])
def get_todo(user_id, item_id):
try:
todo = Todos.get(user_id=user_id, item_id=item_id)
return jsonify(dict(todo))
except Model.DoesNotExist:
return jsonify({"error": "not found"}), 404
@app.route("/api/v1/<user_id>/todos/<item_id>", methods=["DELETE"])
def delete_todo(user_id, item_id):
try:
todo = Todos.get(user_id=user_id, item_id=item_id)
todo.delete()
return jsonify(dict(todo))
except Model.DoesNotExist:
return jsonify({"error": "not found"}), 404
@app.route("/api/v1/<user_id>/todos/<item_id>", methods=["PATCH"])
def update_todo(user_id, item_id):
try:
todo = Todos.get(user_id=user_id, item_id=item_id)
request_json = request.get_json(force=True)
todo.update(**request_json)
return jsonify(dict(todo))
except Model.DoesNotExist:
return jsonify({"error": "not found"}), 404
except ValidationError as e:
return jsonify({"error": str(e)}), 400
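
if __name__ == "__main__":
    # A minimal smoke test using Flask's built-in test client (illustrative);
    # it assumes the Cassandra cluster behind todos.py is reachable and that
    # "demo-user" is an acceptable user id.
    with app.test_client() as client:
        print(client.get("/api/v1/demo-user/todos").get_json())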
|
import json
from enum import Enum
class SpriteParameter(Enum):
FILENAME = "file"
FILESNAME = "files"
SCALE = "scale"
ROTATION = "rotation"
TYPE_IMAGE = "typeImage"
TYPE = "type"
DURATION = "duration"
LOOPED = "looped"
class ObjectParameter(Enum):
THRUST = "thrust"
RESISTANCE = "resistance"
ROTATE_SPEED = "rotateSpeed"
MAGAZINE = "magazine"
LIVE = "live"
COST_BULLET = "costBullet"
RECOVERY_MAGAZINE = "recoveryMagazine"
RECOVERY_ENERGY = "recoveryEnergy"
CONSUMPTION_ENERGY = "consumptionEnergy"
POWER_BANK = "powerBank"
RESET_ENGINE = "resetEngine"
WEAPON_POWER = "weaponPower"
CONST_RELOAD_WEAPON_TIME = "constReloadWeaponTime"
CONST_RECOVERY_ENGINE_TIME = "constRecoveryEngineTime"
CONST_RESET_ENGINE_TIME = "constResetEngineTime"
CONST_ROTATE_FACTOR = "constRotateFactor"
CONST_ROTATE_STEP_FACTOR = "constRotateStepFactor"
DAMAGE = "damage"
ENERGY = "energy"
WEAPON = "weapon"
COUNT_SPLINTERS = "countSplinters"
BONUS = "bonus"
MAX_SPEED = "maxSpeed"
FIRING_SPEED = "firingSpeed"
class SettingsParameter(Enum):
WIDTH = "width"
HEIGHT = "height"
DEBUG = "debug"
FPS = "fps"
LOADLIBS = "load_libraries"
TITTLE = "tittle"
SHOW_FPS = "show_fps"
class JsonManager(object):
def __init__(self, work_dir="."):
self._db = {}
self._work_dir = work_dir
self._data = None
def addJsonData(self, name, file):
try:
self._db.update({name: "%s\\%s" % (self._work_dir, file)})
except IOError as ex:
print("ReadJson: {}".format(ex))
def getDB(self, name):
try:
with open(self._db.get(name)) as json_data:
data = json.load(json_data)
return data
        except Exception as ex:
            print("Could not load JSON base '{}': {}".format(name, ex))
return None
def getProperty(self, name, key):
with open(self._db.get(name)) as json_data:
data = json.load(json_data)
value = data.get(key)
return value
class PropertyManager(object):
def __init__(self, jsonManager):
self._manager = jsonManager
def _getDB(self, name):
return self._manager.getDB(name)
def get_sprite(self, name_object, parameter):
try:
value = self._getDB(name_object).get("sprite").get(parameter.value)
except AttributeError as ex:
return None
else:
return value
def get_parameter(self, name_object, parameter):
try:
value = self._getDB(name_object).get("parameters").get(parameter.value)
except AttributeError as ex:
return None
else:
return value
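
# A minimal usage sketch (illustrative): it assumes a file "ship.json" exists
# in the working directory with "sprite" and "parameters" sections; the file
# name and object name are not part of the original code.
if __name__ == "__main__":
    manager = JsonManager(work_dir=".")
    manager.addJsonData("ship", "ship.json")
    properties = PropertyManager(manager)
    print(properties.get_sprite("ship", SpriteParameter.FILENAME))
    print(properties.get_parameter("ship", ObjectParameter.THRUST))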
|
from tests.scoring_engine.checks.check_test import CheckTest
class TestWordpressCheck(CheckTest):
check_name = 'WordpressCheck'
properties = {
'useragent': 'testagent',
'vhost': 'www.example.com',
'uri': '/wp-login.php'
}
accounts = {
'admin': 'password'
}
cmd = "curl -s -S -4 -v -L --cookie-jar - --header 'Host: www.example.com' -A 'testagent' " \
"--data 'log=admin&pwd=password' '127.0.0.1:1234/wp-login.php'"
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Softbank Robotics Europe
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
qidata_gui package
==================
This package contains all widgets necessary to display data and any metadata linked to it.
It also contains several graphical applications using those widgets.
"""
# Standard libraries
import os as _os
# ––––––––––––––––––––––––––––
# Convenience version variable
VERSION = open(_os.path.join(_os.path.dirname(_os.path.realpath(__file__)),
"VERSION")
).read().split()[0]
RESOURCES_DIR = _os.path.join(_os.path.dirname(_os.path.realpath(__file__)),
"_resources")
from qidataframe_widget import QiDataFrameWidget
from qidatasensor_widget import QiDataSensorWidget
from qidataset_widget import QiDataSetWidget
#––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––#
|
########################################################################
#
# File Name: ElementElement.py
#
#
"""
Implementation of the XSLT Spec element stylesheet element.
WWW: http://4suite.com/4XSLT e-mail: [email protected]
Copyright (c) 1999-2000 FourThought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import EMPTY_NAMESPACE
import xml.dom.ext
import xml.dom.Element
import xml.xslt
from xml.xslt import XsltElement, XsltException, Error, AttributeValueTemplate
from xml.xpath import CoreFunctions, Util
class ElementElement(XsltElement):
legalAttrs = ('name', 'namespace', 'use-attribute-sets')
def __init__(self, doc, uri=xml.xslt.XSL_NAMESPACE, localName='element',
prefix='xsl', baseUri=''):
XsltElement.__init__(self, doc, uri, localName, prefix, baseUri)
def setup(self):
self.__dict__['_name'] = AttributeValueTemplate.AttributeValueTemplate(self.getAttributeNS(EMPTY_NAMESPACE, 'name'))
self.__dict__['_namespace'] = AttributeValueTemplate.AttributeValueTemplate(self.getAttributeNS(EMPTY_NAMESPACE, 'namespace'))
self.__dict__['_useAttributeSets'] = string.splitfields(self.getAttributeNS(EMPTY_NAMESPACE, 'use-attribute-sets'))
self.__dict__['_nss'] = xml.dom.ext.GetAllNs(self)
return
def instantiate(self, context, processor):
origState = context.copy()
context.setNamespaces(self._nss)
name = self._name.evaluate(context)
namespace = self._namespace.evaluate(context)
(prefix, local) = xml.dom.ext.SplitQName(name)
if not namespace and prefix:
namespace = context.processorNss[prefix]
#FIXME: Use proper pysax AttributeList objects
processor.writers[-1].startElement(name, namespace)
for attr_set_name in self._useAttributeSets:
split_name = Util.ExpandQName(attr_set_name, namespaces=context.processorNss)
try:
attr_set = processor.attributeSets[split_name]
except KeyError:
raise XsltException(Error.UNDEFINED_ATTRIBUTE_SET, attr_set_name)
attr_set.use(context, processor)
for child in self.childNodes:
context = child.instantiate(context, processor)[0]
processor.writers[-1].endElement(name)
context.set(origState)
return (context,)
def __getinitargs__(self):
return (None, self.namespaceURI, self.localName, self.prefix,
self.baseUri)
def __getstate__(self):
base_state = XsltElement.__getstate__(self)
new_state = (base_state, self._nss, self._name,
self._namespace, self._useAttributeSets)
return new_state
def __setstate__(self, state):
XsltElement.__setstate__(self, state[0])
self._nss = state[1]
self._name = state[2]
self._namespace = state[3]
self._useAttributeSets = state[4]
return
|
from django.shortcuts import render
from django.http import JsonResponse
import socket
# Create your views here.
def get_hosname(request):
data = {
'hostname': socket.gethostname()
}
return JsonResponse(data) |
# -*- coding: utf-8 -*-
# [Problem description]
# Count the number of spaces and newline characters in the input string.
# [Input format]
# The input may be any characters typed on the keyboard; the last line has no trailing newline.
# [Output format]
# Print the space count and the newline count on a single line, separated by a space.
# [Sample input]
# bb ss pp=
#
# fz
# [Sample output]
# 2 1
str1=''
print('请您输入字符,输入‘#’可结束输入:')
s = input()
while s!='#':  # keep reading lines until the user enters '#'
str1 = str1+s+'\n'
s= input()
print('计算得:空格个数(左)换行符个数(右)')
print("%s %s" % (str1.count(' '), str1.count('\n')-1))  # subtract the trailing '\n'
|
class SimpleResult():
code = 0
message = ""
def __init__(self, code , message):
self.code = code
self.message = message
def json(self):
return {"code":self.code,"message":self.message}
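
if __name__ == "__main__":
    # A minimal usage sketch; the code and message values are illustrative.
    result = SimpleResult(0, "ok")
    print(result.json())  # {'code': 0, 'message': 'ok'}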
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
from gst_file import gst_file_loader
from draw_utils import draw_ped
# EXAMPLE 3.3.1 | Apply CUDA Contour Based Visual Inspection Engine for Video Stream
# Uncomment this if using OpenGL for image rendering
window_name_1 = "Detected Object"
window_name_2 = "Detected Contour"
cv2.namedWindow(window_name_1, flags=cv2.WINDOW_OPENGL) # Window with OpenGL
cv2.namedWindow(window_name_2, flags=cv2.WINDOW_OPENGL) # Window with OpenGL
THRESHOLD_COUNT = 22 # min number of child contour (part_a.jpg : 22 | part_b.jpg : 2)
MIN_AREA = 100 # minimum number of pixel to be counted to reject small contour
# Contour Property parameter for parent contour
MAX_ASPECT_RATIO = 0.3 # (part_a.jpg : 0.3 | part_b.jpg : 0.5)
MIN_ASPECT_RATIO = 0.1 # (part_a.jpg : 0.1 | part_b.jpg : 0.3)
MIN_EXTENT = 0.4
# Contour Property parameter for child contour
MAX_ASPECT_RATIO_CHILD = 1.5
MIN_ASPECT_RATIO_CHILD = 0.5
MIN_EXTENT_CHILD = 0.4
#load video file using GStreamer
cap = cv2.VideoCapture(gst_file_loader("video_a.mp4", useRotate90=True), cv2.CAP_GSTREAMER) # backend GSTREAMER
# cap = cv2.VideoCapture("video_a.mp4", cv2.CAP_FFMPEG) # backend FFMPEG
# cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 1) # applicable for backend FFMPEG only
# GPU memory initialization
ret, img = cap.read()
h, w, c = img.shape
img_GpuMat = cv2.cuda_GpuMat() # Create GpuMat object
img_GpuMat.create((w, h), cv2.CV_8UC3) # cv2.CV_8UC3 -> 8bit image 3 channel
hsv_GpuMat = cv2.cuda_GpuMat() # Create GpuMat object
hsv_GpuMat.create((w, h), cv2.CV_8UC3) # cv2.CV_8UC3 -> 8bit image 3 channel
h_GpuMat = cv2.cuda_GpuMat() # Create GpuMat object
h_GpuMat.create((w, h), cv2.CV_8UC1) # cv2.CV_8UC1 -> 8bit image 1 channel
s_GpuMat = cv2.cuda_GpuMat() # Create GpuMat object
s_GpuMat.create((w, h), cv2.CV_8UC1) # cv2.CV_8UC1 -> 8bit image 1 channel
v_GpuMat = cv2.cuda_GpuMat() # Create GpuMat object
v_GpuMat.create((w, h), cv2.CV_8UC1) # cv2.CV_8UC1 -> 8bit image 1 channel
mask_GpuMat = cv2.cuda_GpuMat() # Create GpuMat object
mask_GpuMat.create((w, h), cv2.CV_8UC1) # cv2.CV_8UC1 -> 8bit image 1 channel
res_GpuMat = cv2.cuda_GpuMat() # Create GpuMat object
res_GpuMat.create((w, h), cv2.CV_8UC3) # cv2.CV_8UC3 -> 8bit image 3 channel
# create CUDA eroding morphological transform object with a 2x2 kernel
kernel = np.ones((2,2),np.uint8)
MorphObj = cv2.cuda.createMorphologyFilter(cv2.MORPH_ERODE, cv2.CV_8UC1, kernel, iterations = 1)
times = []
while cap.isOpened() :
object_contour = {}
object_count = {}
object_id = 0
e1 = cv2.getTickCount()
ret, img = cap.read()
if not ret :
break
# upload to GPU memory
img_GpuMat.upload(img)
# CUDA convert to hsv
cv2.cuda.cvtColor(img_GpuMat, cv2.COLOR_BGR2HSV, hsv_GpuMat)
# split HSV GPU Mat
cv2.cuda.split(hsv_GpuMat, [h_GpuMat, s_GpuMat, v_GpuMat])
# CUDA Threshold the V(20,150) ~ HSV GPU Mat to get only gray colors
cv2.cuda.inRange(v_GpuMat, 20, 150, mask_GpuMat)
# CUDA Eroding Morphological Transform
MorphObj.apply(mask_GpuMat, mask_GpuMat)
# CUDA bitwise operation
cv2.cuda.bitwise_not(img_GpuMat, res_GpuMat, mask=mask_GpuMat) # apply bitwise NOT to original image -> result image
cv2.cuda.bitwise_not(res_GpuMat, res_GpuMat, mask=mask_GpuMat) # apply bitwise NOT to result image
# Download Matrix to Host Memory
mask = mask_GpuMat.download()
res = res_GpuMat.download()
# find contour from range thresholding
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for cnt, hrcy in zip(contours, hierarchy[0]):
# find contour Area & boungin Rect
area = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
# calculate aspectRatio & extent
aspectRatio = float(w)/h
rect_area = w*h
extent = float(area)/rect_area
# filter a small contour
if area <= MIN_AREA:
continue
# Find All Extreme Outer Contour [BLUE]
if hrcy[3] == -1 :
if aspectRatio < MAX_ASPECT_RATIO and aspectRatio > MIN_ASPECT_RATIO and extent > MIN_EXTENT:
cv2.drawContours(res, cnt, -1, (255,0,0), 2)
object_contour["object_%d" % object_id] = cnt # insert parent contour
object_count["object_%d" % object_id] = 0 # set initinal count 0
object_id += 1
# Find All child contour [GREEN]
if hrcy[3] != -1 :
if aspectRatio < MAX_ASPECT_RATIO_CHILD and aspectRatio > MIN_ASPECT_RATIO_CHILD and extent > MIN_EXTENT_CHILD:
cv2.drawContours(res, cnt, -1, (0,255,0), 2)
for obj_name in object_contour:
                    # find which parent contour this child contour belongs to
if cv2.pointPolygonTest(object_contour[obj_name], (x, y), measureDist=True) > 0 :
object_count[obj_name] += 1
for obj_name in object_count:
x, y, w, h = cv2.boundingRect(object_contour[obj_name])
        # check if the number of child contours inside the parent is below the threshold count
if object_count[obj_name] < THRESHOLD_COUNT :
img = draw_ped(img, "%s (%d)" % (obj_name, object_count[obj_name]) , x, y, x+w, y+h,
font_size=0.4, alpha=0.6, bg_color=(0,0,255), ouline_color=(0,0,255), text_color=(0,0,0))
else :
img = draw_ped(img, "%s (%d)" % (obj_name, object_count[obj_name]) , x, y, x+w, y+h,
font_size=0.4, alpha=0.6, bg_color=(0,255,0), ouline_color=(0,255,0), text_color=(0,0,0))
cv2.imshow(window_name_1, img)
cv2.imshow(window_name_2, res)
if cv2.waitKey(1) == ord("q"):
break
e2 = cv2.getTickCount()
times.append((e2 - e1)/ cv2.getTickFrequency())
time_avg = np.array(times).mean()
print("Average execution time : %.4fs" % time_avg)
print("Average FPS : %.2f" % (1/time_avg))
cv2.destroyAllWindows()
|
#! /usr/bin/python3
from __future__ import division, print_function, unicode_literals
import sys
import wrf433
def callback(d):
print(d)
def main():
    if len(sys.argv) < 2 or len(sys.argv) > 3:
        print("Usage: %s csvfile [vcdfile]\n" % sys.argv[0])
        sys.exit(1)
    csv_fn = sys.argv[1]
print("Decoding CSV from %s" % csv_fn, file = sys.stderr)
vcd_f = None
if len(sys.argv) >= 3:
vcd_fn = sys.argv[2]
vcd_f = open(vcd_fn, 'w')
wrf433.tracer = wrf433.Tracer(vcd_f)
print("Writing VCD trace to %s" % vcd_fn, file = sys.stderr)
mux = wrf433.Mux()
mux.add_decoder(wrf433.ArcDecoder(callback))
mux.add_decoder(wrf433.LearningCodeDecoder(callback))
mux.add_decoder(wrf433.EsicDecoder(callback))
for l in open(csv_fn):
parts = l.split(',')
t = float(parts[0])
v = int(parts[1])
mux.receive(t, v)
if vcd_f:
vcd_f.close()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the POSIX time implementation."""
from __future__ import unicode_literals
import decimal
import unittest
from dfdatetime import posix_time
class PosixTimeEpochTest(unittest.TestCase):
"""Tests for the POSIX time epoch."""
def testInitialize(self):
"""Tests the __init__ function."""
posix_epoch = posix_time.PosixTimeEpoch()
self.assertIsNotNone(posix_epoch)
class PosixTimeTest(unittest.TestCase):
"""Tests for the POSIX timestamp."""
# pylint: disable=protected-access
def testProperties(self):
"""Tests the properties."""
posix_time_object = posix_time.PosixTime(timestamp=1281643591)
self.assertEqual(posix_time_object.timestamp, 1281643591)
posix_time_object = posix_time.PosixTime()
self.assertIsNone(posix_time_object.timestamp)
def testGetNormalizedTimestamp(self):
"""Tests the _GetNormalizedTimestamp function."""
posix_time_object = posix_time.PosixTime(timestamp=1281643591)
normalized_timestamp = posix_time_object._GetNormalizedTimestamp()
self.assertEqual(normalized_timestamp, decimal.Decimal('1281643591.0'))
posix_time_object = posix_time.PosixTime()
normalized_timestamp = posix_time_object._GetNormalizedTimestamp()
self.assertIsNone(normalized_timestamp)
def testCopyFromDateTimeString(self):
"""Tests the CopyFromDateTimeString function."""
posix_time_object = posix_time.PosixTime()
expected_timestamp = 1281571200
posix_time_object.CopyFromDateTimeString('2010-08-12')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281647191
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281647191
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.546875')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281650791
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.546875-01:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281643591
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.546875+01:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = -11644387200
posix_time_object.CopyFromDateTimeString('1601-01-02 00:00:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
def testCopyToDateTimeString(self):
"""Tests the CopyToDateTimeString function."""
posix_time_object = posix_time.PosixTime(timestamp=1281643591)
date_time_string = posix_time_object.CopyToDateTimeString()
self.assertEqual(date_time_string, '2010-08-12 20:06:31')
posix_time_object = posix_time.PosixTime()
date_time_string = posix_time_object.CopyToDateTimeString()
self.assertIsNone(date_time_string)
def testCopyToDateTimeStringISO8601(self):
"""Tests the CopyToDateTimeStringISO8601 function."""
posix_time_object = posix_time.PosixTime(timestamp=1281643591)
date_time_string = posix_time_object.CopyToDateTimeStringISO8601()
self.assertEqual(date_time_string, '2010-08-12T20:06:31Z')
# TODO: remove this method when there is no more need for it in dfvfs.
def testCopyToStatTimeTuple(self):
"""Tests the CopyToStatTimeTuple function."""
posix_time_object = posix_time.PosixTime(timestamp=1281643591)
stat_time_tuple = posix_time_object.CopyToStatTimeTuple()
self.assertEqual(stat_time_tuple, (1281643591, None))
posix_time_object = posix_time.PosixTime()
stat_time_tuple = posix_time_object.CopyToStatTimeTuple()
self.assertEqual(stat_time_tuple, (None, None))
def testGetDate(self):
"""Tests the GetDate function."""
posix_time_object = posix_time.PosixTime(timestamp=1281643591)
date_tuple = posix_time_object.GetDate()
self.assertEqual(date_tuple, (2010, 8, 12))
posix_time_object = posix_time.PosixTime()
date_tuple = posix_time_object.GetDate()
self.assertEqual(date_tuple, (None, None, None))
def testGetTimeOfDay(self):
"""Tests the GetTimeOfDay function."""
posix_time_object = posix_time.PosixTime(timestamp=1281643591)
time_of_day_tuple = posix_time_object.GetTimeOfDay()
self.assertEqual(time_of_day_tuple, (20, 6, 31))
posix_time_object = posix_time.PosixTime()
time_of_day_tuple = posix_time_object.GetTimeOfDay()
self.assertEqual(time_of_day_tuple, (None, None, None))
class PosixTimeInMillisecondsTest(unittest.TestCase):
"""Tests for the POSIX timestamp in milliseconds."""
# pylint: disable=protected-access
def testProperties(self):
"""Tests the properties."""
posix_time_object = posix_time.PosixTimeInMilliseconds(
timestamp=1281643591546)
self.assertEqual(posix_time_object.timestamp, 1281643591546)
posix_time_object = posix_time.PosixTimeInMilliseconds()
self.assertIsNone(posix_time_object.timestamp)
def testGetNormalizedTimestamp(self):
"""Tests the _GetNormalizedTimestamp function."""
posix_time_object = posix_time.PosixTimeInMilliseconds(
timestamp=1281643591546)
normalized_timestamp = posix_time_object._GetNormalizedTimestamp()
self.assertEqual(normalized_timestamp, decimal.Decimal('1281643591.546'))
posix_time_object = posix_time.PosixTimeInMilliseconds()
normalized_timestamp = posix_time_object._GetNormalizedTimestamp()
self.assertIsNone(normalized_timestamp)
# pylint: disable=protected-access
def testCopyFromDateTimeString(self):
"""Tests the CopyFromDateTimeString function."""
posix_time_object = posix_time.PosixTimeInMilliseconds()
expected_timestamp = 1281571200000
posix_time_object.CopyFromDateTimeString('2010-08-12')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281647191000
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281647191546
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.546')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281650791546
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.546-01:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281643591546
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.546+01:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = -11644387200000
posix_time_object.CopyFromDateTimeString('1601-01-02 00:00:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
def testCopyToDateTimeString(self):
"""Tests the CopyToDateTimeString function."""
posix_time_object = posix_time.PosixTimeInMilliseconds(
timestamp=1281643591546)
date_time_string = posix_time_object.CopyToDateTimeString()
self.assertEqual(date_time_string, '2010-08-12 20:06:31.546')
posix_time_object = posix_time.PosixTimeInMilliseconds()
date_time_string = posix_time_object.CopyToDateTimeString()
self.assertIsNone(date_time_string)
def testCopyToDateTimeStringISO8601(self):
"""Tests the CopyToDateTimeStringISO8601 function."""
posix_time_object = posix_time.PosixTimeInMilliseconds(
timestamp=1281643591546)
date_time_string = posix_time_object.CopyToDateTimeStringISO8601()
self.assertEqual(date_time_string, '2010-08-12T20:06:31.546Z')
# TODO: remove this method when there is no more need for it in dfvfs.
def testCopyToStatTimeTuple(self):
"""Tests the CopyToStatTimeTuple function."""
posix_time_object = posix_time.PosixTimeInMilliseconds(
timestamp=1281643591546)
stat_time_tuple = posix_time_object.CopyToStatTimeTuple()
self.assertEqual(stat_time_tuple, (1281643591, 5460000))
posix_time_object = posix_time.PosixTimeInMilliseconds()
stat_time_tuple = posix_time_object.CopyToStatTimeTuple()
self.assertEqual(stat_time_tuple, (None, None))
def testGetDate(self):
"""Tests the GetDate function."""
posix_time_object = posix_time.PosixTimeInMilliseconds(
timestamp=1281643591546)
date_tuple = posix_time_object.GetDate()
self.assertEqual(date_tuple, (2010, 8, 12))
posix_time_object = posix_time.PosixTimeInMilliseconds()
date_tuple = posix_time_object.GetDate()
self.assertEqual(date_tuple, (None, None, None))
def testGetTimeOfDay(self):
"""Tests the GetTimeOfDay function."""
posix_time_object = posix_time.PosixTimeInMilliseconds(
timestamp=1281643591546)
time_of_day_tuple = posix_time_object.GetTimeOfDay()
self.assertEqual(time_of_day_tuple, (20, 6, 31))
posix_time_object = posix_time.PosixTimeInMilliseconds()
time_of_day_tuple = posix_time_object.GetTimeOfDay()
self.assertEqual(time_of_day_tuple, (None, None, None))
class PosixTimeInMicrosecondsTest(unittest.TestCase):
"""Tests for the POSIX timestamp in microseconds."""
# pylint: disable=protected-access
def testProperties(self):
"""Tests the properties."""
posix_time_object = posix_time.PosixTimeInMicroseconds(
timestamp=1281643591546875)
self.assertEqual(posix_time_object.timestamp, 1281643591546875)
posix_time_object = posix_time.PosixTimeInMicroseconds()
self.assertIsNone(posix_time_object.timestamp)
def testGetNormalizedTimestamp(self):
"""Tests the _GetNormalizedTimestamp function."""
posix_time_object = posix_time.PosixTimeInMicroseconds(
timestamp=1281643591546875)
normalized_timestamp = posix_time_object._GetNormalizedTimestamp()
self.assertEqual(normalized_timestamp, decimal.Decimal('1281643591.546875'))
posix_time_object = posix_time.PosixTimeInMicroseconds()
normalized_timestamp = posix_time_object._GetNormalizedTimestamp()
self.assertIsNone(normalized_timestamp)
# pylint: disable=protected-access
def testCopyFromDateTimeString(self):
"""Tests the CopyFromDateTimeString function."""
posix_time_object = posix_time.PosixTimeInMicroseconds()
expected_timestamp = 1281571200000000
posix_time_object.CopyFromDateTimeString('2010-08-12')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281647191000000
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281647191546875
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.546875')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281650791546875
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.546875-01:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281643591546875
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.546875+01:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = -11644387200000000
posix_time_object.CopyFromDateTimeString('1601-01-02 00:00:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
def testCopyToDateTimeString(self):
"""Tests the CopyToDateTimeString function."""
posix_time_object = posix_time.PosixTimeInMicroseconds(
timestamp=1281643591546875)
date_time_string = posix_time_object.CopyToDateTimeString()
self.assertEqual(date_time_string, '2010-08-12 20:06:31.546875')
posix_time_object = posix_time.PosixTimeInMicroseconds()
date_time_string = posix_time_object.CopyToDateTimeString()
self.assertIsNone(date_time_string)
def testCopyToDateTimeStringISO8601(self):
"""Tests the CopyToDateTimeStringISO8601 function."""
posix_time_object = posix_time.PosixTimeInMicroseconds(
timestamp=1281643591546875)
date_time_string = posix_time_object.CopyToDateTimeStringISO8601()
self.assertEqual(date_time_string, '2010-08-12T20:06:31.546875Z')
# TODO: remove this method when there is no more need for it in dfvfs.
def testCopyToStatTimeTuple(self):
"""Tests the CopyToStatTimeTuple function."""
posix_time_object = posix_time.PosixTimeInMicroseconds(
timestamp=1281643591546875)
stat_time_tuple = posix_time_object.CopyToStatTimeTuple()
self.assertEqual(stat_time_tuple, (1281643591, 5468750))
posix_time_object = posix_time.PosixTimeInMicroseconds()
stat_time_tuple = posix_time_object.CopyToStatTimeTuple()
self.assertEqual(stat_time_tuple, (None, None))
def testGetDate(self):
"""Tests the GetDate function."""
posix_time_object = posix_time.PosixTimeInMicroseconds(
timestamp=1281643591546875)
date_tuple = posix_time_object.GetDate()
self.assertEqual(date_tuple, (2010, 8, 12))
posix_time_object = posix_time.PosixTimeInMicroseconds()
date_tuple = posix_time_object.GetDate()
self.assertEqual(date_tuple, (None, None, None))
def testGetTimeOfDay(self):
"""Tests the GetTimeOfDay function."""
posix_time_object = posix_time.PosixTimeInMicroseconds(
timestamp=1281643591546875)
time_of_day_tuple = posix_time_object.GetTimeOfDay()
self.assertEqual(time_of_day_tuple, (20, 6, 31))
posix_time_object = posix_time.PosixTimeInMicroseconds()
time_of_day_tuple = posix_time_object.GetTimeOfDay()
self.assertEqual(time_of_day_tuple, (None, None, None))
class PosixTimeInNanoSecondsTest(unittest.TestCase):
"""Tests for the POSIX timestamp in nanoseconds."""
# pylint: disable=protected-access
def testProperties(self):
"""Tests the properties."""
posix_time_object = posix_time.PosixTimeInNanoseconds(
timestamp=1281643591987654321)
self.assertEqual(posix_time_object.timestamp, 1281643591987654321)
posix_time_object = posix_time.PosixTimeInNanoseconds()
self.assertIsNone(posix_time_object.timestamp)
def testGetNormalizedTimestamp(self):
"""Tests the _GetNormalizedTimestamp function."""
posix_time_object = posix_time.PosixTimeInNanoseconds(
timestamp=1281643591987654321)
normalized_timestamp = posix_time_object._GetNormalizedTimestamp()
self.assertEqual(
normalized_timestamp, decimal.Decimal('1281643591.987654321'))
posix_time_object = posix_time.PosixTimeInNanoseconds()
normalized_timestamp = posix_time_object._GetNormalizedTimestamp()
self.assertIsNone(normalized_timestamp)
def testCopyFromDateTimeString(self):
"""Tests the CopyFromDateTimeString function."""
posix_time_object = posix_time.PosixTimeInNanoseconds()
expected_timestamp = 1281571200000000000
posix_time_object.CopyFromDateTimeString('2010-08-12')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281647191000000000
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281647191654321000
posix_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.654321')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281650791654321000
posix_time_object.CopyFromDateTimeString(
'2010-08-12 21:06:31.654321-01:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = 1281643591654321000
posix_time_object.CopyFromDateTimeString(
'2010-08-12 21:06:31.654321+01:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
expected_timestamp = -11644387200000000000
posix_time_object.CopyFromDateTimeString('1601-01-02 00:00:00')
self.assertEqual(posix_time_object.timestamp, expected_timestamp)
def testCopyToDateTimeString(self):
"""Tests the CopyToDateTimeString function."""
posix_time_object = posix_time.PosixTimeInNanoseconds(
timestamp=1281643591987654321)
date_time_string = posix_time_object.CopyToDateTimeString()
self.assertEqual(date_time_string, '2010-08-12 20:06:31.987654321')
posix_time_object = posix_time.PosixTimeInNanoseconds()
date_time_string = posix_time_object.CopyToDateTimeString()
self.assertIsNone(date_time_string)
def testCopyToDateTimeStringISO8601(self):
"""Tests the CopyToDateTimeStringISO8601 function."""
posix_time_object = posix_time.PosixTimeInNanoseconds(
timestamp=1281643591987654321)
date_time_string = posix_time_object.CopyToDateTimeStringISO8601()
self.assertEqual(date_time_string, '2010-08-12T20:06:31.987654321Z')
def testGetDate(self):
"""Tests the GetDate function."""
posix_time_object = posix_time.PosixTimeInNanoseconds(
timestamp=1281643591987654321)
date_tuple = posix_time_object.GetDate()
self.assertEqual(date_tuple, (2010, 8, 12))
posix_time_object = posix_time.PosixTimeInNanoseconds()
date_tuple = posix_time_object.GetDate()
self.assertEqual(date_tuple, (None, None, None))
def testGetTimeOfDay(self):
"""Tests the GetTimeOfDay function."""
posix_time_object = posix_time.PosixTimeInNanoseconds(
timestamp=1281643591987654321)
time_of_day_tuple = posix_time_object.GetTimeOfDay()
self.assertEqual(time_of_day_tuple, (20, 6, 31))
posix_time_object = posix_time.PosixTimeInNanoseconds()
time_of_day_tuple = posix_time_object.GetTimeOfDay()
self.assertEqual(time_of_day_tuple, (None, None, None))
if __name__ == '__main__':
unittest.main()
|
from django import forms
from django.forms import fields
from django.forms.widgets import Select
ISSUES = [("Economy", "Economy"), ("Health Care", "Health Care"),
("Terrorism", "Terrorism"), ("Environment", "Environment"), ]
class IssueForm(forms.Form):
issue = fields.ChoiceField(widget=Select(attrs={'class': 'form-control'}), choices=ISSUES)
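# Illustrative usage sketch (not part of the original module): handling IssueForm in a
# hypothetical view. The template name "issue.html" and the redirect target "/" are
# assumptions for the example only.
from django.shortcuts import redirect, render
def issue_view(request):
    if request.method == "POST":
        form = IssueForm(request.POST)
        if form.is_valid():
            selected_issue = form.cleaned_data["issue"]
            # ... act on `selected_issue` (e.g. record the vote) ...
            return redirect("/")
    else:
        form = IssueForm()
    return render(request, "issue.html", {"form": form})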
|
from __future__ import print_function
import math
from tqdm import tqdm
import numpy as np
import tensorflow as tf
def primes_in_range(low, high):
def _is_prime(v):
if v <= 3:
return v > 1
sqrt = int(math.sqrt(v))
i = 2
while i <= sqrt:
if v % i == 0:
return False
i += 1
return True
return [v for v in tqdm(range(low + 1, high), desc="prime generator") if _is_prime(v)]
def dhe(x, k=1024, m=1e6):
"""
Args:
x: tensor, [batch, 1]
k:
m:
Returns:
"""
np.random.seed(10)
a = np.random.randint(0, high=int(10 * m), size=k)
b = np.random.randint(1, high=int(10 * m), size=k)
primes = primes_in_range(int(m), int(5 * m))
p = np.array(primes)[np.random.randint(0, len(primes), size=k)]
# [7685385 8100989 5242852 ... 6036356 4850432 7258590] [9196472 594920 1018547 ... 7235258 8984625 7008918] [2834759 9429691 5537101 ... 4366669 9261647 5737213]
print(a, b, p)
if x.shape[-1] != 1:
x = tf.expand_dims(x, axis=-1)
a = tf.reshape(tf.constant(a), shape=[1, k])
b = tf.reshape(tf.constant(b), shape=[1, k])
p = tf.reshape(tf.constant(p), shape=[1, k])
if len(x.shape) == 2:
x = tf.tile(x, multiples=[1, k])
elif len(x.shape) == 3:
x = tf.tile(x, multiples=[1, 1, k])
a = tf.expand_dims(a, axis=0)
b = tf.expand_dims(b, axis=0)
p = tf.expand_dims(p, axis=0)
else:
raise ValueError("not supported tensor shape")
encod = tf.mod(tf.mod((a * x + b), p), int(m))
encod = (tf.cast(encod, dtype=tf.float32) / float(m - 1) - 0.5) * 2.0
return encod
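# Worked example of the hashing above (illustrative numbers, not the seeded values dhe
# actually draws): with m = 10, one hash with a = 3, b = 5 and prime p = 11 maps the id
# x = 2 to ((3*2 + 5) % 11) % 10 = 0, which is rescaled to (0 / 9 - 0.5) * 2 = -1.0;
# dhe stacks k such hashes per id to build the dense encoding.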
if __name__ == '__main__':
x = tf.constant([[1], [2]], dtype=tf.int64)
res = dhe(x, m=10)
print(res)
with tf.Session() as sess:
print(sess.run(res))
|
import unittest
from pizzerias import PizzeriasSample
class PizzeriasSampleTest(unittest.TestCase):
def test_sample(self):
n_of_block = 1000
n_of_shop = 10
seed = 42
sampler = PizzeriasSample(n_of_block, n_of_shops=n_of_shop, seed=seed)
res = [[(655, 693), 28], [(115, 759), 30], [(26, 914), 65], [(760, 559), 78], [(282, 90), 4], [(251, 605), 72],
[(229, 433), 26], [(143, 33), 92], [(755, 31), 84], [(105, 96), 90]]
self.assertEqual(res, sampler.sample(), "results of sample() method incorrect") |
from typing import Dict
import pymongo
class GlobalMetadataStore(object):
def __init__(self, connection_string: str, db: str):
self.client = pymongo.MongoClient(connection_string)
self.db = self.client[db]
self.workers_collection = self.db["workers"]
def get_all(self, worker_id: str) -> Dict:
worker = self.workers_collection.find_one({"worker_id": worker_id})
if "state" not in worker:
return {}
return worker["state"]
def set_all(self, worker_id: str, metadata: Dict):
self.workers_collection.update_one(
{"worker_id": worker_id}, {"$set": {"state": metadata}}, upsert=False
)
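# Illustrative usage sketch (the connection string, database name and worker id are
# assumptions, not values from this project). Note that set_all uses upsert=False, so a
# document for the worker must already exist in the "workers" collection:
#     store = GlobalMetadataStore("mongodb://localhost:27017", "scheduler")
#     store.set_all("worker-1", {"status": "idle", "jobs_done": 3})
#     state = store.get_all("worker-1")  # -> {"status": "idle", "jobs_done": 3}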
|
"""Riemann Theta Tests
References
----------
.. [CRTF] B. Deconinck, M. Heil, A. Bobenko, M. van Hoeij and M. Schmies,
Computing Riemann Theta Functions, Mathematics of Computation, 73, (2004),
1417-1442.
.. [DLMF] B. Deconinck, Digital Library of Mathematical Functions - Riemann Theta
Functions, http://dlmf.nist.gov/21
.. [SAGE] Computing Riemann theta functions in Sage with applications.
    C. Swierczewski and B. Deconinck. Submitted for publication. Available online
at http://depts.washington.edu/bdecon/papers/pdfs/Swierczewski_Deconinck1.pdf
"""
import unittest
import numpy
from numpy.random import randn
from numpy.linalg import norm, cholesky
from abelfunctions.riemann_theta.radius import radius
from abelfunctions.riemann_theta.riemann_theta import RiemannTheta
# try to import mpmath's jtheta function
NO_JTHETA = False
try:
from sympy.mpmath import jtheta
except ImportError:
try:
from mpmath import jtheta
except ImportError:
NO_JTHETA = True
def thetag1(z,tau,N=2048):
r"""Naive implementation of genus 1 theta function."""
return sum(numpy.exp(numpy.pi*1.j*tau*n**2 + 2.j*numpy.pi*n*z)
for n in range(-N,N))
thetag1 = numpy.vectorize(thetag1, otypes=(numpy.complex,), excluded=(1,2))
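# For reference, thetag1 above directly evaluates the truncated genus 1 series
#     theta(z, tau) = sum_{n=-N}^{N-1} exp(pi*1j*tau*n**2 + 2*pi*1j*n*z),
# which is what RiemannTheta is compared against in the genus 1 tests below.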
class TestMaple(unittest.TestCase):
def setUp(self):
self.Omega3 = numpy.array(
[[1.j, 0.5, 0.5],
[0.5, 1.j, 0.5],
[0.5, 0.5, 1.j]], dtype=numpy.complex)
self.Omega4 = numpy.array([
[ 0.39344262+0.79503971j, -0.75409836-0.36912558j,
-0.44262295-0.02839428j, 0.20491803+0.26974562j],
[-0.75409836-0.36912558j, 0.27868852+0.85182827j,
0.09836066+0.19875993j, -0.43442623-0.15616852j],
[-0.44262295-0.02839428j, 0.09836066+0.19875993j,
-0.37704918+0.68146261j, -0.91803279+0.45430841j],
[ 0.20491803+0.26974562j, -0.43442623-0.15616852j,
-0.91803279+0.45430841j, -1.27868852+0.88022254j]
])
def test_value(self):
z = [0,0,0]
Omega = self.Omega3
value = RiemannTheta(z, Omega, epsilon=1e-14)
maple = 1.2362529854204190 - 0.52099320642367818e-10j
error = abs(value - maple)
self.assertLess(error, 1e-8)
w = [0.2+0.5j, 0.3-0.1j, -0.1+0.2j]
value = RiemannTheta(w, Omega, epsilon=1e-14)
maple = 1.2544694041047501 - 0.77493173321770725j
error = abs(value - maple)
self.assertLess(error, 1e-8)
def test_first_derivatives(self):
w = [0.2+0.5j, 0.3-0.1j, -0.1+0.2j]
Omega = self.Omega3
value_z1 = RiemannTheta(w, Omega, epsilon=1e-14, derivs=[[1,0,0]])
value_z2 = RiemannTheta(w, Omega, epsilon=1e-14, derivs=[[0,1,0]])
value_z3 = RiemannTheta(w, Omega, epsilon=1e-14, derivs=[[0,0,1]])
maple_z1 = -5.7295900733729553 - 0.89199375315523345j
maple_z2 = -0.16300987772384356 - 0.65079269102999180j
maple_z3 = 1.0115406077003542 + 0.030528533907836019j
error_z1 = abs(value_z1 - maple_z1)
error_z2 = abs(value_z2 - maple_z2)
error_z3 = abs(value_z3 - maple_z3)
self.assertLess(error_z1, 1e-8)
self.assertLess(error_z2, 1e-8)
self.assertLess(error_z3, 1e-8)
Omega = self.Omega4
w = [0,0,0,0]
value_z1 = RiemannTheta(w, Omega, epsilon=1e-14, derivs=[[1,0,0,0]])
value_z2 = RiemannTheta(w, Omega, epsilon=1e-14, derivs=[[0,1,0,0]])
value_z3 = RiemannTheta(w, Omega, epsilon=1e-14, derivs=[[0,0,1,0]])
value_z4 = RiemannTheta(w, Omega, epsilon=1e-14, derivs=[[0,0,0,1]])
maple_z1 = 0.0
maple_z2 = 0.0
maple_z3 = 0.0
maple_z4 = 0.0
error_z1 = abs(value_z1 - maple_z1)
error_z2 = abs(value_z2 - maple_z2)
error_z3 = abs(value_z3 - maple_z3)
error_z4 = abs(value_z4 - maple_z4)
self.assertLess(error_z1, 1e-8)
self.assertLess(error_z2, 1e-8)
self.assertLess(error_z3, 1e-8)
self.assertLess(error_z4, 1e-8)
# different value of w
w = [-0.37704918-0.18456279j, 0.63934426+0.42591413j,
0.54918033+0.09937996j, -0.21721311-0.07808426j]
value_z1 = RiemannTheta(w, Omega, epsilon=1e-14, derivs=[[1,0,0,0]])
value_z2 = RiemannTheta(w, Omega, epsilon=1e-14, derivs=[[0,1,0,0]])
value_z3 = RiemannTheta(w, Omega, epsilon=1e-14, derivs=[[0,0,1,0]])
value_z4 = RiemannTheta(w, Omega, epsilon=1e-14, derivs=[[0,0,0,1]])
maple_z1 = 3.3644150756 + 2.5018071784j
maple_z2 = -2.9431860155 + 5.6802762853j
maple_z3 = 8.0319838396 + 3.5491434873j
maple_z4 = -6.0837267311 - 2.4867680289j
error_z1 = abs(value_z1 - maple_z1)
error_z2 = abs(value_z2 - maple_z2)
error_z3 = abs(value_z3 - maple_z3)
error_z4 = abs(value_z4 - maple_z4)
self.assertLess(error_z1, 1e-8)
self.assertLess(error_z2, 1e-8)
self.assertLess(error_z3, 1e-8)
self.assertLess(error_z4, 1e-8)
def test_first_derivatives_oscpart(self):
# different value of w
Omega = self.Omega4
w = [-0.37704918-0.18456279j, 0.63934426+0.42591413j,
0.54918033+0.09937996j, -0.21721311-0.07808426j]
value_z1 = RiemannTheta.oscillatory_part(
w, Omega, epsilon=1e-14, derivs=[[1,0,0,0]])
value_z2 = RiemannTheta.oscillatory_part(
w, Omega, epsilon=1e-14, derivs=[[0,1,0,0]])
value_z3 = RiemannTheta.oscillatory_part(
w, Omega, epsilon=1e-14, derivs=[[0,0,1,0]])
value_z4 = RiemannTheta.oscillatory_part(
w, Omega, epsilon=1e-14, derivs=[[0,0,0,1]])
maple_z1 = 1.723280564 + 1.281445835j
maple_z2 = -1.507523639 + 2.909483373j
maple_z3 = 4.114046968 + 1.817899948j
maple_z4 = -3.116133948 - 1.273742661j
error_z1 = abs(value_z1 - maple_z1)
error_z2 = abs(value_z2 - maple_z2)
error_z3 = abs(value_z3 - maple_z3)
error_z4 = abs(value_z4 - maple_z4)
self.assertLess(error_z1, 1e-8)
self.assertLess(error_z2, 1e-8)
self.assertLess(error_z3, 1e-8)
self.assertLess(error_z4, 1e-8)
def test_second_derivatives_oscpart(self):
w = [0.2+0.5j, 0.3-0.1j, -0.1+0.2j]
Omega = self.Omega3
H = RiemannTheta.oscillatory_part_hessian(w, Omega, epsilon=1e-14)
maple_00 = -2.160656081990225 + 14.02434682346524j
maple_01 = -1.483857302597929 - 0.9449250397349686j
maple_02 = 1.954110529051029 - 1.042434632145520j
maple_11 = 1.037397682580653 + 0.1077503940181105j
maple_12 = 0.09466454944980265 - 0.3593388338083318j
maple_22 = -0.3227275082474401 - 2.585609638196203j
error_00 = abs(H[0,0] - maple_00)
error_01 = abs(H[0,1] - maple_01)
error_02 = abs(H[0,2] - maple_02)
error_11 = abs(H[1,1] - maple_11)
error_12 = abs(H[1,2] - maple_12)
error_22 = abs(H[2,2] - maple_22)
self.assertLess(error_00, 1e-8)
self.assertLess(error_01, 1e-8)
self.assertLess(error_02, 1e-8)
self.assertLess(error_11, 1e-8)
self.assertLess(error_12, 1e-8)
self.assertLess(error_22, 1e-8)
def test_third_derivatives_oscpart(self):
w = [0.2+0.5j, 0.3-0.1j, -0.1+0.2j]
Omega = self.Omega3
dVec_1 = [[1,0,0],[1,0,0],[1,0,0]]
dVec_2 = [[1,0,0],[0,1,0],[1,0,0]]
dVec_3 = [[1,0,0],[0,0,1],[0,1,0]]
dVec_4 = [[0,1,0],[0,0,1],[0,1,0]]
dVec_5 = [[0,0,1],[0,0,1],[0,0,1]]
dVec_6 = [[0,0,1],[0,1,0],[0,0,1]]
dVec_7 = [[1,2,3.1],[2.9,-0.3,1.0],[-20,13.3,0.6684]]
maple_1 = 88.96174663331488 + 12.83401972101860j
maple_2 = -5.963646070489819 + 9.261504506522976j
maple_3 = -1.347499363888600 + 0.5297607158965981j
maple_4 = 1.217499355198950 + 0.8449102496878512j
maple_5 = -15.58299545726265 - 0.4376346712347114j
maple_6 = -2.441570516715710 - 0.2535384980716853j
maple_7 = -2791.345600876934 + 1286.207313664481j
deriv_1 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_1)
deriv_2 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_2)
deriv_3 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_3)
deriv_4 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_4)
deriv_5 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_5)
deriv_6 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_6)
deriv_7 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_7)
error_1 = abs(deriv_1 - maple_1)
error_2 = abs(deriv_2 - maple_2)
error_3 = abs(deriv_3 - maple_3)
error_4 = abs(deriv_4 - maple_4)
error_5 = abs(deriv_5 - maple_5)
error_6 = abs(deriv_6 - maple_6)
error_7 = abs(deriv_7 - maple_7)
self.assertLess(error_1, 1e-8)
self.assertLess(error_2, 1e-8)
self.assertLess(error_3, 1e-8)
self.assertLess(error_4, 1e-8)
self.assertLess(error_5, 1e-8)
self.assertLess(error_6, 1e-8)
self.assertLess(error_7, 1e-8)
# Genus 4 example
Omega = self.Omega4
w = [-0.37704918-0.18456279j, 0.63934426+0.42591413j,
0.54918033+0.09937996j, -0.21721311-0.07808426j]
dVec_1 = [[1,0,0,0],[1,0,0,0],[1,0,0,0]]
dVec_2 = [[1,0,0,0],[0,1,0,0],[0,0,1,0]]
dVec_3 = [[1,0,0,0],[0,0,1,0],[0,0,0,1]]
dVec_4 = [[1,0,0,0],[0,1,1,0],[1,0,0,1]]
dVec_5 = [[0,0,1,0],[0,1,1,0],[1,0,0,1]]
dVec_6 = [[0,0,1,0],[1,2,3,4],[1,0,0,1]]
dVec_7 = [[3.2,-9.8,0.004,-13.9],[0,2.4,0,4],[90.1,-12.93947,-1e-4,3]]
maple_1 = -67.14022021800414 - 50.25487358123665j
maple_2 = 6.220027066901749 - 16.96996479658767j
maple_3 = 14.42498231220689 + 16.30518807929409j
maple_4 = -35.67483045211793 - 18.14139876283777j
maple_5 = 53.25640352451774 + 18.93871689387491j
maple_6 = -185.6760275507559 - 93.99261766419004j
maple_7 = 239954.2751344823 + 129975.3988999572j
deriv_1 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_1)
deriv_2 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_2)
deriv_3 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_3)
deriv_4 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_4)
deriv_5 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_5)
deriv_6 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_6)
deriv_7 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_7)
error_1 = abs(deriv_1 - maple_1)
error_2 = abs(deriv_2 - maple_2)
error_3 = abs(deriv_3 - maple_3)
error_4 = abs(deriv_4 - maple_4)
error_5 = abs(deriv_5 - maple_5)
error_6 = abs(deriv_6 - maple_6)
error_7 = abs(deriv_7 - maple_7)
self.assertLess(error_1, 1e-8)
self.assertLess(error_2, 1e-8)
self.assertLess(error_3, 1e-8)
self.assertLess(error_4, 1e-8)
self.assertLess(error_5, 1e-8)
self.assertLess(error_6, 1e-8)
self.assertLess(error_7, 1e-8)
def test_sixth_derivatives(self):
w = [0.2+0.5j, 0.3-0.1j, -0.1+0.2j]
Omega = self.Omega3
dVec_1 = [[1,0,0],[1,0,0],[0,1,0],[0,0,1],[0,0,1],[0,1,0]]
dVec_2 = [[1,2,3],[4,5,6],[0.7,0.8,0.9],[0.8,0.7,0.6],[5,4,3],[2,1,0]]
#dVec_3 = [[-17.3, 6.2, 0],[3.4, 3, 1],[-9,-0.001, 2],
# [1e-2, 0, 19],[210, 0.5, 1.2],[31.323, 0.3, 3]]
#dVec_4 = [[1,2,3],[4,5,6],[7,8,9],[8,7,6],[5,4,3],[2,1,0]]
        # Neither of the above two examples passes the tests. It appears
        # that for higher-order derivatives, if the norm of the directional
        # derivative is too large, the accumulated numerical error exceeds
        # the 1e-8 tolerance used below, so those two cases are left disabled.
maple_1 = 42.73836471691125 + 235.2990585642670j
maple_2 = 0.2152838084588008*10**7 - 0.3287239590246880*10**7*1j
#maple_3 = 0.2232644817692030*10**12 - 0.1226563725159786*10**12*1j
#maple_4 = 0.2152838084588008*10**9 - 0.3287239590246880*10**9*1j
deriv_1 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_1)
deriv_2 = RiemannTheta.oscillatory_part(w, Omega,
epsilon=1e-14, derivs=dVec_2)
#deriv_3 = RiemannTheta.oscillatory_part(w, Omega,
# epsilon=1e-14, derivs=dVec_3)
#deriv_4 = RiemannTheta.oscillatory_part(w, Omega,
# epsilon=1e-14, derivs=dVec_4)
error_1 = abs(deriv_1 - maple_1)
error_2 = abs(deriv_2 - maple_2)
#error_3 = abs(deriv_3 - maple_3)
#error_4 = abs(deriv_4 - maple_4)
self.assertLess(error_1, 1e-8)
self.assertLess(error_2, 1e-8)
#self.assertLess(error_3, 1e-8)
#self.assertLess(error_4, 1e-8)
class TestRiemannThetaValues(unittest.TestCase):
def setUp(self):
self.Omega3 = numpy.array(
[[1.j, 0.5, 0.5],
[0.5, 1.j, 0.5],
[0.5, 0.5, 1.j]], dtype=numpy.complex)
self.Omega4 = numpy.array(
[[ 0.39344262+0.79503971j, -0.75409836-0.36912558j,
-0.44262295-0.02839428j, 0.20491803+0.26974562j],
[-0.75409836-0.36912558j, 0.27868852+0.85182827j,
0.09836066+0.19875993j, -0.43442623-0.15616852j],
[-0.44262295-0.02839428j, 0.09836066+0.19875993j,
-0.37704918+0.68146261j, -0.91803279+0.45430841j],
[ 0.20491803+0.26974562j, -0.43442623-0.15616852j,
-0.91803279+0.45430841j, -1.27868852+0.88022254j]],
dtype=numpy.complex)
def test_issue84_value(self):
z = [0.5-1.10093687j, -0.11723434j]
Omega = [[0.5+2j, 0.5+1j],
[0.5+1j, 1+1.5j]]
theta_actual = 0.963179246467 - 6.2286820685j
for _ in range(1000):
theta = RiemannTheta(z,Omega)
error = abs(theta - theta_actual)
self.assertLess(error, 1e-5,
'%s not less than %s'
'\ntheta: %s\nactual: %s'%(
error,1e-5,theta, theta_actual))
def test_issue84_radius(self):
Omega = [[0.5+2j, 0.5+1j],
[0.5+1j, 1+1.5j]]
Omega = numpy.array(Omega)
Y = Omega.imag
T = cholesky(Y).T
R_actual = 5.01708695504
for _ in range(1000):
R = radius(1e-8,T)
error = abs(R - R_actual)
self.assertLess(error, 1e-8)
def test_gradient(self):
Omega = self.Omega3
# generate random test z-values
N = 32
u = numpy.random.rand(N,3)
v = numpy.random.rand(N,3)
W = u + 1.0j*v
# manually compute gradients
dz0 = RiemannTheta(W,Omega,derivs=[[1,0,0]])
dz1 = RiemannTheta(W,Omega,derivs=[[0,1,0]])
dz2 = RiemannTheta(W,Omega,derivs=[[0,0,1]])
grad1 = numpy.zeros_like(W, dtype=numpy.complex)
grad1[:,0] = dz0
grad1[:,1] = dz1
grad1[:,2] = dz2
# compute using "gradient"
grad2 = RiemannTheta.gradient(W,Omega)
self.assertLess(numpy.linalg.norm(grad1-grad2), 1e-14)
Omega = self.Omega4
# generate random test z-values
N = 32
u = numpy.random.rand(N,4)
v = numpy.random.rand(N,4)
W = u + 1.0j*v
# manually compute gradients
dz0 = RiemannTheta(W,Omega,derivs=[[1,0,0,0]])
dz1 = RiemannTheta(W,Omega,derivs=[[0,1,0,0]])
dz2 = RiemannTheta(W,Omega,derivs=[[0,0,1,0]])
dz3 = RiemannTheta(W,Omega,derivs=[[0,0,0,1]])
grad1 = numpy.zeros_like(W, dtype=numpy.complex)
grad1[:,0] = dz0
grad1[:,1] = dz1
grad1[:,2] = dz2
grad1[:,3] = dz3
# compute using "gradient"
grad2 = RiemannTheta.gradient(W,Omega)
self.assertLess(numpy.linalg.norm(grad1-grad2), 1e-14)
def test_second_derivative_symmetric(self):
w = [0.2+0.5j, 0.3-0.1j, -0.1+0.2j]
Omega = [[1.j, 0.5, 0.5],
[0.5, 1.j, 0.5],
[0.5, 0.5, 1.j]]
dz_01 = RiemannTheta.oscillatory_part(
w, Omega, epsilon=1e-14, derivs=[[1,0,0],[0,1,0]])
dz_10 = RiemannTheta.oscillatory_part(
w, Omega, epsilon=1e-14, derivs=[[0,1,0],[1,0,0]])
dz_02 = RiemannTheta.oscillatory_part(
w, Omega, epsilon=1e-14, derivs=[[1,0,0],[0,0,1]])
dz_20 = RiemannTheta.oscillatory_part(
w, Omega, epsilon=1e-14, derivs=[[0,0,1],[1,0,0]])
dz_12 = RiemannTheta.oscillatory_part(
w, Omega, epsilon=1e-14, derivs=[[0,1,0],[0,0,1]])
dz_21 = RiemannTheta.oscillatory_part(
w, Omega, epsilon=1e-14, derivs=[[0,0,1],[0,1,0]])
error_01_10 = abs(dz_01 - dz_10)
error_02_20 = abs(dz_02 - dz_20)
error_12_21 = abs(dz_12 - dz_21)
self.assertLess(error_01_10, 1e-8)
self.assertLess(error_02_20, 1e-8)
self.assertLess(error_12_21, 1e-8)
def test_symmetric_hessian(self):
pass
def test_hessian(self):
pass
def test_against_naive_implementation_genus1(self):
# tests the genus 1 Riemann theta function against the naive
# implementation written above (directly using the summation formula).
# first test the relative error using values close to the origin,
# avoiding the double-exponential growth
N = 64
sigma = 0.1
z = sigma*randn(N) + 1.j*sigma*randn(N)
z = z.reshape((N,1))
tau = [[1.0j]]
values1 = RiemannTheta(z,tau,epsilon=1e-16)
values2 = thetag1(z,tau[0][0])[:,0]
rel_error = abs((values1-values2)/values1)
rel_error_max = numpy.max(rel_error)
rel_error_avg = numpy.mean(rel_error)
self.assertLess(rel_error_max,1e-14)
self.assertLess(rel_error_avg,1e-14)
# next, test the relative error using larger magnitude values. we don't
# test the max error due to possible numerical roundoff issues
sigma = 3
z = sigma*randn(N) + 1.j*sigma*randn(N)
z = z.reshape((N,1))
tau = [[1.0j]]
values1 = RiemannTheta(z,tau,epsilon=1e-16)
values2 = thetag1(z,tau[0][0])[:,0]
rel_error = abs((values1-values2)/values1)
rel_error_avg = numpy.mean(rel_error)
self.assertLess(rel_error_avg,1e-14)
# repeat for different tau
tau = [[1.0 + 2.5j]]
values1 = RiemannTheta(z,tau,epsilon=1e-16)
values2 = thetag1(z,tau[0][0])[:,0]
rel_error = abs((values1-values2)/values1)
rel_error_avg = numpy.mean(rel_error)
self.assertLess(rel_error_avg,1e-14)
@unittest.skipIf(NO_JTHETA, 'Could not find sympy.mpmath.jtheta')
def test_against_sympy_jtheta(self):
N = 64
sigma = 2
z = sigma*randn(N) + 1.j*sigma*randn(N)
z = z.reshape((N,1))
tau = [[1.0j]]
# jtheta inputs
w = numpy.pi*z[:,0]
q = numpy.exp(numpy.pi*1.0j*tau[0][0])
values1 = RiemannTheta(z,tau,epsilon=1e-16)
values2 = numpy.array([jtheta(3,wi,q) for wi in w],
dtype=numpy.complex)
rel_error = abs((values1-values2)/values1)
rel_error_avg = numpy.mean(rel_error)
self.assertLess(rel_error_avg,1e-14)
# repeat for different tau
tau = [[1.0 + 2.5j]]
q = numpy.exp(numpy.pi*1.0j*tau[0][0])
values1 = RiemannTheta(z,tau,epsilon=1e-16)
values2 = numpy.array([jtheta(3,wi,q) for wi in w],
dtype=numpy.complex)
rel_error = abs((values1-values2)/values1)
rel_error_avg = numpy.mean(rel_error)
self.assertLess(rel_error_avg,1e-14)
# def test_value_at_point(self):
# Omega = numpy.array(
# [[1.0 + 1.15700539j, -1.0 - 0.5773502693j],
# [-1.0 - 0.5773502693j, 1.0 + 1.154700539j]],
# dtype=numpy.complex)
# # first z-value
# z = numpy.array([1.0 - 1.0j, 1.0 + 1.0j])
# u = RiemannTheta.exponential_part(z, Omega)
# u_actual = 0
# u_delta = abs(u - u_actual)
# self.assertAlmostEqual(u_delta, 0)
# v = RiemannTheta.oscillatory_part(z, Omega)
# v_actual = 0
# v_delta = abs(v - v_actual)
# self.assertAlmostEqual(v_delta, 0)
# # second z-value
# z = numpy.array([1.0 - 1.0j, 1.0 + 1.0j])
# u = RiemannTheta.exponential_part(z, Omega)
# u_actual = 0
# u_delta = abs(u - u_actual)
# self.assertAlmostEqual(u_delta, 0)
# v = RiemannTheta.oscillatory_part(z, Omega)
# v_actual = 0
# v_delta = abs(v - v_actual)
# self.assertAlmostEqual(v_delta, 0)
# # same tests, different Omega
# Omega = numpy.array(
# [[1.0 + 1.15700539j, -1.0 - 0.5773502693j],
# [-1.0 - 0.5773502693j, 1.0 + 1.154700539j]],
# dtype=numpy.complex)
# # first z-value
# z = numpy.array([1.0 - 1.0j, 1.0 + 1.0j])
# u = RiemannTheta.exponential_part(z, Omega)
# u_actual = 0
# u_delta = abs(u - u_actual)
# self.assertAlmostEqual(u_delta, 0)
# v = RiemannTheta.oscillatory_part(z, Omega)
# v_actual = 0
# v_delta = abs(v - v_actual)
# self.assertAlmostEqual(v_delta, 0)
# # second z-value
# z = numpy.array([1.0 - 1.0j, 1.0 + 1.0j])
# u = RiemannTheta.exponential_part(z, Omega)
# u_actual = 0
# u_delta = abs(u - u_actual)
# self.assertAlmostEqual(u_delta, 0)
# v = RiemannTheta.oscillatory_part(z, Omega)
# v_actual = 0
# v_delta = abs(v - v_actual)
# self.assertAlmostEqual(v_delta, 0)
# def test_value_at_point_1_derivs(self):
# Omega = numpy.array([[1.0 + 1.15700539j, -1.0 - 0.5773502693j],
# [-1.0 - 0.5773502693j, 1.0 + 1.154700539j]],
# dtype=numpy.complex)
# # test 1
# derivs = [[1,0]]
# z = numpy.array([1.0 - 1.0j, 1.0 + 1.0j])
# u = RiemannTheta.exponential_part(z, Omega, derivs=derivs)
# u_actual = 0
# u_delta = abs(u - u_actual)
# self.assertAlmostEqual(u_delta, 0)
# v = RiemannTheta.oscillatory_part(z, Omega, derivs=derivs)
# v_actual = 0
# v_delta = abs(v - v_actual)
# self.assertAlmostEqual(v_delta, 0)
# # test 2
# derivs = [[0,1]]
# z = numpy.array([1.0 - 1.0j, 1.0 + 1.0j])
# u = RiemannTheta.exponential_part(z, Omega, derivs=derivs)
# u_actual = 0
# u_delta = abs(u - u_actual)
# self.assertAlmostEqual(u_delta, 0)
# v = RiemannTheta.oscillatory_part(z, Omega, derivs=derivs)
# v_actual = 0
# v_delta = abs(v - v_actual)
# self.assertAlmostEqual(v_delta, 0)
# def test_value_at_point_2_derivs(self):
# Omega = numpy.array([[1.0 + 1.15700539j, -1.0 - 0.5773502693j],
# [-1.0 - 0.5773502693j, 1.0 + 1.154700539j]],
# dtype=numpy.complex)
# # test 1
# derivs = [[1,0]]
# z = numpy.array([1.0 - 1.0j, 1.0 + 1.0j])
# u = RiemannTheta.exponential_part(z, Omega, derivs=derivs)
# u_actual = 0
# u_delta = abs(u - u_actual)
# self.assertAlmostEqual(u_delta, 0)
# v = RiemannTheta.oscillatory_part(z, Omega, derivs=derivs)
# v_actual = 0
# v_delta = abs(v - v_actual)
# self.assertAlmostEqual(v_delta, 0)
# # test 2
# derivs = [[0,1]]
# z = numpy.array([1.0 - 1.0j, 1.0 + 1.0j])
# u = RiemannTheta.exponential_part(z, Omega, derivs=derivs)
# u_actual = 0
# u_delta = abs(u - u_actual)
# self.assertAlmostEqual(u_delta, 0)
# v = RiemannTheta.oscillatory_part(z, Omega, derivs=derivs)
# v_actual = 0
# v_delta = abs(v - v_actual)
# self.assertAlmostEqual(v_delta, 0)
#########################################################################
# def test_zeroCharacteristic(self): #
# #Replace with random z, omega, or set of such #
# z = numpy.array([1.0j, 0]) #
# omega = numpy.matrix( #
# [[1.0j, -0.5], #
# [-0.5, 1.0j]] #
# ) #
# char = [[0,0],[0,0]] #
# thetaValue = RiemannTheta(z, omega, batch = False) #
# thetaCharValue = RiemannTheta.characteristic(char, z, omega) #
# delta = scipy.linalg.norm(thetaValue - thetaCharValue, numpy.inf) #
# self.assertAlmostEqual(delta,0) #
# #
# def test_jacobiTheta1(self): #
# #Test against sympy mpmath.jtheta(1,z) #
# z = 1.0j #
# q_omega = .1 #
# char = [[.5],[.5]] #
# jTheta1 = jtheta(1, numpy.pi * z, q_omega) #
# thetaValue = RiemannTheta.characteristic(char, z, q_omega) #
# self.assertAlmostEqual(jTheta1, -thetaValue) #
# #
# #
# def test_jacobiTheta2(self): #
# #Test against sympy mpmath.jtheta(2,z) #
# z = 1.0j #
# q_omega = .1 #
# char = [[.5],[0]] #
# jTheta2 = jtheta(2, numpy.pi * z, q_omega) #
# thetaValue = RiemannTheta.characteristic(char, z, q_omega) #
# self.assertAlmostEqual(jTheta2, thetaValue) #
# #
# def test_jacobiTheta3(self): #
# #Test against sympy mpmath.jtheta(3,z) #
# z = 1.0j #
# q_omega = .1 #
# char = [[0],[0]] #
# jTheta3 = jtheta(3, numpy.pi * z, q_omega) #
# thetaValue = RiemannTheta.characteristic(char, z, q_omega) #
# self.assertAlmostEqual(jTheta3, thetaValue) #
# #
# def test_jacobiTheta4(self): #
# #Test against sympy mpmath.jtheta(4,z) #
# z = 1.0j #
# q_omega = .1 #
# char = [[0],[.5]] #
# jTheta3 = jtheta(4, numpy.pi * z, q_omega) #
# thetaValue = RiemannTheta.characteristic(char, z, q_omega) #
# self.assertAlmostEqual(jTheta3, thetaValue) #
# #
# def test_zParity(self): #
# z = numpy.array([1.0j, 0]) #
# omega = numpy.matrix( #
# [[1.0j, -0.5], #
# [-0.5, 1.0j]] #
# ) #
# theta1 = RiemannTheta.value_at_point(z, omega, batch = False) #
# theta2 = RiemannTheta.value_at_point(-z, omega, batch = False) #
# self.assertAlmostEqual(theta1,theta2) #
# #
# def test_zIntShift(self): #
# z = numpy.array([1.0j, 0]) #
# omega = numpy.matrix( #
# [[1.0j, -0.5], #
# [-0.5, 1.0j]] #
# ) #
# m = numpy.array([1, 1]) #
# theta1 = RiemannTheta.value_at_point(z, omega, batch = False) #
# theta2 = RiemannTheta.value_at_point(z + m, omega, batch = False) #
# self.assertAlmostEqual(theta1,theta2) #
# #
# def test_quasiPeriodic(self): #
# #Test for DLMF 21.3.3 #
# pass #
# #
# def test_characteristicShift(self): #
# #Test for DLMF 21.3.4 #
# pass #
# #
# def test_halfperiodCharacteristic(self): #
# #Test for DLMF 21.3.6 #
# pass #
#########################################################################
|
_base_ = "./FlowNet512_1.5AugCosyAAEGray_AggressiveV3_Flat_Pbr_01_02MasterChefCan.py"
OUTPUT_DIR = "output/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV3_Flat_ycbvPbr_SO/05_06MustardBottle"
DATASETS = dict(TRAIN=("ycbv_006_mustard_bottle_train_pbr",))
|
from audio_calculations import audio_calculations as ac
#One Channel 24-bit Wave File
def test_One_24():
a = ac('testfiles/SineWave.wav')
info = a.select_file()
assert 48000 == info[1]
assert 1 == info[2]
assert -9.1 == round(info[3],1)
assert -6.0 == round(info[4],1)
#One Channel 16-bit wave file
def test_One_16():
a = ac('testfiles/BabyElephantWalk60.wav')
info = a.select_file()
assert 22050 == info[1]
assert 1 == info[2]
assert -23.1 == round(info[3],1)
assert -5.3 == round(info[4],1)
#One Channel 8-bit wave file
def test_One_8():
a = ac('testfiles/taunt.wav')
info = a.select_file()
assert 22257 == info[1]
assert 1 == info[2]
assert -12.1 == round(info[3],1)
assert 0.6 == round(info[4],1)
#Two Channel - 24-bit Flac file
def test_Two_24_flac():
a = ac('testfiles/2_Channel_24_48_minus6db.flac')
info = a.select_file()
assert 48000 == info[1]
assert 2 == info[2]
assert -4.8 == round(info[3],1)
assert -0.6 == round(info[4],1)
#Two Channel - 24-bit Wave file
def test_Two_24():
a = ac('testfiles/2_Channel_24_48_minus6db.wav')
info = a.select_file()
assert 48000 == info[1]
assert 2 == info[2]
assert -4.8 == round(info[3],1)
assert -0.6 == round(info[4],1)
#Two Channel - 16-bit Wave file
def test_Two_16():
a = ac('testfiles/Adele.wav')
info = a.select_file()
assert 44100 == info[1]
assert 2 == info[2]
assert -8.5 == round(info[3],1)
assert -0.4 == round(info[4],1)
#Two Channel - 24-bit mp4 file
def test_Two_24_mp4():
a = ac('testfiles/2_Channel_24_48_minus6db.mp4')
info = a.select_file()
assert 44100 == info[1]
assert 2 == info[2]
assert -5.0 == round(info[3],1)
assert -0.4 == round(info[4],1)
#Six Channel - 24-bit wave file
def test_Six_24_Wave():
a = ac('testfiles/6_Channel_White_Noise.wav')
info = a.select_file()
assert 48000 == info[1]
assert 6 == info[2]
assert -0.2 == round(info[3],1)
assert -0.1 == round(info[4],1) |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup for Model Maker.
To build package:
python setup.py sdist bdist_wheel
To install directly:
pip install -e .
To uninstall:
pip uninstall tflite-model-maker
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import pathlib
import sys
import setup_util
from setuptools import setup
nightly = False
if '--nightly' in sys.argv:
nightly = True
sys.argv.remove('--nightly')
project_name = 'tflite-model-maker'
datestring = datetime.datetime.now().strftime('%Y%m%d%H%M')
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
]
# Set package version.
if nightly:
project_name = '{}-nightly'.format(project_name)
version = '0.3.5' # Version prefix, usually major.minor.micro.
version = '{:s}.dev{:s}'.format(version, datestring)
classifiers += [
'Development Status :: 4 - Beta',
]
else:
# LINT.IfChange(model_maker_pip_version)
version = '0.3.4'
# LINT.ThenChange(../public/__init__.py, ../RELEASE.md)
# Path to model_maker dir: <repo>/tensorflow_examples/lite/model_maker
BASE_DIR = pathlib.Path(os.path.abspath(__file__)).parents[1]
# Path to root dir: <repo>
ROOT_DIR = BASE_DIR.parents[2]
# Original namespace of the lib.
LIB_NAMESPACE = 'tensorflow_examples.lite.model_maker'
# Official package namespace for API. Used as code name.
API_NAMESPACE = 'tflite_model_maker'
# Internal package tflite_model_maker.python mapping internal packages.
INTERNAL_NAME = 'python'
MODEL_MAKER_CONSOLE = 'tflite_model_maker=tflite_model_maker.python.cli.cli:main'
# Build dir `pip_package/src`: copy all source code and create a package.
SRC_NAME = 'src'
BUILD_DIR = BASE_DIR.joinpath('pip_package').joinpath(SRC_NAME)
# Setup options.
setup_options = {
'package_dir': {
'': SRC_NAME
},
'entry_points': {
'console_scripts': [MODEL_MAKER_CONSOLE,],
},
}
DESCRIPTION = ('TFLite Model Maker: a model customization library for on-device'
' applications.')
with BASE_DIR.joinpath('README.md').open() as readme_file:
LONG_DESCRIPTION = readme_file.read()
def _read_required_packages(fpath):
with fpath.open() as f:
required_pkgs = [l.strip() for l in f.read().splitlines()]
required_pkgs = list(
filter(lambda line: line and not line.startswith('#'), required_pkgs))
return required_pkgs
def get_required_packages():
"""Gets packages inside requirements.txt."""
# Gets model maker's required packages
filename = 'requirements_nightly.txt' if nightly else 'requirements.txt'
fpath = BASE_DIR.joinpath(filename)
required_pkgs = _read_required_packages(fpath)
return required_pkgs
extra_options = setup_util.PackageGen(BASE_DIR, ROOT_DIR, BUILD_DIR, nightly,
version, LIB_NAMESPACE, API_NAMESPACE,
INTERNAL_NAME).run()
setup_options.update(extra_options)
setup(
name=project_name,
version=version,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author='Google LLC',
author_email='[email protected]',
url='http://github.com/tensorflow/examples',
download_url='https://github.com/tensorflow/examples/tags',
license='Apache 2.0',
scripts=[],
install_requires=get_required_packages(),
classifiers=classifiers,
keywords=['tensorflow', 'lite', 'model customization', 'transfer learning'],
**setup_options)
|
import boto3
import json
import apiGatewayLogger as logger
def main():
#* load boto client
client = boto3.client('apigateway')
#* set logger
logger.setLogger('authorizer_enable_apiGateway_cognito.log')
#* get api key with default to prod
api_id = GetAPIId()
#* get a dict of resources for the API {resource_name:resource_id,[methods]}
resources_dict = GetResources(client, api_id)
#* get authorizer
authorizer = GetAuthorizer()
#* Create Authorizer
authorizer = CreateAuthorizer(client, api_id, authorizer)
#* configure authorizer
ConfigureAuthorizer(client, api_id, authorizer, resources_dict)
def GetAPIId():
api_id = input('Enter API Gateway ID (press enter for default): ')
if api_id == '':
api_id = 'ez9pmaodek'
logger.inputTrace('API ID', api_id)
return api_id
def GetAuthorizer():
authorizer = {'type':'COGNITO_USER_POOLS'}
authorizer_name = input('Enter the authorizer name (press enter for default): ')
if authorizer_name == '':
authorizer_name = 'prod-api-authorizer'
logger.inputTrace('Authorizer Name', authorizer_name)
authorizer['name'] = authorizer_name
providerARNs = GetUserPools()
authorizer['providerARNs'] = providerARNs
    identitySource = input('Enter the authorizer identity source request header (press enter for default "Authorization"): ')
if identitySource == '':
identitySource = 'Authorization'
identitySource = 'method.request.header.' + identitySource
logger.inputTrace('Authorizer Identity Source', identitySource)
authorizer['identitySource'] = identitySource
authorizer_ttl = input('Enter the authorizer time to live (in seconds): ')
if authorizer_ttl == '':
authorizer_ttl = '300'
    try:
        authorizer_ttl = int(authorizer_ttl)
    except ValueError:
        authorizer_ttl = 300
        print('Time to live must be numerical; it has been set to the default of 300 seconds!')
logger.inputTrace('Authorizer Time To Live', str(authorizer_ttl))
authorizer['TTL'] = authorizer_ttl
return authorizer
def GetUserPools():
client = boto3.client('cognito-idp')
userPool_lst = client.list_user_pools(
MaxResults = 60 #must be less or equal to 60
)
userPool_lst = userPool_lst['UserPools']
logger.generatedDebug('Cognito User Pools', ','.join(str(x) for x in userPool_lst))
print('List of all Cognito User Pools:')
length = len(userPool_lst)
for l in range(length):
print (str(l + 1)+'. '+userPool_lst[l]['Name'], end=' ')
print(' ')
    selected_pools = input('Select user pools to be used by their numbers, separated by commas (default is 1): ')
    if selected_pools == '':
        selected_pools = [1]
    else:
        try:
            selected_pools = [int(num) for num in selected_pools.split(',')]
        except ValueError:
            selected_pools = [1]
            print('Your selection must be numbers separated by commas; reverting to the default user pool!')
logger.inputTrace('User Pools selected:', ','.join(str(x) for x in selected_pools))
    if any(num < 1 or num > length for num in selected_pools):
        print('Your selection is out of range!')
        return GetUserPools()
pools_lst = []
for p in selected_pools:
id = userPool_lst[p-1]['Id']
arn = 'arn:aws:cognito-idp:us-west-2:061431082068:userpool/' + id
pools_lst.append(arn)
logger.generatedDebug('User Pools ARNs', pools_lst)
return pools_lst
def GetResources(client, api_id):
response_get_resources = client.get_resources(
restApiId=api_id
)
resources = response_get_resources['items']
resources_dict = {}
for resource in resources:
#print(resource)
resource_id = resource['id']
resource_path = resource['path']
methods = []
        if resource_path != '/':  # avoid the root path, which doesn't have methods
resource_methods = resource['resourceMethods']
for method in resource_methods:
methods.append(method)
resource = resource_path.split('/')
length = len(resource)
if length == 3:
resource = resource[-1].replace('{', '')
resource = resource.replace('}', '')
elif length == 4:
resource = resource[-2].replace('{', '') + '_' + resource[-1]
resource = resource.replace('}', '')
elif length == 2 and resource[-1] != '':
resource = resource[-1]
if type(resource) == str:
temp_lst = [resource_id, methods]
resources_dict[resource] = temp_lst
logger.generatedDebug('API Resource Dictionary', json.dumps(resources_dict))
return resources_dict
def CreateAuthorizer(client, api_id, authorizer):
response = client.create_authorizer(
restApiId = api_id,
name = authorizer['name'],
type = authorizer['type'],
providerARNs = authorizer['providerARNs'],
identitySource = authorizer['identitySource'],
authorizerResultTtlInSeconds = authorizer['TTL']
)
logger.createInfo('Authorizer', json.dumps(response))
authorizer['id'] = response['id']
return authorizer
def ConfigureAuthorizer(client, api_id, authorizer, resources_dict):
for resource in resources_dict:
resource_id = resources_dict[resource][0]
methods = resources_dict[resource][1]
for method in methods:
response = client.update_method(
restApiId = api_id,
resourceId = resource_id,
httpMethod = method,
patchOperations=[{
'op': 'replace',
'path': '/authorizationType',
'value': authorizer['type']
},
{
'op': 'replace',
'path': '/authorizerId',
'value': authorizer['id']
}]
)
logger.runTrace('Authorizer is configured on', method)
print(response)
if __name__ == "__main__":
main() |
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
"""
Preprocessing -- Predicting Breast Cancer Proliferation Scores with
Apache SystemML
This module contains functions for the preprocessing phase of the
breast cancer project.
"""
import math
import os
import numpy as np
import openslide
from openslide import OpenSlideError
from openslide.deepzoom import DeepZoomGenerator
import pandas as pd
from pyspark.ml.linalg import Vectors
import pyspark.sql.functions as F
from scipy.ndimage.morphology import binary_fill_holes
from skimage.color import rgb2gray
from skimage.feature import canny
from skimage.morphology import binary_closing, binary_dilation, disk
# Open Whole-Slide Image
def open_slide(slide_num, folder, training):
"""
Open a whole-slide image, given an image number.
Args:
slide_num: Slide image number as an integer.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
Returns:
An OpenSlide object representing a whole-slide image.
"""
if training:
filename = os.path.join(folder, "training_image_data",
"TUPAC-TR-{}.svs".format(str(slide_num).zfill(3)))
else:
# Testing images
filename = os.path.join(folder, "testing_image_data",
"TUPAC-TE-{}.svs".format(str(slide_num).zfill(3)))
try:
slide = openslide.open_slide(filename)
except OpenSlideError:
slide = None
except FileNotFoundError:
slide = None
return slide
# Create Tile Generator
def create_tile_generator(slide, tile_size, overlap):
"""
Create a tile generator for the given slide.
This generator is able to extract tiles from the overall
whole-slide image.
Args:
slide: An OpenSlide object representing a whole-slide image.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
Returns:
A DeepZoomGenerator object representing the tile generator. Each
extracted tile is a PIL Image with shape
(tile_size, tile_size, channels).
Note: This generator is not a true "Python generator function", but
rather is an object that is capable of extracting individual tiles.
"""
generator = DeepZoomGenerator(slide, tile_size=tile_size, overlap=overlap, limit_bounds=True)
return generator
# Determine 20x Magnification Zoom Level
def get_20x_zoom_level(slide, generator):
"""
Return the zoom level that corresponds to a 20x magnification.
The generator can extract tiles from multiple zoom levels,
downsampling by a factor of 2 per level from highest to lowest
resolution.
Args:
slide: An OpenSlide object representing a whole-slide image.
generator: A DeepZoomGenerator object representing a tile generator.
Note: This generator is not a true "Python generator function",
but rather is an object that is capable of extracting individual
tiles.
Returns:
Zoom level corresponding to a 20x magnification, or as close as
possible.
"""
highest_zoom_level = generator.level_count - 1 # 0-based indexing
try:
mag = int(slide.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER])
# `mag / 20` gives the downsampling factor between the slide's
# magnification and the desired 20x magnification.
# `(mag / 20) / 2` gives the zoom level offset from the highest
# resolution level, based on a 2x downsampling factor in the
# generator.
offset = math.floor((mag / 20) / 2)
level = highest_zoom_level - offset
except ValueError:
# In case the slide magnification level is unknown, just
# use the highest resolution.
level = highest_zoom_level
return level
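# Worked example (illustrative): for a slide scanned at 40x whose generator exposes 17
# zoom levels, highest_zoom_level = 16 and offset = floor((40 / 20) / 2) = 1, so tiles
# are extracted from level 15, i.e. one 2x downsampling below full resolution (~20x).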
# Generate Tile Indices For Whole-Slide Image.
def process_slide(slide_num, folder, training, tile_size, overlap):
"""
Generate all possible tile indices for a whole-slide image.
Given a slide number, tile size, and overlap, generate
all possible (slide_num, tile_size, overlap, zoom_level, col, row)
indices.
Args:
slide_num: Slide image number as an integer.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
Returns:
A list of (slide_num, tile_size, overlap, zoom_level, col, row)
integer index tuples representing possible tiles to extract.
"""
# Open slide.
slide = open_slide(slide_num, folder, training)
# Create tile generator.
generator = create_tile_generator(slide, tile_size, overlap)
# Get 20x zoom level.
zoom_level = get_20x_zoom_level(slide, generator)
# Generate all possible (zoom_level, col, row) tile index tuples.
cols, rows = generator.level_tiles[zoom_level]
tile_indices = [(slide_num, tile_size, overlap, zoom_level, col, row)
for col in range(cols) for row in range(rows)]
return tile_indices
# Generate Tile From Tile Index
def process_tile_index(tile_index, folder, training):
"""
Generate a tile from a tile index.
Given a (slide_num, tile_size, overlap, zoom_level, col, row) tile
index, generate a (slide_num, tile) tuple.
Args:
tile_index: A (slide_num, tile_size, overlap, zoom_level, col, row)
integer index tuple representing a tile to extract.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
Returns:
A (slide_num, tile) tuple, where slide_num is an integer, and tile
is a 3D NumPy array of shape (tile_size, tile_size, channels) in
RGB format.
"""
slide_num, tile_size, overlap, zoom_level, col, row = tile_index
# Open slide.
slide = open_slide(slide_num, folder, training)
# Create tile generator.
generator = create_tile_generator(slide, tile_size, overlap)
# Generate tile.
tile = np.asarray(generator.get_tile(zoom_level, (col, row)))
return (slide_num, tile)
# Filter Tile For Dimensions & Tissue Threshold
def optical_density(tile):
"""
Convert a tile to optical density values.
Args:
tile: A 3D NumPy array of shape (tile_size, tile_size, channels).
Returns:
A 3D NumPy array of shape (tile_size, tile_size, channels)
representing optical density values.
"""
tile = tile.astype(np.float64)
#od = -np.log10(tile/255 + 1e-8)
od = -np.log((tile+1)/240)
return od
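# Worked example (illustrative): a bright background pixel with value 239 maps to
# OD = -ln(240/240) = 0, while a very dark pixel with value 0 maps to
# OD = -ln(1/240) = ln(240) ~= 5.48, so denser tissue yields larger optical density.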
def keep_tile(tile_tuple, tile_size, tissue_threshold):
"""
Determine if a tile should be kept.
This filters out tiles based on size and a tissue percentage
  threshold, using a custom algorithm. If a tile has height &
  width equal to (tile_size, tile_size), and its tissue coverage is
  greater than or equal to the given percentage, then it will be kept;
otherwise it will be filtered out.
Args:
tile_tuple: A (slide_num, tile) tuple, where slide_num is an
integer, and tile is a 3D NumPy array of shape
(tile_size, tile_size, channels).
tile_size: The width and height of a square tile to be generated.
tissue_threshold: Tissue percentage threshold.
Returns:
A Boolean indicating whether or not a tile should be kept for
future usage.
"""
slide_num, tile = tile_tuple
if tile.shape[0:2] == (tile_size, tile_size):
tile_orig = tile
# Check 1
# Convert 3D RGB image to 2D grayscale image, from
# 0 (dense tissue) to 1 (plain background).
tile = rgb2gray(tile)
# 8-bit depth complement, from 1 (dense tissue)
# to 0 (plain background).
tile = 1 - tile
# Canny edge detection with hysteresis thresholding.
# This returns a binary map of edges, with 1 equal to
# an edge. The idea is that tissue would be full of
# edges, while background would not.
tile = canny(tile)
# Binary closing, which is a dilation followed by
# an erosion. This removes small dark spots, which
# helps remove noise in the background.
tile = binary_closing(tile, disk(10))
# Binary dilation, which enlarges bright areas,
# and shrinks dark areas. This helps fill in holes
# within regions of tissue.
tile = binary_dilation(tile, disk(10))
# Fill remaining holes within regions of tissue.
tile = binary_fill_holes(tile)
# Calculate percentage of tissue coverage.
percentage = tile.mean()
check1 = percentage >= tissue_threshold
# Check 2
# Convert to optical density values
tile = optical_density(tile_orig)
# Threshold at beta
beta = 0.15
tile = np.min(tile, axis=2) >= beta
# Apply morphology for same reasons as above.
tile = binary_closing(tile, disk(2))
tile = binary_dilation(tile, disk(2))
tile = binary_fill_holes(tile)
percentage = tile.mean()
check2 = percentage >= tissue_threshold
return check1 and check2
else:
return False
# Generate Samples From Tile
def process_tile(tile_tuple, sample_size, grayscale):
"""
Process a tile into a group of smaller samples.
Cut up a tile into smaller blocks of sample_size x sample_size pixels,
change the shape of each sample from (H, W, channels) to
(channels, H, W), then flatten each into a vector of length
channels*H*W.
Args:
tile_tuple: A (slide_num, tile) tuple, where slide_num is an
integer, and tile is a 3D NumPy array of shape
(tile_size, tile_size, channels).
sample_size: The new width and height of the square samples to be
generated.
grayscale: Whether or not to generate grayscale samples, rather
than RGB.
Returns:
A list of (slide_num, sample) tuples representing cut up tiles,
where each sample is a 3D NumPy array of shape
(sample_size_x, sample_size_y, channels).
"""
slide_num, tile = tile_tuple
if grayscale:
tile = rgb2gray(tile)[:, :, np.newaxis] # Grayscale
# Save disk space and future IO time by converting from [0,1] to [0,255],
# at the expense of some minor loss of information.
tile = np.round(tile * 255).astype("uint8")
x, y, ch = tile.shape
# 1. Reshape into a 5D array of (num_x, sample_size_x, num_y, sample_size_y, ch), where
# num_x and num_y are the number of chopped tiles on the x and y axes, respectively.
# 2. Swap sample_size_x and num_y axes to create
# (num_x, num_y, sample_size_x, sample_size_y, ch).
# 3. Combine num_x and num_y into single axis, returning
# (num_samples, sample_size_x, sample_size_y, ch).
samples = (tile.reshape((x // sample_size, sample_size, y // sample_size, sample_size, ch))
.swapaxes(1,2)
.reshape((-1, sample_size, sample_size, ch)))
samples = [(slide_num, sample) for sample in list(samples)]
return samples
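# Shape walkthrough (illustrative): with tile_size=1024, sample_size=256 and RGB input,
# a (1024, 1024, 3) tile is reshaped to (4, 256, 4, 256, 3), the axis swap gives
# (4, 4, 256, 256, 3), and the final reshape yields 16 samples of shape (256, 256, 3).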
# Normalize staining
def normalize_staining(sample_tuple, beta=0.15, alpha=1, light_intensity=255):
"""
  Normalize the staining of H&E histology slides.
  This implementation follows the stain normalization approach of Macenko et al.
  (see the references below).
References:
- Macenko, Marc, et al. "A method for normalizing histology slides
for quantitative analysis." Biomedical Imaging: From Nano to Macro,
2009. ISBI'09. IEEE International Symposium on. IEEE, 2009.
- http://wwwx.cs.unc.edu/~mn/sites/default/files/macenko2009.pdf
- https://github.com/mitkovetta/staining-normalization
Args:
sample_tuple: A (slide_num, sample) tuple, where slide_num is an
integer, and sample is a 3D NumPy array of shape (H,W,C).
Returns:
A (slide_num, sample) tuple, where the sample is a 3D NumPy array
of shape (H,W,C) that has been stain normalized.
"""
# Setup.
slide_num, sample = sample_tuple
x = np.asarray(sample)
h, w, c = x.shape
x = x.reshape(-1, c).astype(np.float64) # shape (H*W, C)
# Reference stain vectors and stain saturations. We will normalize all slides
# to these references. To create these, grab the stain vectors and stain
# saturations from a desirable slide.
# Values in reference implementation for use with eigendecomposition approach, natural log,
# and `light_intensity=240`.
#stain_ref = np.array([0.5626, 0.2159, 0.7201, 0.8012, 0.4062, 0.5581]).reshape(3,2)
#max_sat_ref = np.array([1.9705, 1.0308]).reshape(2,1)
# SVD w/ log10, and `light_intensity=255`.
stain_ref = (np.array([0.54598845, 0.322116, 0.72385198, 0.76419107, 0.42182333, 0.55879629])
.reshape(3,2))
max_sat_ref = np.array([0.82791151, 0.61137274]).reshape(2,1)
# Convert RGB to OD.
# Note: The original paper used log10, and the reference implementation used the natural log.
#OD = -np.log((x+1)/light_intensity) # shape (H*W, C)
OD = -np.log10(x/light_intensity + 1e-8)
# Remove data with OD intensity less than beta.
# I.e. remove transparent pixels.
# Note: This needs to be checked per channel, rather than
# taking an average over all channels for a given pixel.
OD_thresh = OD[np.all(OD >= beta, 1), :] # shape (K, C)
# Calculate eigenvectors.
# Note: We can either use eigenvector decomposition, or SVD.
#eigvals, eigvecs = np.linalg.eig(np.cov(OD_thresh.T)) # np.cov results in inf/nans
U, s, V = np.linalg.svd(OD_thresh, full_matrices=False)
# Extract two largest eigenvectors.
# Note: We swap the sign of the eigvecs here to be consistent
# with other implementations. Both +/- eigvecs are valid, with
# the same eigenvalue, so this is okay.
#top_eigvecs = eigvecs[:, np.argsort(eigvals)[-2:]] * -1
top_eigvecs = V[0:2, :].T * -1 # shape (C, 2)
# Project thresholded optical density values onto plane spanned by
# 2 largest eigenvectors.
proj = np.dot(OD_thresh, top_eigvecs) # shape (K, 2)
# Calculate angle of each point wrt the first plane direction.
# Note: the parameters are `np.arctan2(y, x)`
angles = np.arctan2(proj[:, 1], proj[:, 0]) # shape (K,)
# Find robust extremes (a and 100-a percentiles) of the angle.
min_angle = np.percentile(angles, alpha)
max_angle = np.percentile(angles, 100-alpha)
# Convert min/max vectors (extremes) back to optimal stains in OD space.
# This computes a set of axes for each angle onto which we can project
# the top eigenvectors. This assumes that the projected values have
# been normalized to unit length.
extreme_angles = np.array(
[[np.cos(min_angle), np.cos(max_angle)],
[np.sin(min_angle), np.sin(max_angle)]]
) # shape (2,2)
stains = np.dot(top_eigvecs, extreme_angles) # shape (C, 2)
# Merge vectors with hematoxylin first, and eosin second, as a heuristic.
if stains[0, 0] < stains[0, 1]:
stains[:, [0, 1]] = stains[:, [1, 0]] # swap columns
# Calculate saturations of each stain.
# Note: Here, we solve
# OD = VS
# S = V^{-1}OD
# where `OD` is the matrix of optical density values of our image,
# `V` is the matrix of stain vectors, and `S` is the matrix of stain
# saturations. Since this is an overdetermined system, we use the
# least squares solver, rather than a direct solve.
sats, _, _, _ = np.linalg.lstsq(stains, OD.T)
# Normalize stain saturations to have same pseudo-maximum based on
# a reference max saturation.
max_sat = np.percentile(sats, 99, axis=1, keepdims=True)
sats = sats / max_sat * max_sat_ref
# Compute optimal OD values.
OD_norm = np.dot(stain_ref, sats)
# Recreate image.
# Note: If the image is immediately converted to uint8 with `.astype(np.uint8)`, it will
  # not return the correct values due to the initial values being outside of [0,255].
# To fix this, we round to the nearest integer, and then clip to [0,255], which is the
# same behavior as Matlab.
#x_norm = np.exp(OD_norm) * light_intensity # natural log approach
x_norm = 10**(-OD_norm) * light_intensity - 1e-8 # log10 approach
x_norm = np.clip(np.round(x_norm), 0, 255).astype(np.uint8)
x_norm = x_norm.astype(np.uint8)
x_norm = x_norm.T.reshape(h,w,c)
return (slide_num, x_norm)
def flatten_sample(sample_tuple):
"""
Flatten a (H,W,C) sample into a (C*H*W) row vector.
Transpose each sample from (H, W, channels) to (channels, H, W), then
flatten each into a vector of length channels*H*W.
Args:
sample_tuple: A (slide_num, sample) tuple, where slide_num is an
integer, and sample is a 3D NumPy array of shape (H,W,C).
Returns:
A (slide_num, sample) tuple, where the sample has been transposed
from (H,W,C) to (C,H,W), and flattened to a vector of length
(C*H*W).
"""
slide_num, sample = sample_tuple
# 1. Swap axes from (sample_size_x, sample_size_y, ch) to
# (ch, sample_size_x, sample_size_y).
# 2. Flatten sample into (ch*sample_size_x*sample_size_y).
flattened_sample = sample.transpose(2,0,1).reshape(-1)
return (slide_num, flattened_sample)
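# Shape walkthrough (illustrative): an RGB sample of shape (256, 256, 3) is transposed
# to (3, 256, 256) and flattened to a vector of length 3*256*256 = 196608.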
# Get Ground Truth Labels
def get_labels_df(folder, filename="training_ground_truth.csv"):
"""
Create a DataFrame with the ground truth labels for each slide.
Args:
folder: Directory containing a `training_ground_truth.csv` file
containing the ground truth "tumor_score" and "molecular_score"
labels for each slide.
Returns:
A Pandas DataFrame containing the ground truth labels for each
slide.
"""
filepath = os.path.join(folder, filename)
labels_df = pd.read_csv(filepath, names=["tumor_score", "molecular_score"], header=None)
labels_df["slide_num"] = labels_df.index + 1 # slide numbering starts at 1
labels_df.set_index("slide_num", drop=False, inplace=True) # use the slide num as index
return labels_df
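# Usage sketch (illustrative only; the "data" folder is a placeholder path):
#   >>> labels_df = get_labels_df("data")
#   >>> labels_df.at[1, "tumor_score"]  # ground truth label for slide 1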
# Process All Slides Into A Spark DataFrame
def preprocess(spark, slide_nums, folder="data", training=True, tile_size=1024, overlap=0,
tissue_threshold=0.9, sample_size=256, grayscale=False, normalize_stains=True,
num_partitions=20000):
"""
Preprocess a set of whole-slide images.
Preprocess a set of whole-slide images as follows:
1. Tile the slides into tiles of size (tile_size, tile_size, 3).
2. Filter the tiles to remove unnecessary tissue.
3. Cut the remaining tiles into samples of size
(sample_size, sample_size, ch), where `ch` is 1 if `grayscale`
is true, or 3 otherwise.
Args:
spark: SparkSession.
slide_nums: List of whole-slide numbers to process.
folder: Local directory in which the slides folder and ground truth
      file are stored, as a string. This should contain a
`training_image_data` folder with images in the format
`TUPAC-TR-###.svs`, as well as a `training_ground_truth.csv` file
containing the ground truth "tumor_score" and "molecular_score"
labels for each slide. Alternatively, the folder should contain a
`testing_image_data` folder with images in the format
`TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
tissue_threshold: Tissue percentage threshold for filtering.
sample_size: The new width and height of the square samples to be
generated.
grayscale: Whether or not to generate grayscale samples, rather
than RGB.
normalize_stains: Whether or not to apply stain normalization.
num_partitions: Number of partitions to use during processing.
Returns:
A Spark DataFrame in which each row contains the slide number, tumor
score, molecular score, and the sample stretched out into a Vector.
"""
# Filter out broken slides
# Note: "Broken" here is due to a "version of OpenJPEG with broken support for chroma-subsampled
# images".
slides = (spark.sparkContext
.parallelize(slide_nums)
.filter(lambda slide: open_slide(slide, folder, training) is not None))
# Create DataFrame of all tile locations and increase number of partitions
# to avoid OOM during subsequent processing.
tile_indices = (slides.flatMap(
lambda slide: process_slide(slide, folder, training, tile_size, overlap)))
  # TODO: Explore computing the ideal partition sizes based on projected number
# of tiles after filtering. I.e. something like the following:
#rows = tile_indices.count()
#part_size = 128
#channels = 1 if grayscale else 3
#row_mb = tile_size * tile_size * channels * 8 / 1024 / 1024 # size of one row in MB
#rows_per_part = round(part_size / row_mb)
#num_parts = rows / rows_per_part
tile_indices = tile_indices.repartition(num_partitions)
tile_indices.cache()
# Extract all tiles into an RDD, filter, cut into smaller samples, apply stain
# normalization, and flatten.
tiles = tile_indices.map(lambda tile_index: process_tile_index(tile_index, folder, training))
filtered_tiles = tiles.filter(lambda tile: keep_tile(tile, tile_size, tissue_threshold))
samples = filtered_tiles.flatMap(lambda tile: process_tile(tile, sample_size, grayscale))
if normalize_stains:
samples = samples.map(lambda sample: normalize_staining(sample))
samples = samples.map(lambda sample: flatten_sample(sample))
# Convert to a DataFrame
if training:
# Append labels
labels_df = get_labels_df(folder)
samples_with_labels = (samples.map(
lambda tup: (int(tup[0]), int(labels_df.at[tup[0],"tumor_score"]),
float(labels_df.at[tup[0],"molecular_score"]), Vectors.dense(tup[1]))))
df = samples_with_labels.toDF(["slide_num", "tumor_score", "molecular_score", "sample"])
df = df.select(df.slide_num.astype("int"), df.tumor_score.astype("int"),
df.molecular_score, df["sample"])
else: # testing data -- no labels
df = samples.toDF(["slide_num", "sample"])
df = df.select(df.slide_num.astype("int"), df["sample"])
return df
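# Usage sketch (illustrative only; the slide range and folder are assumptions
# about the dataset layout, not values taken from this module):
#   >>> df = preprocess(spark, slide_nums=list(range(1, 501)), folder="data",
#   ...                 sample_size=256, grayscale=False)
#   >>> df.printSchema()  # slide_num, tumor_score, molecular_score, sample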
# Save DataFrame
def save(df, filepath, sample_size, grayscale, mode="error", format="parquet", file_size=128):
"""
Save a preprocessed DataFrame with a constraint on the file sizes.
Args:
df: A Spark DataFrame.
filepath: Hadoop-supported path at which to save `df`.
sample_size: The width and height of the square samples.
    grayscale: Whether or not the samples are in grayscale format, rather
      than RGB.
mode: Specifies the behavior of `df.write.mode` when the data
already exists. Options include:
* `append`: Append contents of this DataFrame to
existing data.
* `overwrite`: Overwrite existing data.
* `error`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already
exists.
format: The format in which to save the DataFrame.
file_size: Size in MB of each saved file. 128 MB is an
empirically ideal size.
"""
channels = 1 if grayscale else 3
row_mb = sample_size * sample_size * channels * 8 / 1024 / 1024 # size of one row in MB
rows_per_file = round(file_size / row_mb)
df.write.option("maxRecordsPerFile", rows_per_file).mode(mode).save(filepath, format=format)
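# Worked example of the sizing heuristic above, assuming 256x256 RGB samples
# stored as 8-byte doubles: row_mb = 256*256*3*8 / 1024 / 1024 = 1.5 MB, so
# rows_per_file = round(128 / 1.5) = 85 records per ~128 MB file.
#   >>> save(df, "data/samples.parquet", sample_size=256, grayscale=False,
#   ...      mode="overwrite")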
# Utilities
def add_row_indices(df, training=True):
"""
Add a row index column for faster data ingestion times with SystemML.
Args:
df: A Spark DataFrame in which each row contains the slide number,
tumor score, molecular score, and the sample stretched out into a
Vector.
training: Boolean for training or testing datasets.
Returns:
The Spark DataFrame with a row index column called "__INDEX".
"""
rdd = (df.rdd
.zipWithIndex()
.map(lambda r: (r[1] + 1, *r[0]))) # flatten & convert index to 1-based indexing
if training:
df = rdd.toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample'])
df = df.select(df["__INDEX"].astype("int"), df.slide_num.astype("int"),
df.tumor_score.astype("int"), df.molecular_score, df["sample"])
else: # testing data -- no labels
df = rdd.toDF(["__INDEX", "slide_num", "sample"])
df = df.select(df["__INDEX"].astype("int"), df.slide_num.astype("int"), df["sample"])
return df
def sample(df, frac, training=True, seed=None):
"""
Sample the DataFrame, stratified on the class.
Args:
df: A Spark DataFrame in which each row contains the slide number,
tumor score, molecular score, and the sample stretched out into a
Vector.
frac: Fraction of rows to keep.
training: Boolean for training or testing datasets.
seed: Random seed used for the sampling.
Returns:
A stratified sample of the original Spark DataFrame.
"""
df_sample = df.sampleBy("tumor_score", fractions={1: frac, 2: frac, 3: frac}, seed=seed)
return df_sample
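# Usage sketch (illustrative only): keep a 1% sample of the training
# DataFrame, stratified over the three tumor_score classes.
#   >>> train_sample = sample(df, frac=0.01, seed=42)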
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, caffe2_xavier_init
from ..builder import NECKS
from .decode_head import BaseDecodeHead
class PPM(nn.ModuleList):
"""Pooling Pyramid Module used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
conv_cfg (dict|None): Config of conv layers.
norm_cfg (dict|None): Config of norm layers.
act_cfg (dict): Config of activation layers.
align_corners (bool): align_corners argument of F.interpolate.
"""
def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg, act_cfg, align_corners, **kwargs):
super(PPM, self).__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
for pool_scale in pool_scales:
self.append(
nn.Sequential(
nn.AdaptiveAvgPool2d(pool_scale),
ConvModule(
self.in_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
**kwargs)))
def forward(self, x):
"""Forward function."""
ppm_outs = []
for ppm in self:
ppm_out = ppm(x)
upsampled_ppm_out = F.interpolate(ppm_out, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners)
ppm_outs.append(upsampled_ppm_out)
return ppm_outs
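# Usage sketch (illustrative only; the channel sizes and norm/act configs are
# assumptions, not values taken from this module):
#   >>> ppm = PPM((1, 2, 3, 6), in_channels=2048, channels=512, conv_cfg=None,
#   ...           norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'),
#   ...           align_corners=False)
#   >>> outs = ppm(torch.randn(2, 2048, 16, 16))
#   >>> [o.shape for o in outs]  # four tensors of shape (2, 512, 16, 16)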
@NECKS.register_module()
class UPerHead(BaseDecodeHead):
"""Unified Perceptual Parsing for Scene Understanding.
This head is the implementation of `UPerNet
<https://arxiv.org/abs/1807.10221>`_.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module applied on the last feature. Default: (1, 2, 3, 6).
"""
def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):
super(UPerHead, self).__init__(input_transform='multiple_select', **kwargs)
# PSP Module
self.psp_modules = PPM(
pool_scales,
self.in_channels[-1],
self.channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
align_corners=self.align_corners)
self.bottleneck = ConvModule(
self.in_channels[-1] + len(pool_scales) * self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
# FPN Module
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
l_conv = ConvModule(
in_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
inplace=False)
fpn_conv = ConvModule(
self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
self.fpn_bottleneck = ConvModule(
len(self.in_channels) * self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def init_weights(self):
"""Initialize the weights of module."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
caffe2_xavier_init(m)
def psp_forward(self, inputs):
"""Forward function of PSP module."""
x = inputs[-1]
psp_outs = [x]
psp_outs.extend(self.psp_modules(x))
psp_outs = torch.cat(psp_outs, dim=1)
output = self.bottleneck(psp_outs)
return output
def forward(self, inputs):
"""Forward function."""
inputs = self._transform_inputs(inputs)
# build laterals
laterals = [lateral_conv(inputs[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(inputs))
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] += F.interpolate(laterals[i], size=prev_shape, mode='bilinear', align_corners=self.align_corners)
# build outputs
fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1, 0, -1):
fpn_outs[i] = F.interpolate(fpn_outs[i], size=fpn_outs[0].shape[2:], mode='bilinear', align_corners=self.align_corners)
fpn_outs = torch.cat(fpn_outs, dim=1)
output = self.fpn_bottleneck(fpn_outs)
output = [output]
return output
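# Usage sketch (illustrative only; the constructor keywords below are
# assumptions about the BaseDecodeHead signature, which is not shown here):
#   >>> head = UPerHead(in_channels=[256, 512, 1024, 2048], channels=512,
#   ...                 in_index=[0, 1, 2, 3], num_classes=19)
#   >>> feats = [torch.randn(2, c, 128 // 2**i, 128 // 2**i)
#   ...          for i, c in enumerate([256, 512, 1024, 2048])]
#   >>> out, = head(feats)  # single fused feature map at the largest scale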
|
import os
import sys
# All supported rounding modes
round_nearest = intern('n')
round_floor = intern('f')
round_ceiling = intern('c')
round_up = intern('u')
round_down = intern('d')
round_fast = round_down
def prec_to_dps(n):
"""Return number of accurate decimals that can be represented
with a precision of n bits."""
return max(1, int(round(int(n)/3.3219280948873626)-1))
def dps_to_prec(n):
"""Return the number of bits required to represent n decimals
accurately."""
return max(1, int(round((int(n)+1)*3.3219280948873626)))
def repr_dps(n):
"""Return the number of decimal digits required to represent
a number with n-bit precision so that it can be uniquely
reconstructed from the representation."""
dps = prec_to_dps(n)
if dps == 15:
return 17
return dps + 3
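# Worked examples of the conversions above: a standard 53-bit float carries
# about 15 accurate decimal digits and needs 17 digits to round-trip.
#   >>> prec_to_dps(53)
#   15
#   >>> dps_to_prec(15)
#   53
#   >>> repr_dps(53)
#   17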
#----------------------------------------------------------------------------#
# Support GMPY for high-speed large integer arithmetic. #
# #
# To allow an external module to handle arithmetic, we need to make sure #
# that all high-precision variables are declared of the correct type. MP_BASE#
# is the constructor for the high-precision type. It defaults to Python's #
# long type but can be assigned another type, typically gmpy.mpz.            #
# #
# MP_BASE must be used for the mantissa component of an mpf and must be used #
# for internal fixed-point operations. #
# #
# Side-effects #
# 1) "is" cannot be used to test for special values. Must use "==". #
# 2) There are bugs in GMPY prior to v1.02 so we must use v1.03 or later. #
#----------------------------------------------------------------------------#
# So we can import it from this module
gmpy = None
MODE = 'python'
MP_BASE = long
if 'MPMATH_NOGMPY' not in os.environ:
try:
import gmpy
if gmpy.version() >= '1.03':
MODE = 'gmpy'
MP_BASE = gmpy.mpz
except:
pass
if os.environ.has_key('MPMATH_STRICT'):
STRICT = True
else:
STRICT = False
MP_BASE_TYPE = type(MP_BASE(0))
MP_ZERO = MP_BASE(0)
MP_ONE = MP_BASE(1)
MP_TWO = MP_BASE(2)
MP_THREE = MP_BASE(3)
MP_FIVE = MP_BASE(5)
if MODE == 'gmpy':
int_types = (int, long, MP_BASE_TYPE)
else:
int_types = (int, long)
class Context(object):
def __repr__(self):
lines = ["Mpmath settings:",
(" mp.prec = %s" % self.prec).ljust(30) + "[default: 53]",
(" mp.dps = %s" % self.dps).ljust(30) + "[default: 15]",
(" mp.trap_complex = %s" % self.trap_complex).ljust(30) + "[default: False]",
]
return "\n".join(lines)
def default(self):
self._prec = prec_rounding[0] = 53
self._dps = 15
self.trap_complex = False
def set_prec(self, n):
self._prec = prec_rounding[0] = max(1, int(n))
self._dps = prec_to_dps(n)
def set_dps(self, n):
self._prec = prec_rounding[0] = dps_to_prec(n)
self._dps = max(1, int(n))
prec = property(lambda self: self._prec, set_prec)
dps = property(lambda self: self._dps, set_dps)
# Hack for fast access
prec_rounding = [53, round_nearest]
mp = Context()
mp.default()
class PrecisionManager:
def __init__(self, precfun, dpsfun, normalize_output=False):
self.precfun = precfun
self.dpsfun = dpsfun
self.normalize_output = normalize_output
def __call__(self, f):
def g(*args, **kwargs):
orig = mp.prec
try:
if self.precfun:
mp.prec = self.precfun(mp.prec)
else:
mp.dps = self.dpsfun(mp.dps)
if self.normalize_output:
v = f(*args, **kwargs)
if type(v) is tuple:
return tuple([+a for a in v])
return +v
else:
return f(*args, **kwargs)
finally:
mp.prec = orig
g.__name__ = f.__name__
g.__doc__ = f.__doc__
return g
def __enter__(self):
self.origp = mp.prec
if self.precfun:
mp.prec = self.precfun(mp.prec)
else:
mp.dps = self.dpsfun(mp.dps)
def __exit__(self, exc_type, exc_val, exc_tb):
mp.prec = self.origp
return False
def extraprec(n, normalize_output=False):
"""
The block
with extraprec(n):
<code>
increases the precision n bits, executes <code>, and then
restores the precision.
extraprec(n)(f) returns a decorated version of the function f
that increases the working precision by n bits before execution,
and restores the parent precision afterwards. With
normalize_output=True, it rounds the return value to the parent
precision.
"""
return PrecisionManager(lambda p: p + n, None, normalize_output)
def extradps(n, normalize_output=False):
"""
This function is analogous to extraprec (see documentation)
but changes the decimal precision instead of the number of bits.
"""
return PrecisionManager(None, lambda d: d + n, normalize_output)
def workprec(n, normalize_output=False):
"""
The block
with workprec(n):
<code>
sets the precision to n bits, executes <code>, and then restores
the precision.
workprec(n)(f) returns a decorated version of the function f
that sets the precision to n bits before execution,
and restores the precision afterwards. With normalize_output=True,
it rounds the return value to the parent precision.
"""
return PrecisionManager(lambda p: n, None, normalize_output)
def workdps(n, normalize_output=False):
"""
This function is analogous to workprec (see documentation)
but changes the decimal precision instead of the number of bits.
"""
return PrecisionManager(None, lambda d: n, normalize_output)
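# Usage sketch (illustrative only): both the context-manager and decorator
# forms restore the caller's precision afterwards.
#   >>> with extraprec(10):
#   ...     pass  # code here runs with mp.prec + 10 bits
#   >>> @workdps(30)
#   ... def compute():
#   ...     return mp.dps  # runs with 30 decimal digits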
|
# -*- coding: utf-8 -*-
import numpy as np
from .datatuple import DataTuple
__all__ = [
'floatX', 'split_dataframe_to_arrays', 'split_numpy_array', 'slice_length',
'merge_slices', 'merge_slice_indices', 'merge_slice_masks',
'minibatch_indices_iterator', 'adaptive_density',
]
def floatX(array):
"""Enforce the value type of ``array`` to default float type.
Parameters
----------
array : numpy.ndarray
Numpy array, whose value type should be enforced to default
float type. If a number is specified, it will be converted
to 0-dimensional numpy array.
Returns
-------
numpy.ndarray
The converted numpy array.
"""
from madoka import config
return np.asarray(array, dtype=config.floatX)
def split_dataframe_to_arrays(df, label_as_int=False):
"""Split data frame to numpy arrays, with ``label`` column splitted.
Parameters
----------
df : pandas.DataFrame
Original data frame, which is expected to contain ``label`` column.
label_as_int : bool
If set to True, will discretize the labels into int32, according to
condition ``labels >= 0.5``.
Returns
-------
DataTuple
        The split numpy arrays, where the first contains the features without
        the labels, and the second contains the labels.
        If the returned features contain only one column, they will be flattened.
"""
features = df.drop('label', axis=1, inplace=False)
labels = df['label']
if label_as_int:
labels = np.asarray(labels >= 0.5, dtype=np.int32)
else:
labels = labels.values
features = features.values
if features.shape[1] == 1:
features = features.flatten()
return DataTuple(features, labels)
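# Usage sketch (illustrative only; the toy DataFrame is a placeholder and the
# tuple unpacking assumes DataTuple behaves like a plain tuple):
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'x': [0.1, 0.9], 'label': [0.2, 0.8]})
#   >>> features, labels = split_dataframe_to_arrays(df, label_as_int=True)
#   >>> labels
#   array([0, 1], dtype=int32)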
def split_numpy_array(array_or_arrays, right_portion=None, right_size=None,
shuffle=True):
"""Split numpy arrays into two halves, by portion or by size.
Parameters
----------
array_or_arrays : numpy.ndarray | tuple[numpy.ndarray] | DataTuple
Numpy array, a tuple of numpy arrays, or a ``DataTuple`` instance.
right_portion : float
Portion of the right half. Ignored if ``right_size`` is specified.
right_size : int
Size of the right half.
shuffle : bool
        Whether or not to shuffle before splitting.
Returns
-------
(numpy.ndarray, numpy.ndarray) | (DataTuple, DataTuple)
        Split training and validation data.
        If the given data is a single array, returns the split data
in a tuple of single arrays. Otherwise a tuple of ``DataTuple``
instances.
"""
if isinstance(array_or_arrays, np.ndarray):
direct_value = True
data_count = len(array_or_arrays)
elif isinstance(array_or_arrays, (tuple, list, DataTuple)):
direct_value = False
data_count = len(array_or_arrays[0])
else:
raise TypeError(
'%r is neither a numpy array, nor a tuple of arrays.' %
array_or_arrays
)
if right_size is None:
if right_portion is None:
raise ValueError('At least one of "right_portion", "right_size" '
'should be specified.')
if right_portion < 0.5:
right_size = data_count - int(data_count * (1.0 - right_portion))
else:
right_size = int(data_count * right_portion)
if right_size < 0:
right_size = 0
if right_size > data_count:
right_size = data_count
if shuffle:
indices = np.arange(data_count)
np.random.shuffle(indices)
get_train = lambda v: v[indices[: -right_size]]
get_valid = lambda v: v[indices[-right_size:]]
else:
get_train = lambda v: v[: -right_size, ...]
get_valid = lambda v: v[-right_size:, ...]
if direct_value:
return (get_train(array_or_arrays), get_valid(array_or_arrays))
else:
return (DataTuple(*(get_train(v) for v in array_or_arrays)),
DataTuple(*(get_valid(v) for v in array_or_arrays)))
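# Usage sketch: split ten rows into eight training rows and two validation
# rows without shuffling.
#   >>> x = np.arange(10)
#   >>> train, valid = split_numpy_array(x, right_size=2, shuffle=False)
#   >>> len(train), len(valid)
#   (8, 2)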
def _slice_length_sub(start, stop, step):
if step > 0:
ret = (stop - start + step - 1) // step
elif step < 0:
ret = (start - stop - step - 1) // (-step)
else:
raise ValueError('Step of slice cannot be 0.')
if ret < 0:
ret = 0
return ret
def slice_length(length, slice_):
"""Compute the length of slice."""
start, stop, step = slice_.indices(length)
return _slice_length_sub(start, stop, step)
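# Worked example: slice_length matches the length of the sliced sequence.
#   >>> slice_length(10, slice(None, None, 2))
#   5
#   >>> len(list(range(10))[::2])
#   5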
def merge_slices(length, *slices):
"""Merge multiple slices into one.
When we slice some object with the merged slice, it should produce
exactly the same output as if we slice the object with original
slices, one after another. That is to say, if we have:
merged_slice = merge_slices(len(array), slice1, slice2, ..., sliceN)
Then these two arrays should be the same:
array1 = array[slice1][slice2]...[sliceN]
array2 = array[merged_slice]
Parameters
----------
length : int
Length of the array that will be sliced.
*slices : tuple[slice]
Sequence of slices to be merged.
Returns
-------
slice
"""
if isinstance(length, slice):
raise TypeError('`length` is not specified.')
# deal with degenerated situations.
if not slices:
return slice(0, None, None)
if len(slices) == 1:
return slices[0]
# merge slices
def merge_two(slice1, slice2):
start1, stop1, step1 = slice1.indices(length)
# compute the actual length of slice1
length1 = _slice_length_sub(start1, stop1, step1)
# if the length becomes zero after applying slice1, we can stop here
# by returning an empty slice
if length1 <= 0:
return None
# now, apply slice2 on the slice1
start2, stop2, step2 = slice2.indices(length1)
length2 = _slice_length_sub(start2, stop2, step2)
if length2 <= 0:
return None
# shift slice2 by slice1
step = step1 * step2
start = start1 + start2 * step1
assert(0 <= start <= length - 1)
stop = start1 + stop2 * step1
assert ((stop - start) * step >= 0)
if step < 0:
# special fix: here stop <= -1 indicates to include all data
# before start
if stop <= -1:
stop = None
else:
# special fix: here stop >= n indicates to include all data
# after start
if stop >= length:
stop = None
return slice(start, stop, step)
ret = slices[0]
for s in slices[1:]:
ret = merge_two(ret, s)
if ret is None:
return slice(0, 0, None)
return ret
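# Worked example: slicing once with the merged slice is equivalent to slicing
# twice with the original slices.
#   >>> merge_slices(10, slice(1, 9), slice(None, None, 2))
#   slice(1, 9, 2)
#   >>> list(range(10))[1:9][::2] == list(range(10))[1:9:2]
#   True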
def merge_slice_indices(length, slice_, indices):
"""Merge a slice and integral indices.
This method merges a slice and integral indices, so that if we have:
merged = merge_slice_indices(len(array), slice_, indices)
Then the following two arrays should be same:
array1 = array[slice_][indices]
array2 = array[merged]
Parameters
----------
length : int
Length of the array that will be indexed.
slice_ : slice
Slice of the array.
indices : np.ndarray
Indices on the slice of the array.
Returns
-------
np.ndarray
"""
if not isinstance(indices, np.ndarray):
indices = np.asarray(indices, dtype=np.int)
# inspect the slice to get start, stop and step
start, stop, step = slice_.indices(length)
assert(0 <= start <= length-1)
assert(-1 <= stop <= length)
slen = _slice_length_sub(start, stop, step)
# merge the slice and the indices
indices_is_neg = indices < 0
if np.any(indices_is_neg):
indices = indices + indices_is_neg * slen
return start + indices * step
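# Worked example: indices into the slice are mapped back to indices into the
# full array.
#   >>> arr = np.arange(20)
#   >>> merged = merge_slice_indices(20, slice(2, None, 3), np.array([0, 2, 4]))
#   >>> merged
#   array([ 2,  8, 14])
#   >>> np.array_equal(arr[2::3][[0, 2, 4]], arr[merged])
#   True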
def merge_slice_masks(length, slice_, masks):
"""Merge a slice and boolean masks.
This method merges a slice and boolean masks, so that if we have:
merged = merge_slice_masks(len(array), slice_, masks)
Then the following two arrays should be same:
array1 = array[slice_][masks]
array2 = array[merged]
Parameters
----------
length : int
Length of the array that will be indexed.
slice_ : slice
Slice of the array.
masks : np.ndarray
Masks on the slice of the array.
Returns
-------
np.ndarray[int]
Indices of the chosen elements.
"""
if len(masks) != slice_length(length, slice_):
raise TypeError('Length of masks != slice: %r != %r.' %
(len(masks), slice_))
if not isinstance(masks, np.ndarray):
masks = np.asarray(masks, dtype=np.bool)
return merge_slice_indices(length, slice_, np.where(masks)[0])
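# Worked example: boolean masks on the slice become absolute indices.
#   >>> merge_slice_masks(10, slice(0, None, 2), [True, False, True, False, True])
#   array([0, 4, 8])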
def minibatch_indices_iterator(length, batch_size,
ignore_incomplete_batch=False):
"""Iterate through all the mini-batch indices.
Parameters
----------
length : int
Total length of data in an epoch.
batch_size : int
Size of each mini-batch.
ignore_incomplete_batch : bool
        Whether or not to ignore the final batch if it contains fewer
        than ``batch_size`` items.
Yields
------
np.ndarray
Indices of each mini-batch. The last mini-batch may contain less
indices than `batch_size`.
"""
start = 0
stop1 = (length // batch_size) * batch_size
while start < stop1:
yield np.arange(start=start, stop=start + batch_size, dtype=np.int)
start += batch_size
if not ignore_incomplete_batch and start < length:
yield np.arange(start=start, stop=length, dtype=np.int)
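# Worked example: the final, smaller batch is kept unless explicitly ignored.
#   >>> [b.tolist() for b in minibatch_indices_iterator(10, 4)]
#   [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]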
def adaptive_density(values, bins=1000, pscale=10, plimit=.05):
"""Compute the density histogram of given values, with adaptive range.
    It is often the case that outliers make up only a small portion of the
    data, yet vastly expand its range. This function computes the density
    histogram with an adaptive range, so that such outliers can be discarded.
Parameters
----------
values : np.ndarray
Values whose density should be computed.
bins : int
Number of bins of the density.
pscale : float
        Scaling factor used to decide whether stripping a given fraction of
        the density yields a significant shrinkage of the data range.
        For example, if ``pscale == 10``, then shrinking the range of the
        data by 10% while stripping only the densities of the top 0.5% and
        bottom 0.5% is regarded as a significant shrinkage.
plimit : float
        At most this fraction of the density can be stripped, counting the
        top and bottom stripped portions together.
Returns
-------
(np.ndarray, np.ndarray, float, float)
Density values in each bin, the edges of these bins, as well as
the stripped densities at the left and the right. The length of
edges will be one more than the length of density values.
"""
hist, edges = np.histogram(values, bins=bins, density=True)
pwidth = edges[1] - edges[0]
left = 0
right = len(hist)
pleft = 0.0
pright = 0.0
candidate = None
while pleft + pright <= plimit and left < right:
if pright <= pleft:
right -= 1
pright += hist[right] * pwidth
else:
pleft += hist[left] * pwidth
left += 1
p_data = float(left + len(hist) - right) / len(hist)
if pleft + pright <= plimit and p_data <= (pleft + pright) * pscale:
candidate = (left, right)
if candidate:
left, right = candidate
left_stripped = np.sum(hist[:left]) * pwidth
right_stripped = np.sum(hist[right:]) * pwidth
hist_sum = np.sum(hist)
hscale = (hist_sum - (left_stripped + right_stripped)) / hist_sum
vmin = edges[left]
vmax = edges[right]
vstripped = values[(values >= vmin) & (values <= vmax)]
hist, edges = np.histogram(vstripped, bins=bins, density=True)
hist *= hscale
else:
left_stripped = 0.0
right_stripped = 0.0
return hist, edges, left_stripped, right_stripped
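# Usage sketch (illustrative only; the Cauchy sample is a placeholder for any
# heavy-tailed data):
#   >>> values = np.random.standard_cauchy(100000)
#   >>> hist, edges, left, right = adaptive_density(values, bins=100)
#   >>> len(edges) == len(hist) + 1
#   True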
|
from cmd import Cmd
import typing as ty
from argparse_shell.namespace import Namespace
class InteractiveCmd(Cmd):
"""Subclass of the base :py:class:`cmd.Cmd`.
This class wraps a :py:class:`~argparse_shell.namespace.Namespace` and makes its commands available in an
interactive shell.
"""
_CMD_IMPLEMENTATION_PREFIX = "do_"
_HELP_IMPLEMENTATION_PREFIX = "help_"
identchars = Cmd.identchars + "-"
def __init__(self, namespace: Namespace, stop_on_eof: bool = True, *args, **kwargs) -> None:
"""Initialize an interactive shell working with a namespace instead of subclassing.
        Multi-word command names are exposed with dashes as word separators.
        :param namespace: Definition of a namespace containing the commands to expose
        :type namespace: Namespace
        :param stop_on_eof: Whether to stop when an EOF (Ctrl + D) is received, defaults to True
:type stop_on_eof: bool, optional
"""
super().__init__(*args, **kwargs)
self._namespace = namespace
if stop_on_eof:
def do_eof(_):
self.stdout.write("EOF: exit\n")
return True
self.do_EOF = do_eof
def preloop(self) -> None:
"""Pre loop hook. Remove dashes from the word delimiters in the `readline` module"""
try:
import readline # pylint: disable=import-outside-toplevel
# Remove dashes from the readline auto completion delimiters
readline.set_completer_delims(readline.get_completer_delims().replace("-", ""))
except ImportError:
pass
def get_names(self) -> ty.List[str]:
"""
Get a list of all command and help method implementations in the namespace nested in this class
"""
names = list()
for cmd in self._namespace.values():
names.append(cmd.interactive_method_name)
names.append(cmd.interactive_help_method_name)
return names
def __getattr__(self, name: str):
"""Fallback for attribute accesses, to replace dashes with underscores"""
for prefix in (self._CMD_IMPLEMENTATION_PREFIX, self._HELP_IMPLEMENTATION_PREFIX):
if name.startswith(prefix):
try:
prefix_len = len(prefix)
cmd = self._namespace[name[prefix_len:]]
return cmd.get_interactive_method_for_prefix(prefix, self.stdout)
except (KeyError, ValueError) as exc:
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") from exc
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")
def emptyline(self) -> bool:
# We don't want to execute the last command if nothing was typed
return False
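# Usage sketch (illustrative only): given a Namespace instance ``ns`` built
# elsewhere from the commands to expose, the shell is started with cmdloop().
#   >>> # InteractiveCmd(ns).cmdloop()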
|
import importlib
from pathlib import Path
from app import app
CURRENT_MODULE = __name__
for path in Path(__file__).parent.glob('./*.py'):
if path.stem == '__init__':
continue
module = importlib.import_module(f'.{path.stem}', CURRENT_MODULE)
    # Only wire up modules that actually define a router.
    if hasattr(module, 'router'):
app.include_router(module.router)
|
"""
This module contains description of function and class
for normal (Gauss) distribution.
References
----------
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html
"""
from typing import Tuple
from scipy.stats import norm
from method_of_moments.continuous._base_continuous import BaseContinuous
class Norm(BaseContinuous):
"""Class for Normal (Gauss) Distribution."""
def get_parameters(self) -> Tuple[float, float]:
"""Return parameters of distribution."""
return self.mean, self.std
def pdf(self, arg: float) -> float:
"""Return probability density function at a given argument."""
return norm.pdf(arg, loc=self.mean, scale=self.std)
def cdf(self, arg: float) -> float:
"""Return cumulative density function at a given argument."""
return norm.cdf(arg, loc=self.mean, scale=self.std)
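# Usage sketch (illustrative only; it assumes the BaseContinuous constructor
# accepts ``mean`` and ``std`` keyword arguments, which is not shown here):
#   >>> dist = Norm(mean=0.0, std=1.0)
#   >>> dist.cdf(0.0)
#   0.5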
|
"""
Copyright (c) 2019, Matt Pewsey
"""
import scipy.optimize
__all__ = ['fsolve']
def fsolve(*args, **kwargs):
"""
Finds the roots of a function. If the function fails to find a solution,
an exception is raised. See :func:`scipy.optimize.fsolve` for list of
parameters.
"""
kwargs['full_output'] = True
x, infodict, ier, mesg = scipy.optimize.fsolve(*args, **kwargs)
if ier != 1:
raise ValueError('{}\n{}'.format(mesg, infodict))
return x
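# Usage sketch: find the positive root of x**2 - 4 starting from x0 = 1.
#   >>> fsolve(lambda x: x**2 - 4.0, 1.0)
#   array([2.])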
|