the-stack_106_16295
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list Tensorboard experiments in Vertex AI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.ai.tensorboard_experiments import client
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.ai import endpoint_util
from googlecloudsdk.command_lib.ai import flags
from googlecloudsdk.core import resources
def _GetUriBeta(tensorboard):
ref = resources.REGISTRY.ParseRelativeName(
tensorboard.name,
constants.TENSORBOARD_EXPERIMENTS_COLLECTION,
api_version=constants.AI_PLATFORM_API_VERSION[constants.BETA_VERSION])
return ref.SelfLink()
def _GetUriAlpha(tensorboard):
ref = resources.REGISTRY.ParseRelativeName(
tensorboard.name,
constants.TENSORBOARD_EXPERIMENTS_COLLECTION,
api_version=constants.AI_PLATFORM_API_VERSION[constants.ALPHA_VERSION])
return ref.SelfLink()
def _Run(args, version):
tensorboard_ref = args.CONCEPTS.tensorboard.Parse()
region = tensorboard_ref.AsDict()['locationsId']
with endpoint_util.AiplatformEndpointOverrides(
version=version, region=region):
return client.TensorboardExperimentsClient(version=version).List(
tensorboard_ref=tensorboard_ref,
limit=args.limit,
page_size=args.page_size,
sort_by=args.sort_by)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class ListBeta(base.ListCommand):
"""List the Tensorboard experiments of the given project, region, and Tensorboard."""
detailed_help = {
'EXAMPLES':
"""\
To list Tensorboard Experiments in Tensorboard `12345`:
$ {command} 12345
""",
}
@staticmethod
def Args(parser):
flags.AddTensorboardResourceArg(parser,
                                    'to list Tensorboard experiments')
parser.display_info.AddUriFunc(_GetUriBeta)
def Run(self, args):
return _Run(args, constants.BETA_VERSION)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class ListAlpha(base.ListCommand):
"""List the Tensorboard experiments of the given project, region, and Tensorboard."""
@staticmethod
def Args(parser):
flags.AddTensorboardResourceArg(parser,
                                    'to list Tensorboard experiments')
parser.display_info.AddUriFunc(_GetUriAlpha)
def Run(self, args):
return _Run(args, constants.ALPHA_VERSION)
|
the-stack_106_16296
|
import pytorch_lightning as pl
import hydra
import torch
import yaml
import os
import numpy as np
from lib.snarf_model import SNARFModel
@hydra.main(config_path="config", config_name="config")
def main(opt):
print(opt.pretty())
pl.seed_everything(42, workers=True)
torch.set_num_threads(10)
# dataset
datamodule = hydra.utils.instantiate(opt.datamodule, opt.datamodule)
datamodule.setup(stage='fit')
np.savez('meta_info.npz', **datamodule.meta_info)
data_processor = None
if 'processor' in opt.datamodule:
data_processor = hydra.utils.instantiate(opt.datamodule.processor,
opt.datamodule.processor,
meta_info=datamodule.meta_info)
# logger
with open('.hydra/config.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
logger = pl.loggers.WandbLogger(project='snarf', config=config)
# checkpoint
checkpoint_path = './checkpoints/last.ckpt'
if not os.path.exists(checkpoint_path) or not opt.resume:
checkpoint_path = None
checkpoint_callback = pl.callbacks.ModelCheckpoint(save_top_k=-1,
monitor=None,
dirpath='./checkpoints',
save_last=True,
every_n_val_epochs=1)
trainer = pl.Trainer(logger=logger,
callbacks=[checkpoint_callback],
accelerator=None,
resume_from_checkpoint=checkpoint_path,
**opt.trainer)
model = SNARFModel(opt=opt.model,
meta_info=datamodule.meta_info,
data_processor=data_processor)
trainer.fit(model, datamodule=datamodule)
if __name__ == '__main__':
main()
|
the-stack_106_16299
|
import sys
import os
import re
INPUT_DIR = os.path.join('..', 'converted')
PREFIX = '''\\documentclass[a4paper]{article}
\\makeatletter
\\renewcommand\\tableofcontents{%
\\@starttoc{toc}%
}
\\makeatother
\\linespread{1.2}
\\usepackage[russian]{babel}
\\usepackage{csquotes}
\\usepackage{fontspec}
\\setmainfont{PT Serif}
\\usepackage{hyperref}
\\usepackage{linguex}
\\usepackage{multirow}
\\usepackage{graphicx}
\\usepackage{longtable}
\\usepackage{booktabs}
%\\title{}
%\\author{}
%\\date{}
\\hyphenation{}
\\begin{document}
\\maketitle
'''
def cleanup(filename):
in_path = os.path.join(INPUT_DIR, filename)
if not os.path.exists(in_path):
raise FileNotFoundError(
'The requisite file is not found in the "converted" directory.')
with open(in_path, 'r', encoding='utf-8') as inp:
lines = inp.readlines()
# Remove TOC
lo = hi = None
for i, line in enumerate(lines):
if line.startswith(r'\protect\hyperlink'):
if lo is None:
lo = i
hi = i
lines = lines[:lo] + ['\\tableofcontents\n'] + lines[(hi+2):]
lines = list(filter(lambda line: not line.startswith(r'\hypertarget{'),
lines))
txt = ''.join(lines)
# Remove closing braces left from hypertargets
    txt = re.sub(r'label{(\S+)}}', r'label{\g<1>}', txt)
# Remove section numbers from bibliography and recommended reading
txt = re.sub(r'section{Основная[ \n]+литература}',
r'section*{Основная литература}', txt)
txt = re.sub(r'section{Библиография}', r'section*{Библиография}', txt)
# Normalise italics
txt = txt.replace('\\emph{', '\\textit{')
# Normalise dashes
txt = txt.replace(' -- ', ' --- ')
# Make a standalone document
txt = PREFIX + txt + "\n\n \\end{document}"
new_filename = filename.replace('.tex', '_cleaned.tex')
with open(os.path.join(INPUT_DIR, new_filename), 'w', encoding='utf-8') as out:
out.write(txt)
return new_filename
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Usage: python cleanup.py FILE')
        print('FILE is assumed to be located in the "converted" directory.')
        sys.exit(1)
    filename = sys.argv[1]
try:
cleanup(filename)
except Exception as e:
print(f'An error has occurred: {e}')
sys.exit(1)
|
the-stack_106_16300
|
import argparse
import logging
import os
from builder.build_phrasal_thesauri_offline import get_corpus_features_cmd_parser
from discoutils.misc import force_symlink
'''
If using SVD, symlink the reduced vectors for all unigrams and NPs (done by build_phrasal_..
as part of training Baroni) to the right location.
Otherwise add unigram vectors (must exist) to ngram observed vectors (must exist)
and write to a single file in e.g. exp10-13-composed-vectors
'''
def do_work(corpus, features, svd_dims):
prefix = '/lustre/scratch/inf/mmb28/FeatureExtractionToolkit'
name = 'wiki' if corpus == 11 else 'gigaw'
# where should output be written
svd_appendage = '' if svd_dims == 0 else '-%d' % svd_dims
output_file = os.path.join(prefix,
'exp%d-%d-composed-ngrams-ppmi-svd' % (corpus, features),
'AN_NN_%s%s_Observed.events.filtered.strings' % (name, svd_appendage))
# contains SVD-reduced N,J and NP observed vectors, built by other script
vectors_file = '%s/exp%d-%db/exp%d-with-obs-phrases-SVD%d.events.filtered.strings' % \
(prefix, corpus, features, corpus, svd_dims)
force_symlink(vectors_file, output_file)
def get_cmd_parser():
parser = argparse.ArgumentParser(parents=[get_corpus_features_cmd_parser()])
# add options specific to this script here
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--svd', choices=(0, 30, 100, 300, 1000), nargs='+', type=int,
help='What SVD dimensionalities to build observed-vector thesauri from. '
                            'Vectors must have been produced and reduced already. 0 stands for unreduced.')
return parser
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format="%(asctime)s\t%(module)s.%(funcName)s (line %(lineno)d)\t%(levelname)s : %(message)s")
parameters = get_cmd_parser().parse_args()
logging.info(parameters)
corpus = 10 if parameters.corpus == 'gigaword' else 11
if parameters.features == 'dependencies':
raise ValueError('Observed dependency vectors for NPs do not exist')
features = 13
for dims in parameters.svd:
do_work(corpus, features, dims)
|
the-stack_106_16303
|
import geopandas as gpd
from pathlib import Path
from osgeo import gdal
import rasterio
import shutil
import subprocess
import tempfile
from tqdm import tqdm
from ..raster.gdalutils import rasterize
from ..raster.gdalutils import PROXIMITY_PATH
def calc_distance_to_border(polygons, template_raster, dst_raster, overwrite=False,
keep_interim_files=False, verbosity=0):
"""Calculate the distance of each raster cell (in and outside the polygons) to the next polygon border.
Arguments:
polygons {str} -- Filename to a geopandas-readable file with polygon features.
        template_raster {str} -- Filename to a rasterio-readable file.
        dst_raster {str} -- Destination filename for the distance to polygon border raster file (tif).
    Keyword Arguments:
        overwrite {bool} -- Overwrite files if they exist? (default: {False})
        keep_interim_files {bool} -- Keep the interim line vector and raster files (default: {False})
    Returns:
        int -- Exit code 0 if successful.
"""
if Path(dst_raster).exists() and not overwrite:
if verbosity > 0:
print(f"Returning 0 - File exists: {dst_raster}")
return 0
with rasterio.open(template_raster) as tmp:
crs = tmp.crs
dst_raster = Path(dst_raster)
dst_raster.parent.mkdir(exist_ok=True, parents=True)
tempdir = Path(tempfile.mkdtemp(prefix=f"TEMPDIR_{dst_raster.stem}_", dir=dst_raster.parent))
interim_file_lines_vector = tempdir / "interim_sample_vector_dataset_lines.shp"
interim_file_lines_raster = tempdir / "interim_sample_vector_dataset_lines.tif"
exit_code = convert_polygons_to_lines(polygons,
interim_file_lines_vector,
crs=crs,
add_allone_col=True)
rasterize(src_vector=str(interim_file_lines_vector),
burn_attribute="ALLONE",
src_raster_template=str(template_raster),
dst_rasterized=str(interim_file_lines_raster),
gdal_dtype=1)
cmd = f"{PROXIMITY_PATH} " \
f"{str(Path(interim_file_lines_raster).absolute())} " \
f"{str(Path(dst_raster).absolute())} " \
f"-ot Float32 -distunits PIXEL -values 1 -maxdist 255"
subprocess.check_call(cmd, shell=True)
if not keep_interim_files:
shutil.rmtree(tempdir)
else:
if verbosity > 0:
print(f"Interim files are in {tempdir}")
return 0
def convert_polygons_to_lines(src_polygons, dst_lines, crs=None, add_allone_col=False):
"""Convert polygons to lines.
Arguments:
src_polygons {path to geopandas-readable file} -- Filename of the the polygon vector dataset to be
converted to lines.
        dst_lines {str} -- Filename where to write the line vector dataset to.
Keyword Arguments:
crs {dict or str} -- Output projection parameters as string or in dictionary format.
This will reproject the data when a crs is given (not {None}) (default: {None}).
add_allone_col {bool} -- Add an additional attribute column with all ones.
            This is useful, e.g. in case you want to use the lines with gdal_proximity afterwards (default: {False}).
    Returns:
        int -- Exit code 0 if successful.
"""
gdf = gpd.read_file(src_polygons)
geom_coords = gdf["geometry"] # featureset.get(5)["geometry"]["coordinates"]
lines = []
row_ids = []
for i_row, pol in tqdm(enumerate(geom_coords), total=len(geom_coords)):
boundary = pol.boundary
if boundary.type == 'MultiLineString':
for line in boundary:
lines.append(line)
row_ids.append(i_row)
else:
lines.append(boundary)
row_ids.append(i_row)
gdf_lines = gdf.drop("geometry", axis=1).iloc[row_ids, :]
gdf_lines["Coordinates"] = lines
gdf_lines = gpd.GeoDataFrame(gdf_lines, geometry='Coordinates', crs=gdf.crs)
if crs is not None:
gdf_lines = gdf_lines.to_crs(crs)
if add_allone_col:
gdf_lines["ALLONE"] = 1
Path(dst_lines).parent.mkdir(exist_ok=True, parents=True)
gdf_lines.to_file(dst_lines)
return 0
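# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Because this module
# relies on relative imports it is meant to be imported from its package; the
# helper below only illustrates a typical call, with placeholder file names.
# ---------------------------------------------------------------------------
def _example_distance_to_border():
    return calc_distance_to_border(
        polygons="fields.gpkg",              # any geopandas-readable polygon file
        template_raster="scene.tif",         # any rasterio-readable raster
        dst_raster="distance_to_border.tif",
        overwrite=True,
        keep_interim_files=False,
        verbosity=1,
    )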
|
the-stack_106_16304
|
import datetime
from sqlalchemy import and_
from app.cruds.table_repository import TableRepository
from db import models
class EmailDetailsCrud(TableRepository):
def __init__(self, db) -> None:
super().__init__(db=db, entity=models.EmailDetails)
def create_message_details(self, subject: str,
from_address: str,
message_id: str,
thread_id: int,
email_account_provider_id: int,
body: str,
                               operation_datetime: datetime.datetime,
history_id: str,
is_read: bool,
type_: str,
contact_id: int,
account_id: int):
check_message = self.get_message_by_thread_id_and_message_id(message_id, thread_id)
if check_message is None:
email_details_object = self.entity(subject=subject,
from_address=from_address,
message_id=message_id,
thread_id=thread_id,
email_account_provider_id=email_account_provider_id, body=body,
operation_datetime=operation_datetime, history_id=history_id,
is_read=is_read, type=type_, contact_id=contact_id,
account_id=account_id)
else:
return check_message
self.db.add(email_details_object)
self.db.flush()
return email_details_object
def get_message_by_thread_id_and_message_id(self, message_id, thread_id):
return self.db.query(self.entity).filter(
and_(self.entity.thread_id == thread_id, self.entity.message_id == message_id)).first()
def get_message_list_by_thread_id(self, account_id: int, thread_id: str, email_provider_id):
return self.db.query(self.entity).filter(
and_(self.entity.account_id == account_id,
self.entity.thread_id == thread_id,
self.entity.email_account_provider_id == email_provider_id)
).order_by(self.entity.operation_datetime).all()
def update_mail_seen_unseen_status(self, message_id, thread_id, account_id, mail_provider_id, is_seen):
return self.db.query(self.entity).filter(and_(self.entity.message_id == message_id,
self.entity.thread_id == thread_id,
self.entity.account_id == account_id,
self.entity.email_account_provider_id == mail_provider_id)
).update({"is_read": is_seen}, synchronize_session='fetch')
def update_mail_read_status(self, message_id, thread_id, account_id, mail_provider_id):
return self.db.query(self.entity).filter(and_(self.entity.message_id == message_id,
self.entity.thread_id == thread_id,
self.entity.account_id == account_id,
self.entity.email_account_provider_id == mail_provider_id)
).update({"is_read": True}, synchronize_session='fetch')
def get_message_by_message_id(self, message_id):
return self.db.query(self.entity).filter(self.entity.message_id == message_id).first()
|
the-stack_106_16307
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines recurrent network layers that train using l0 regularization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from state_of_sparsity.layers.l0_regularization import common
from state_of_sparsity.layers.utils import rnn_checks
from tensorflow.python.framework import ops # pylint: disable=g-direct-tensorflow-import
class RNNCell(tf.nn.rnn_cell.BasicRNNCell):
"""RNN cell trained with l0 regularization.
This class implements an RNN cell trained with l0 regularization following
the technique from https://arxiv.org/abs/1712.01312.
"""
def __init__(
self,
kernel_weights,
bias_weights,
num_units,
beta=common.BETA,
gamma=common.GAMMA,
zeta=common.ZETA,
training=True,
eps=common.EPSILON,
activation=None,
name=None):
R"""Initialize the RNN cell.
Args:
kernel_weights: 2-tuple of Tensors, where the first tensor is the unscaled
weight values and the second is the log of the alpha values for the hard
concrete distribution.
bias_weights: The weight matrix to use for the biases.
num_units: int, The number of units in the RNN cell.
beta: The beta parameter, which controls the "temperature" of
the distribution. Defaults to 2/3 from the above paper.
gamma: The gamma parameter, which controls the lower bound of the
stretched distribution. Defaults to -0.1 from the above paper.
      zeta: The zeta parameter, which controls the upper bound of the
stretched distribution. Defaults to 1.1 from the above paper.
training: boolean, Whether the model is training or being evaluated.
eps: Small constant value to add to the term inside the square-root
operation to avoid NaNs.
activation: Activation function of the inner states. Defaults to `tanh`.
name: String, the name of the layer.
Raises:
RuntimeError: If the input kernel_weights is not a 2-tuple of Tensors
that have the same shape.
"""
super(RNNCell, self).__init__(
num_units=num_units,
activation=activation,
reuse=None,
name=name,
dtype=None)
# Verify and save the weight matrices
rnn_checks.check_rnn_weight_shapes(kernel_weights, bias_weights, num_units)
self._weight_parameters = kernel_weights
self._bias = bias_weights
self._beta = beta
self._gamma = gamma
self._zeta = zeta
self._training = training
self._eps = eps
def build(self, _):
"""Initializes the weights for the RNN."""
with ops.init_scope():
theta, log_alpha = self._weight_parameters
if self._training:
weight_noise = common.hard_concrete_sample(
log_alpha,
self._beta,
self._gamma,
self._zeta,
self._eps)
else:
weight_noise = common.hard_concrete_mean(
log_alpha,
self._gamma,
self._zeta)
self._weights = weight_noise * theta
self.built = True
def call(self, inputs, state):
gate_inputs = tf.matmul(
tf.concat([inputs, state], axis=1),
self._weights)
gate_inputs = tf.nn.bias_add(gate_inputs, self._bias)
output = self._activation(gate_inputs)
return output, output
class LSTMCell(tf.nn.rnn_cell.LSTMCell):
"""LSTM cell trained with l0 regularization.
This class implements an LSTM cell trained with l0 regularization following
the technique from https://arxiv.org/abs/1712.01312.
"""
def __init__(
self,
kernel_weights,
bias_weights,
num_units,
beta=common.BETA,
gamma=common.GAMMA,
zeta=common.ZETA,
training=True,
eps=common.EPSILON,
forget_bias=1.0,
activation=None,
name="lstm_cell"):
R"""Initialize the LSTM cell.
Args:
kernel_weights: 2-tuple of Tensors, where the first tensor is the unscaled
weight values and the second is the log of the alpha values for the hard
concrete distribution.
bias_weights: the weight matrix to use for the biases.
num_units: int, The number of units in the LSTM cell.
beta: The beta parameter, which controls the "temperature" of
the distribution. Defaults to 2/3 from the above paper.
gamma: The gamma parameter, which controls the lower bound of the
stretched distribution. Defaults to -0.1 from the above paper.
      zeta: The zeta parameter, which controls the upper bound of the
stretched distribution. Defaults to 1.1 from the above paper.
training: boolean, Whether the model is training or being evaluated.
eps: Small constant value to add to the term inside the square-root
operation to avoid NaNs.
forget_bias: float, The bias added to forget gates (see above).
activation: Activation function of the inner states. Defaults to `tanh`.
It could also be string that is within Keras activation function names.
name: String, the name of the layer.
Raises:
RuntimeError: If the input kernel_weights is not a 2-tuple of Tensors
that have the same shape.
"""
super(LSTMCell, self).__init__(
num_units=num_units,
forget_bias=forget_bias,
state_is_tuple=True,
activation=activation,
name=name)
# Verify and save the weight matrices
rnn_checks.check_lstm_weight_shapes(kernel_weights, bias_weights, num_units)
self._weight_parameters = kernel_weights
self._bias = bias_weights
self._beta = beta
self._gamma = gamma
self._zeta = zeta
self._training = training
self._eps = eps
def build(self, _):
"""Initialize the weights for the LSTM."""
with ops.init_scope():
theta, log_alpha = self._weight_parameters
if self._training:
weight_noise = common.hard_concrete_sample(
log_alpha,
self._beta,
self._gamma,
self._zeta,
self._eps)
else:
weight_noise = common.hard_concrete_mean(
log_alpha,
self._gamma,
self._zeta)
self._weights = weight_noise * theta
self.built = True
def call(self, inputs, state):
(c_prev, m_prev) = state
lstm_matrix = tf.matmul(
tf.concat([inputs, m_prev], axis=1),
self._weights)
lstm_matrix = tf.nn.bias_add(lstm_matrix, self._bias)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(
value=lstm_matrix,
num_or_size_splits=4,
axis=1)
sigmoid = tf.sigmoid
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
m = sigmoid(o) * self._activation(c)
new_state = tf.nn.rnn_cell.LSTMStateTuple(c, m)
return m, new_state
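if __name__ == "__main__":
  # Hedged usage sketch, not part of the original file. It assumes the RNN
  # kernel has shape [input_size + num_units, num_units] and the bias has
  # shape [num_units] (the layout used by tf.nn.rnn_cell.BasicRNNCell), and
  # that the surrounding code runs in TF1-style graph mode.
  tf.disable_eager_execution()
  input_size, num_units, batch, steps = 8, 16, 4, 5
  theta = tf.get_variable("theta", [input_size + num_units, num_units])
  log_alpha = tf.get_variable("log_alpha", [input_size + num_units, num_units])
  bias = tf.get_variable("bias", [num_units], initializer=tf.zeros_initializer())
  cell = RNNCell((theta, log_alpha), bias, num_units, training=True)
  inputs = tf.random.normal([batch, steps, input_size])
  outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
  print(outputs.shape)  # (4, 5, 16): [batch, time, num_units]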
|
the-stack_106_16308
|
from __future__ import print_function
import os
import pathlib
from box import Box, BoxList
from PIL import Image
import requests
import yaml
SLACKMOJI_DL_DIR = 'downloaded'
# https://stackoverflow.com/questions/25108581/python-yaml-dump-bad-indentation
class MyDumper(yaml.Dumper):
def increase_indent(self, flow=False, indentless=False):
return super(MyDumper, self).increase_indent(flow, False)
def remove_file(f):
try:
print('removing file', f)
os.remove(f)
except OSError:
pass
def download_file(url, output_file):
response = requests.get(url)
with open(output_file, 'wb') as f:
f.write(response.content)
return response
def write_yaml_file(data, output_file):
with open(output_file, "a") as f:
yaml.dump(data, f, Dumper=MyDumper, default_flow_style=False)
def create_dirs(dir):
"""Create directory and all intermediate-level directories"""
if not os.path.isdir(dir):
os.makedirs(dir)
def get_categories(slackmojis):
categories = set()
categories.add('uncategorized')
for slackmoji in slackmojis:
if 'category' in slackmoji:
category = str(slackmoji.category.name).lower().replace(' ', '-')
categories.add(category)
return categories
def valid_image(name, src):
ext = os.path.splitext(src)[1]
# the downloaded filename is different from if you download it manually
# because of the possible duplicates
# dl_file = os.path.join(SLACKMOJI_DL_DIR, ''.join([name, ext]))
dl_file = pathlib.Path(SLACKMOJI_DL_DIR) / ''.join([name, ext])
# print(f' * validating name: {name}, src: {src}, dl_file: {dl_file}')
if not dl_file.is_file():
download_file(src, dl_file)
# Is it an image?
valid_image = True
try:
im = Image.open(dl_file)
if im.width > 128 or im.height > 128:
print(f':{name}: is {im.size}\t{src}')
valid_image = False
except IOError:
print(f':{name}: could not be read\t{src}')
valid_image = False
return valid_image
def main():
url = "http://slackmojis.com/emojis.json"
output_file = 'emojis.json'
remove_file(output_file)
download_file(url, output_file)
# for downloaded emoji
create_dirs(SLACKMOJI_DL_DIR)
slackmoji_pack_dir = 'slackmoji-packs'
create_dirs(slackmoji_pack_dir)
slackmojis = BoxList.from_json(filename=output_file)
categories = get_categories(slackmojis)
data = {}
for category in categories:
data[category] = {'emojis': []}
output_file_yaml = os.path.join(slackmoji_pack_dir,
'slackmojis-{}.yaml'.format(category))
remove_file(output_file_yaml)
data_header = {
'title': 'slackmoji-{}'.format(category)
}
write_yaml_file(data_header, output_file_yaml)
name_count = Box()
for slackmoji in slackmojis:
name = str(slackmoji['name'])
category = 'uncategorized'
if 'category' in slackmoji:
category = str(slackmoji.category.name).lower().replace(' ', '-')
output_file_yaml = os.path.join(slackmoji_pack_dir,
'slackmojis-{}.yaml'.format(category))
# Special cases - a.k.a stupid cases
if name == 'yes2':
# there are two 'yes' and one 'yes2' emojis already
name = 'yes2-1'
if name == 'no2':
# there are two 'no' and one 'no2' emojis already
name = 'no2-1'
sports = ['mlb', 'nba', 'nfl', 'nhl']
if category in sports:
# The NFL logo should not be :nfl-nfl:
if name == 'nfl':
pass
else:
name = '{}-{}'.format(category, name)
if 'facebook' in category:
name = 'fb-{}'.format(name)
if 'scrabble' in category:
name = 'scrabble-{}'.format(name)
name_count[name] = name_count[name] + 1 if name in name_count else 1
if name_count[name] > 1:
name = ''.join([name, str(name_count[name])])
src = str(slackmoji['image_url']).split('?')[0]
if not valid_image(name, src):
continue
slackmoji_data = {
'name': name,
'src': src
}
data[category]['emojis'].append(slackmoji_data)
for category in categories:
output_file_yaml = os.path.join(slackmoji_pack_dir,
'slackmojis-{}.yaml'.format(category))
write_yaml_file(data[category], output_file_yaml)
if __name__ == "__main__":
main()
|
the-stack_106_16311
|
'''
Create and run workflow
Uses
https://github.com/couler-proj/couler
'''
import os
import json
import urllib
import couler.argo as couler
from couler.argo_submitter import ArgoSubmitter
from . import cargo
from ExecutionEnvironment.executor import (
setup_bash_patterns,
BaseExecutor,
Workflow,
)
class ArgoExecutor(BaseExecutor):
def __init__(self, workflow, executor_parameters=None):
# Set defaults
if not executor_parameters:
executor_parameters = {}
if 'workflow_name' not in executor_parameters:
executor_parameters['workflow_name'] = ''
if 'image_registry' not in executor_parameters:
executor_parameters['image_registry'] = '127.0.0.1'
if 'work_path' not in executor_parameters:
executor_parameters['work_path'] = '/work'
super().__init__(workflow, executor_parameters)
def build(self, output, output_format='argo', workflow_id=None, obc_client=False):
self.decompose(
break_down_on_tools=True,
update_server_status=True,
)
json_wf = json.dumps(self.decomposed)
print ('JSON DAG:')
print (json.dumps(self.decomposed, indent=4))
print ('='*20)
ret = cargo.pipeline(json_wf, self.workflow_name, self.image_registry, self.work_path)
print ('ARGO WORKFLOW:')
print (ret)
print ('='*20)
return ret
def dispatch(*,
nice_id,
client_parameters,
workflow_object,
server_url,
):
executor_parameters = {
'workflow_name': 'openbio-' + nice_id,
'image_registry': client_parameters['image_registry'],
'work_path': os.path.join(client_parameters['work_path'], nice_id)
}
namespace = client_parameters['namespace']
# Setup bash scripts
args = type('A', (), {
'server': server_url,
'insecure': False,
})
setup_bash_patterns(args)
# Parse cytoscape workflow
w = Workflow(workflow_object = workflow_object, askinput='NO', obc_server=server_url, workflow_id=None)
# Create argo scripts
e = ArgoExecutor(w, executor_parameters)
e.build(output=None, output_format='argo', workflow_id=None, obc_client=server_url)
# Submit with couler
submitter = ArgoSubmitter(namespace=namespace)
result = couler.run(submitter=submitter)
# Get visualization url
visualization_url = urllib.parse.urlparse(client_parameters['argo_url'])._replace(path='/workflows/%s/%s' % (namespace, result['metadata']['name'])).geturl()
return {
'visualization_url': visualization_url,
}
|
the-stack_106_16313
|
import os
import ssl
import smtplib
from typing import Optional, List
from email.utils import formatdate
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
class Gmail:
def __init__(self, from_: str, to: Optional[str] = None, cc: Optional[str] = None,
bcc: Optional[str] = None, subject: str = "", body: str = "",
host: str = "smtp.gmail.com", port=465, sep: str = ",") -> None:
self.from_: str = from_
self.to: Optional[str] = to
self.cc: Optional[str] = cc
self.bcc: Optional[str] = bcc
self.subject: str = subject
self.body: str = body
self.attachment: List[str] = []
context = ssl.create_default_context()
self.server = smtplib.SMTP_SSL(host=host, port=port, context=context)
self.sep = sep
def login(self, user: str, password: str) -> None:
self.server.login(user=user, password=password)
def add_attachment(self, attachment_path: str) -> None:
self.attachment.append(attachment_path)
def attachment_len(self) -> int:
return len(self.attachment)
def _set_attachment(self, msg):
for file_path in self.attachment:
if not os.path.exists(file_path):
continue
file_name: str = os.path.basename(file_path)
with open(file_path, "rb") as f:
part = MIMEApplication(f.read(), Name=file_name)
part["Content-Disposition"] = f'attachment; filename="{file_name}"'
msg.attach(part)
return msg
def _create_msg(self, is_html: bool):
msg = MIMEMultipart()
msg.attach(MIMEText(self.body, "html" if is_html else "plain"))
msg["Subject"] = self.subject
msg["From"] = self.from_
msg["To"] = self.to
msg["Cc"] = self.cc
msg["Bcc"] = self.bcc
msg["Date"] = formatdate()
return self._set_attachment(msg)
@staticmethod
def _split_addrs(addrs: Optional[str], sep: str):
if type(addrs) is str:
return addrs.split(sep)
return []
def _get_recipients_list(self) -> list:
to: list = self._split_addrs(self.to, self.sep)
        cc: list = self._split_addrs(self.cc, self.sep)
        bcc: list = self._split_addrs(self.bcc, self.sep)
return to + cc + bcc
def send(self, is_html: bool = False) -> None:
msg = self._create_msg(is_html=is_html)
recipients_list: list = self._get_recipients_list()
self.server.sendmail(from_addr=self.from_, to_addrs=recipients_list, msg=msg.as_string())
def close(self) -> None:
self.server.close()
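if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. Addresses,
    # credentials and the attachment path are placeholders; a real Gmail
    # account would need an app password and SMTP access enabled.
    mail = Gmail(from_="sender@example.com",
                 to="first@example.com,second@example.com",
                 subject="Monthly report",
                 body="<p>Please find the report attached.</p>")
    mail.login(user="sender@example.com", password="app-password")
    mail.add_attachment("report.pdf")  # silently skipped if the file is missing
    mail.send(is_html=True)
    mail.close()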
|
the-stack_106_16314
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import scale_vector
from compas.geometry import normalize_vector
from compas.geometry import add_vectors
from compas.geometry import subtract_vectors
from compas.geometry import cross_vectors
from compas.geometry import centroid_points
from compas.geometry import intersection_line_line
from compas.geometry import normal_polygon
from compas.utilities import pairwise
__all__ = [
'offset_line',
'offset_polyline',
'offset_polygon',
]
def offset_line(line, distance, normal=[0.0, 0.0, 1.0]):
"""Offset a line by a distance.
Parameters
----------
line : tuple
Two points defining the line.
    distance : float or list of floats
        The offset distance as float.
        A single value determines a constant offset. Alternatively, two
        offset values for the start and end point of the line can be used to
        create a variable offset.
normal : vector
The normal of the offset plane.
Returns
-------
offset line : tuple
Two points defining the offset line.
Notes
-----
    The offset direction is chosen such that if the line were along the positive
    X axis and the normal of the offset plane is along the positive Z axis, the
    offset line is in the direction of the positive Y axis.
Examples
--------
.. code-block:: python
line = [(0.0, 0.0, 0.0), (3.0, 3.0, 0.0)]
distance = 0.2 # constant offset
line_offset = offset_line(line, distance)
print(line_offset)
distance = [0.2, 0.1] # variable offset
line_offset = offset_line(line, distance)
print(line_offset)
"""
a, b = line
ab = subtract_vectors(b, a)
direction = normalize_vector(cross_vectors(normal, ab))
if isinstance(distance, (list, tuple)):
distances = distance
else:
distances = [distance, distance]
u = scale_vector(direction, distances[0])
v = scale_vector(direction, distances[1])
c = add_vectors(a, u)
d = add_vectors(b, v)
return c, d
def offset_polygon(polygon, distance):
"""Offset a polygon (closed) by a distance.
Parameters
----------
polygon : list of point
The XYZ coordinates of the corners of the polygon.
The first and last coordinates must not be identical.
distance : float or list of float
The offset distance as float.
A single value determines a constant offset globally.
Alternatively, pairs of local offset values per line segment can be used to create variable offsets.
Distance > 0: offset to the outside, distance < 0: offset to the inside.
Returns
-------
offset polygon : list of point
The XYZ coordinates of the corners of the offset polygon.
The first and last coordinates are identical.
Notes
-----
The offset direction is determined by the normal of the polygon.
If the polygon is in the XY plane and the normal is along the positive Z axis,
positive offset distances will result in an offset towards the inside of the
polygon.
The algorithm works also for spatial polygons that do not perfectly fit a plane.
Examples
--------
.. code-block:: python
polygon = [
(0.0, 0.0, 0.0),
(3.0, 0.0, 1.0),
(3.0, 3.0, 2.0),
(1.5, 1.5, 2.0),
(0.0, 3.0, 1.0),
(0.0, 0.0, 0.0)
]
distance = 0.5 # constant offset
polygon_offset = offset_polygon(polygon, distance)
print(polygon_offset)
distance = [
(0.1, 0.2),
(0.2, 0.3),
(0.3, 0.4),
(0.4, 0.3),
(0.3, 0.1)
] # variable offset
polygon_offset = offset_polygon(polygon, distance)
print(polygon_offset)
"""
p = len(polygon)
if isinstance(distance, (list, tuple)):
distances = distance
else:
distances = [distance] * p
d = len(distances)
if d < p:
distances.extend(distances[-1:] * (p - d))
normal = normal_polygon(polygon)
offset = []
for line, distance in zip(pairwise(polygon + polygon[:1]), distances):
offset.append(offset_line(line, distance, normal))
points = []
for l1, l2 in pairwise(offset[-1:] + offset):
x1, x2 = intersection_line_line(l1, l2)
if x1 and x2:
points.append(centroid_points([x1, x2]))
else:
points.append(x1)
return points
def offset_polyline(polyline, distance, normal=[0.0, 0.0, 1.0]):
"""Offset a polyline by a distance.
Parameters
----------
polyline : list of point
The XYZ coordinates of the vertices of a polyline.
distance : float or list of tuples of floats
The offset distance as float.
A single value determines a constant offset globally.
Alternatively, pairs of local offset values per line segment can be used to create variable offsets.
Distance > 0: offset to the "left", distance < 0: offset to the "right".
normal : vector
The normal of the offset plane.
Returns
-------
offset polyline : list of point
The XYZ coordinates of the resulting polyline.
"""
p = len(polyline)
if isinstance(distance, (list, tuple)):
distances = distance
else:
distances = [distance] * p
d = len(distances)
if d < p:
distances.extend(distances[-1:] * (p - d))
offset = []
for line, distance in zip(pairwise(polyline), distances):
offset.append(offset_line(line, distance, normal))
points = [offset[0][0]]
for l1, l2 in pairwise(offset):
x1, x2 = intersection_line_line(l1, l2)
if x1 and x2:
points.append(centroid_points([x1, x2]))
else:
points.append(x1)
points.append(offset[-1][1])
return points
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import compas
from compas.plotters import MeshPlotter
from compas.datastructures import Mesh
mesh = Mesh.from_obj(compas.get('faces.obj'))
polygons = []
lines = []
for fkey in mesh.faces():
points = mesh.face_coordinates(fkey)
offset = offset_polyline(points, 0.1)
polygons.append({
'points': offset,
'edgecolor': '#ff0000'
})
for a, b in zip(points, offset):
lines.append({
'start': a,
'end': b,
'color': '#00ff00'
})
plotter = MeshPlotter(mesh)
plotter.draw_faces()
plotter.draw_polylines(polygons)
plotter.draw_lines(lines)
plotter.show()
|
the-stack_106_16315
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v2.model_utils import (
ModelNormal,
cached_property,
)
def lazy_import():
from datadog_api_client.v2.model.role_update_data import RoleUpdateData
globals()["RoleUpdateData"] = RoleUpdateData
class RoleUpdateRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
validations = {}
@cached_property
def openapi_types():
lazy_import()
return {
"data": (RoleUpdateData,),
}
attribute_map = {
"data": "data",
}
read_only_vars = {}
def __init__(self, data, *args, **kwargs):
"""RoleUpdateRequest - a model defined in OpenAPI
Args:
data (RoleUpdateData):
Keyword Args:
"""
super().__init__(kwargs)
self._check_pos_args(args)
self.data = data
@classmethod
def _from_openapi_data(cls, data, *args, **kwargs):
"""Helper creating a new instance from a response."""
self = super(RoleUpdateRequest, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
self.data = data
return self
|
the-stack_106_16317
|
# -*- coding: utf-8 -*-
"""
Compute the shortest paths and path lengths between nodes in the graph.
These algorithms work with undirected and directed graphs.
For directed graphs the paths can be computed in the reverse
order by first flipping the edge orientation using R=G.reverse(copy=False).
"""
# Copyright (C) 2004-2012 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Sérgio Nery Simões <[email protected]>'])
__all__ = ['shortest_path', 'all_shortest_paths',
'shortest_path_length', 'average_shortest_path_length',
'has_path']
def has_path(G, source, target):
"""Return True if G has a path from source to target, False otherwise.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
"""
try:
sp = nx.shortest_path(G,source, target)
except nx.NetworkXNoPath:
return False
return True
def shortest_path(G, source=None, target=None, weight=None):
"""Compute shortest paths in the graph.
Parameters
----------
G : NetworkX graph
source : node, optional
Starting node for path.
If not specified compute shortest paths for all connected node pairs.
target : node, optional
Ending node for path.
If not specified compute shortest paths for every node reachable
from the source.
weight : None or string, optional (default = None)
If None, every edge has weight/distance/cost 1.
If a string, use this edge attribute as the edge weight.
Any edge attribute not present defaults to 1.
Returns
-------
path: list or dictionary
If the source and target are both specified return a single list
of nodes in a shortest path.
If only the source is specified return a dictionary keyed by
targets with a list of nodes in a shortest path.
If neither the source or target is specified return a dictionary
of dictionaries with path[source][target]=[list of nodes in path].
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.shortest_path(G,source=0,target=4))
[0, 1, 2, 3, 4]
>>> p=nx.shortest_path(G,source=0) # target not specified
>>> p[4]
[0, 1, 2, 3, 4]
>>> p=nx.shortest_path(G) # source,target not specified
>>> p[0][4]
[0, 1, 2, 3, 4]
Notes
-----
There may be more than one shortest path between a source and target.
This returns only one of them.
For digraphs this returns a shortest directed path.
To find paths in the reverse direction first use G.reverse(copy=False)
to flip the edge orientation.
See Also
--------
all_pairs_shortest_path()
all_pairs_dijkstra_path()
single_source_shortest_path()
single_source_dijkstra_path()
"""
if source is None:
if target is None:
if weight is None:
paths=nx.all_pairs_shortest_path(G)
else:
paths=nx.all_pairs_dijkstra_path(G,weight=weight)
else:
raise nx.NetworkXError(\
"Target given but no source specified.")
else: # source specified
if target is None:
if weight is None:
paths=nx.single_source_shortest_path(G,source)
else:
paths=nx.single_source_dijkstra_path(G,source,weight=weight)
else:
# shortest source-target path
if weight is None:
paths=nx.bidirectional_shortest_path(G,source,target)
else:
paths=nx.dijkstra_path(G,source,target,weight)
return paths
def shortest_path_length(G, source=None, target=None, weight=None):
"""Compute shortest path lengths in the graph.
This function can compute the single source shortest path
lengths by specifying only the source or all pairs shortest
path lengths by specifying neither the source or target.
Parameters
----------
G : NetworkX graph
source : node, optional
Starting node for path.
If not specified compute shortest path lengths for all
connected node pairs.
target : node, optional
Ending node for path.
If not specified compute shortest path lengths for every
node reachable from the source.
weight : None or string, optional (default = None)
If None, every edge has weight/distance/cost 1.
If a string, use this edge attribute as the edge weight.
Any edge attribute not present defaults to 1.
Returns
-------
length : number, or container of numbers
        If the source and target are both specified, return a single
        number: the length of the shortest path.
        If only the source is specified, return a dictionary keyed by
        target with the shortest path length to that target as the value.
If neither the source or target is specified return a dictionary
of dictionaries with length[source][target]=value.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.shortest_path_length(G,source=0,target=4))
4
>>> p=nx.shortest_path_length(G,source=0) # target not specified
>>> p[4]
4
>>> p=nx.shortest_path_length(G) # source,target not specified
>>> p[0][4]
4
Notes
-----
For digraphs this returns the shortest directed path.
To find path lengths in the reverse direction use G.reverse(copy=False)
first to flip the edge orientation.
See Also
--------
all_pairs_shortest_path_length()
all_pairs_dijkstra_path_length()
single_source_shortest_path_length()
single_source_dijkstra_path_length()
"""
if source is None:
if target is None:
if weight is None:
paths=nx.all_pairs_shortest_path_length(G)
else:
paths=nx.all_pairs_dijkstra_path_length(G, weight=weight)
else:
raise nx.NetworkXError("Target given but no source specified.")
else: # source specified
if target is None:
if weight is None:
paths=nx.single_source_shortest_path_length(G,source)
else:
paths=nx.single_source_dijkstra_path_length(G,source,weight=weight)
else:
# shortest source-target path
if weight is None:
p=nx.bidirectional_shortest_path(G,source,target)
paths=len(p)-1
else:
paths=nx.dijkstra_path_length(G,source,target,weight)
return paths
def average_shortest_path_length(G, weight=None):
r"""Return the average shortest path length.
The average shortest path length is
.. math::
a =\sum_{s,t \in V} \frac{d(s, t)}{n(n-1)}
where `V` is the set of nodes in `G`,
`d(s, t)` is the shortest path from `s` to `t`,
and `n` is the number of nodes in `G`.
Parameters
----------
G : NetworkX graph
weight : None or string, optional (default = None)
If None, every edge has weight/distance/cost 1.
If a string, use this edge attribute as the edge weight.
Any edge attribute not present defaults to 1.
Raises
------
NetworkXError:
if the graph is not connected.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.average_shortest_path_length(G))
2.0
For disconnected graphs you can compute the average shortest path
length for each component:
>>> G=nx.Graph([(1,2),(3,4)])
>>> for g in nx.connected_component_subgraphs(G):
... print(nx.average_shortest_path_length(g))
1.0
1.0
"""
if G.is_directed():
if not nx.is_weakly_connected(G):
raise nx.NetworkXError("Graph is not connected.")
else:
if not nx.is_connected(G):
raise nx.NetworkXError("Graph is not connected.")
avg=0.0
if weight is None:
for node in G:
path_length=nx.single_source_shortest_path_length(G, node)
avg += sum(path_length.values())
else:
for node in G:
path_length=nx.single_source_dijkstra_path_length(G, node, weight=weight)
avg += sum(path_length.values())
n=len(G)
return avg/(n*(n-1))
def all_shortest_paths(G, source, target, weight=None):
"""Compute all shortest paths in the graph.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path.
target : node
Ending node for path.
weight : None or string, optional (default = None)
If None, every edge has weight/distance/cost 1.
If a string, use this edge attribute as the edge weight.
Any edge attribute not present defaults to 1.
Returns
-------
paths: generator of lists
A generator of all paths between source and target.
Examples
--------
>>> G=nx.Graph()
>>> G.add_path([0,1,2])
>>> G.add_path([0,10,2])
>>> print([p for p in nx.all_shortest_paths(G,source=0,target=2)])
[[0, 1, 2], [0, 10, 2]]
Notes
-----
There may be many shortest paths between the source and target.
See Also
--------
shortest_path()
single_source_shortest_path()
all_pairs_shortest_path()
"""
if weight is not None:
pred,dist = nx.dijkstra_predecessor_and_distance(G,source,weight=weight)
else:
pred = nx.predecessor(G,source)
if target not in pred:
raise nx.NetworkXNoPath()
stack = [[target,0]]
top = 0
while top >= 0:
node,i = stack[top]
if node == source:
yield [p for p,n in reversed(stack[:top+1])]
if len(pred[node]) > i:
top += 1
if top == len(stack):
stack.append([pred[node][i],0])
else:
stack[top] = [pred[node][i],0]
else:
stack[top-1][1] += 1
top -= 1
|
the-stack_106_16319
|
# -*- coding: utf-8 -*-
import torch
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments), \
"Not all arguments have the same value: " + str(args)
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return (torch.arange(0, max_len)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1)))
def tile(x, count, dim=0):
"""
Tiles x on dimension dim count times.
"""
perm = list(range(len(x.size())))
if dim != 0:
perm[0], perm[dim] = perm[dim], perm[0]
x = x.permute(perm).contiguous()
out_size = list(x.size())
out_size[0] *= count
batch = x.size(0)
x = x.view(batch, -1) \
.transpose(0, 1) \
.repeat(count, 1) \
.transpose(0, 1) \
.contiguous() \
.view(*out_size)
if dim != 0:
x = x.permute(perm).contiguous()
return x
def use_gpu(opt):
"""
Creates a boolean if gpu used
"""
return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \
(hasattr(opt, 'gpu') and opt.gpu > -1)
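if __name__ == "__main__":
    # Small self-contained demo of the helpers above (not in the original file).
    lengths = torch.tensor([2, 3, 1])
    print(sequence_mask(lengths))    # 3 x 3 boolean mask, True where position < length
    x = torch.arange(6).view(3, 2)
    print(tile(x, 2, dim=0))         # every row duplicated in place, shape becomes (6, 2)
    aeq(x.size(0), lengths.numel())  # passes silently: both equal 3
    fake_opt = type("Opt", (), {"gpu": 0})()
    print(use_gpu(fake_opt))         # True, because opt.gpu > -1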
|
the-stack_106_16320
|
from docplex.mp.model import Model
import docplex.mp.solution as Solucion
import numpy as np
print(Solucion)
n=11
ciudades=[i for i in range(n)]  # create cities 0 through n-1 (here 0 to 10)
arcos =[(i,j) for i in ciudades for j in ciudades if i!=j]
random=np.random
random.seed(1)
coord_x=random.rand(n)*100
coord_y=random.rand(n)*100
distancia={(i, j): np.hypot(coord_x[i] - coord_x[j], coord_y[i] - coord_y[j]) for i,j in arcos}
mdl=Model('TSP')
x=mdl.binary_var_dict(arcos,name='x')
d=mdl.continuous_var_dict(ciudades,name='d')
print(list(filter(lambda el: el.count("var_dict") > 0 , dir(mdl))))
mdl.minimize(mdl.sum(distancia[i]*x[i] for i in arcos))
for c in ciudades:
mdl.add_constraint(mdl.sum(x[(i,j)] for i,j in arcos if i==c)==1,
ctname='out_%d'%c)
for c in ciudades:
mdl.add_constraint(mdl.sum(x[(i,j)] for i,j in arcos if j==c)==1,
ctname='in_%d'%c)
for i,j in arcos:
if j!=0:
mdl.add_indicator(x[(i,j)],d[i]+1==d[j],
name='order_(%d,_%d)'%(i, j))
mdl.parameters.timelimit=120
mdl.parameters.mip.strategy.branch=1
mdl.parameters.mip.tolerances.mipgap=0.15
solucion = mdl.solve(log_output=True)
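# Hedged post-solve sketch (not in the original script): report the objective and
# the arcs selected by the solver, assuming the solve above found a solution.
if solucion is not None:
    print('objective value:', solucion.objective_value)
    arcos_activos = [a for a in arcos if x[a].solution_value > 0.9]
    print('arcs in the tour:', arcos_activos)
else:
    print('no solution found within the time limit')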
|
the-stack_106_16321
|
#!/usr/bin/env python3
import math
import re
import itertools
import networkx as nx
def parse(line):
m = re.match(r'(\S+) to (\S+) = (\d+)', line)
if m:
return m.group(1), m.group(2), int(m.group(3))
def part1(filename):
with open(filename) as f:
lines = f.readlines()
G = nx.Graph()
for line in lines:
v0, v1, d = parse(line)
G.add_edge(v0, v1, distance=d)
minDistance = math.inf
for path in itertools.permutations(G.nodes()):
distance = 0
for i in range(len(path) - 1):
distance += G[path[i]][path[i + 1]]['distance']
if distance < minDistance:
minDistance = distance
print(minDistance)
def part2(filename):
with open(filename) as f:
lines = f.readlines()
G = nx.Graph()
for line in lines:
v0, v1, d = parse(line)
G.add_edge(v0, v1, distance=d)
maxDistance = -math.inf
for path in itertools.permutations(G.nodes()):
distance = 0
for i in range(len(path) - 1):
distance += G[path[i]][path[i + 1]]['distance']
if distance > maxDistance:
maxDistance = distance
print(maxDistance)
if __name__ == '__main__':
part1('day09input.txt')
part2('day09input.txt')
|
the-stack_106_16323
|
from datapackage_pipelines.wrapper import ingest, spew
import logging
def main():
parameters, datapackage, resources, stats = ingest() + ({},)
bills = {}
israel_law_bill_ids = {}
for bill in next(resources):
bill['law_ministry_ids'] = []
bills[bill['BillID']] = bill
if bill['IsraelLawID']:
for israel_law_id in bill['IsraelLawID']:
israel_law_bill_ids.setdefault(israel_law_id, [])
israel_law_bill_ids[israel_law_id].append(bill['BillID'])
for law_ministry in next(resources):
for bill_id in israel_law_bill_ids.get(law_ministry['IsraelLawID'], []):
if law_ministry['GovMinistryID'] not in bills[bill_id]['law_ministry_ids']:
bills[bill_id]['law_ministry_ids'].append(law_ministry['GovMinistryID'])
gov_ministries = {}
for gov_ministry in next(resources):
gov_ministries[gov_ministry['GovMinistryID']] = gov_ministry['Name']
for bill in bills.values():
ministry_names = set()
for ministry_id in bill['law_ministry_ids']:
ministry_names.add(gov_ministries[ministry_id])
bill['law_ministry_names'] = ', '.join(ministry_names)
datapackage["resources"] = [datapackage['resources'][0]]
fields = [{'name': 'law_ministry_ids', 'type': 'array'},
{'name': 'law_ministry_names', 'type': 'string'}]
datapackage["resources"][0]['schema']['fields'] += fields
spew(datapackage, [bills.values()], stats)
if __name__ == '__main__':
main()
|
the-stack_106_16324
|
import os
import dcp.utils as utils
import torch
import torch.nn as nn
__all__ = ["CheckPoint"]
class CheckPoint(object):
"""
save model state to file
check_point_params: model, optimizer, epoch
"""
def __init__(self, save_path, logger):
self.save_path = os.path.join(save_path, "check_point")
self.check_point_params = {'model': None,
'optimizer': None,
'epoch': None}
self.logger = logger
# make directory
if not os.path.isdir(self.save_path):
os.makedirs(self.save_path)
def load_state(self, model, state_dict):
"""
load state_dict to model
:params model:
:params state_dict:
:return: model
"""
model.eval()
model_dict = model.state_dict()
for key, value in list(state_dict.items()):
if key in list(model_dict.keys()):
model_dict[key] = value
else:
if self.logger:
self.logger.error("key error: {} {}".format(key, type(value)))
# assert False
model.load_state_dict(model_dict)
return model
def load_model(self, model_path):
"""
load model
:params model_path: path to the model
:return: model_state_dict
"""
if os.path.isfile(model_path):
if self.logger:
self.logger.info("|===>Load retrain model from: {}".format(model_path))
model_state_dict = torch.load(model_path, map_location={'cuda:1': 'cuda:0'})
return model_state_dict
else:
assert False, "file not exits, model path: " + model_path
def load_checkpoint(self, checkpoint_path):
"""
load checkpoint file
:params checkpoint_path: path to the checkpoint file
:return: model_state_dict, optimizer_state_dict, epoch
"""
if os.path.isfile(checkpoint_path):
if self.logger:
self.logger.info("|===>Load resume check-point from: {}".format(checkpoint_path))
self.check_point_params = torch.load(checkpoint_path)
model_state_dict = self.check_point_params['model']
optimizer_state_dict = self.check_point_params['optimizer']
epoch = self.check_point_params['epoch']
return model_state_dict, optimizer_state_dict, epoch
else:
assert False, "file not exits" + checkpoint_path
def save_checkpoint(self, model, optimizer, epoch, index=0):
"""
:params model: model
:params optimizer: optimizer
:params epoch: training epoch
:params index: index of saved file, default: 0
        Note: if we add a hook to the grad by using register_hook(hook), the hook function
        cannot be saved, so we need to save the state_dict() only. Although saving the state
        dictionary is recommended, sometimes we still need to save the whole model as it keeps
        all the information of the trained model and we do not need to create a new network
        next time. However, the GPU information will be saved too, which leads to some issues
        when we use the model on a different machine.
"""
# get state_dict from model and optimizer
model = utils.list2sequential(model)
if isinstance(model, nn.DataParallel):
model = model.module
model = model.state_dict()
optimizer = optimizer.state_dict()
# save information to a dict
self.check_point_params['model'] = model
self.check_point_params['optimizer'] = optimizer
self.check_point_params['epoch'] = epoch
# save to file
torch.save(self.check_point_params, os.path.join(
self.save_path, "checkpoint_{:0>3d}.pth".format(index)))
def save_model(self, model, best_flag=False, index=0, tag=""):
"""
:params model: model to save
:params best_flag: if True, the saved model is the one that gets best performance
"""
# get state dict
model = utils.list2sequential(model)
if isinstance(model, nn.DataParallel):
model = model.module
model = model.state_dict()
if best_flag:
if tag != "":
torch.save(model, os.path.join(self.save_path, "{}_best_model.pth".format(tag)))
else:
torch.save(model, os.path.join(self.save_path, "best_model.pth"))
else:
if tag != "":
torch.save(model, os.path.join(self.save_path, "{}_model_{:0>3d}.pth".format(tag, index)))
else:
torch.save(model, os.path.join(self.save_path, "model_{:0>3d}.pth".format(index)))
|
the-stack_106_16325
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import render, redirect, get_object_or_404
from friendship.exceptions import AlreadyExistsError
from friendship.models import Follow
from .models import Image, Profile, Comments
from .forms import NewImageForm, CreateProfileForm, CommentForm
@login_required(login_url='signup')
def index(request):
comments = Comments.objects.all()
form = CommentForm()
mine = Profile.objects.get(user=request.user.id)
profiles = Profile.objects.all()
user = request.user
following = Follow.objects.following(user)
images =[]
for follower in following:
wenyewe = follower.id
images+=Image.objects.filter(owner=wenyewe).order_by('-pub_date')
return render(request, 'index.html',{'following':following,'images':images,"profiles":profiles, "mine":mine,"comment":form, "comments":comments, 'user':user})
@login_required
def upload(request):
current_user = request.user
if request.method == 'POST':
form = NewImageForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.owner = current_user
image.save()
return redirect(index)
else:
form = NewImageForm()
return render(request, 'new_image.html', {"form": form})
@login_required
def profile(request):
current_user = request.user.id
user = request.user
profile = Profile.objects.get(user=current_user)
images = Image.objects.filter(owner=current_user).order_by('-pub_date')
followers = len(Follow.objects.followers(user))
following = len(Follow.objects.following(user))
posts = len(Image.objects.filter(owner=user))
return render(request, 'profile.html', {"images": images, 'user': request.user, "profile": profile,"followers":followers,"following":following,"posts":posts})
@login_required
def explore(request):
images = Image.objects.all().order_by('-pub_date')
return render(request, 'explore.html', {"images": images})
@login_required
def userprofile(request, user_id):
users = User.objects.get(id=user_id)
profile = Profile.objects.get(user=users)
images = Image.objects.filter(owner=users).order_by('-pub_date')
followers = len(Follow.objects.followers(users))
following = len(Follow.objects.following(users))
posts = len(Image.objects.filter(owner=users))
people_following = Follow.objects.following(request.user)
return render(request, 'profile/userprofile.html', {"user": users, "profile": profile, "images": images,"followers":followers, "following":following, "posts":posts, "people_following":people_following})
def follow(request, user_id):
users = User.objects.get(id=user_id)
try:
follow = Follow.objects.add_follower(request.user, users)
except AlreadyExistsError:
return render(request, "followed.html")
# return render(request, 'profile/userprofile.html', {"follow": follow})
return redirect('/userprofile/'+user_id)
@login_required
def create_profile(request):
current_user = request.user
if request.method == 'POST':
form = CreateProfileForm(request.POST, request.FILES)
if form.is_valid():
profile = form.save(commit=False)
profile.user = current_user
profile.save()
return redirect('/profile')
else:
form = CreateProfileForm()
return render(request, 'registration/activated.html', {"form": form})
def search_results(request):
if 'profile' in request.GET and request.GET["profile"]:
search_term = request.GET.get("profile")
searched_profile = Profile.search_by_username(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"profile": searched_profile})
else:
message = "You haven't searched for any user"
return render(request, 'search.html',{"message":message})
@login_required
def comment(request,image_id):
if request.method == 'POST':
image = get_object_or_404(Image, pk = image_id)
form = CommentForm(request.POST, request.FILES)
if form.is_valid():
comment = form.save(commit=False)
comment.commenter = request.user
comment.image_id = image
comment.save()
return redirect(index)
else:
form = CommentForm()
return render(request, 'index.html',{'comment':form})
|
the-stack_106_16326
|
#
# Copyright (c) 2018 Institute for Basic Science
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import mappy
from pysam import FUNMAP, FREVERSE, FSECONDARY, FSUPPLEMENTARY
from pysam import AlignmentFile, AlignedSegment
from struct import pack, unpack, calcsize
from collections import defaultdict
from threading import Lock
from .utils import ensure_dir_exists
MM_IDX_MAGIC = b"MMI\2"
def check_minimap2_index(filename):
with open(filename, 'rb') as idxf:
magic = idxf.read(4)
if magic != MM_IDX_MAGIC:
raise Exception('File magic is not found from ' + filename)
class BAMWriter:
def __init__(self, output, indexed_sequence_list, index_options):
header = self.build_header(indexed_sequence_list, index_options)
ensure_dir_exists(output)
self.writer = AlignmentFile(output, 'wb', header=header)
self.lock = Lock()
def __del__(self):
self.close()
def close(self):
if hasattr(self, 'writer'):
self.writer.close()
del self.writer
def build_header(self, indexed_sequence_list, index_options):
return {'SQ': indexed_sequence_list,
'PG': [{'ID': 'minimap2', 'PN': 'minimap2',
'CL': index_options, 'DS': 'minimap2 invoked by poreplex'}]}
def write(self, fields):
line = '\t'.join(map(str, fields))
segment = AlignedSegment.fromstring(line, self.writer.header)
with self.lock:
self.writer.write(segment)
class AlignmentWriter:
def __init__(self, indexfile, output, output_layout):
self.aligner = mappy.Aligner(indexfile)
if not self.aligner:
raise Exception('Could not open minimap2 index {}.'.format(indexfile))
self.writers = self.open_writers(indexfile, output, output_layout)
def open_writers(self, indexfile, output, output_layout):
indexed_sequences, index_options = list(self.get_indexed_sequence_list(indexfile))
return {muxid: BAMWriter(output.format(name), indexed_sequences, index_options)
for muxid, name in output_layout.items()}
def close(self):
for muxid, writer in self.writers.items():
writer.close()
self.writers.clear()
def __del__(self):
self.close()
def get_indexed_sequence_list(self, indexfile):
seqlist = []
with open(indexfile, 'rb') as idxf:
magic = idxf.read(4)
if magic != MM_IDX_MAGIC:
                raise Exception('File magic is not found from ' + indexfile)
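            # The .mmi index header packs five little-endian uint32 fields
            # (w, k, b, n_seq, flag), unpacked below to recover the indexing
            # parameters and the number of reference sequences.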
header_format = '<IIIII'
header_size = calcsize(header_format)
header = idxf.read(header_size)
if len(header) != header_size:
                raise Exception('Unexpected end of file while reading the header: ' + indexfile)
w, k, b, n_seq, flag = unpack(header_format, header)
index_options = 'minimap2 -w {} -k {}'.format(w, k)
for i in range(n_seq):
namlen = idxf.read(1)[0]
name_seqlen = idxf.read(namlen + 4)
name = name_seqlen[:-4].decode()
seqlen = unpack('<I', name_seqlen[-4:])[0]
seqlist.append({'LN': seqlen, 'SN': name})
return seqlist, index_options
def map(self, name, seq, qual):
seq = seq.replace('U', 'T')
seqmaps = list(self.aligner.map(seq))
if not seqmaps:
yield (name, int(FUNMAP), '*', 0, 0, '*', '*', 0, 0, seq, qual)
return
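        # Emit one SAM-style record per hit: the first hit gets flag 0 if primary
        # (supplementary otherwise), all later hits are marked secondary;
        # reverse-strand hits are reverse-complemented and their soft-clips swapped.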
for i, h in enumerate(seqmaps):
if i > 0:
flag = int(FSECONDARY)
elif not h.is_primary:
flag = int(FSUPPLEMENTARY)
else:
flag = 0
leftclip = '{}S'.format(h.q_st) if h.q_st > 0 else ''
rightclip = '{}S'.format(len(seq) - h.q_en) if h.q_en < len(seq) else ''
if h.strand > 0:
seq_f = seq
qual_f = qual
else:
seq_f = mappy.revcomp(seq)
qual_f = qual[::-1]
leftclip, rightclip = rightclip, leftclip
flag |= FREVERSE
fullcigar = leftclip + h.cigar_str + rightclip
yield (name, flag, h.ctg, h.r_st + 1, h.mapq, fullcigar, '*',
0, 0, seq_f, qual_f, 'NM:i:{}'.format(h.NM))
def map_and_write(self, streamid, name, seq, qual, adapter_length):
writer = self.writers[streamid]
mapped_seqname = None
if adapter_length > 0:
seq = seq[:-adapter_length]
qual = qual[:-adapter_length]
for row in self.map(name, seq, qual):
if mapped_seqname is None:
mapped_seqname = row[2]
writer.write(row)
if mapped_seqname is not None and not mapped_seqname.startswith('|'):
mapped_seqname = mapped_seqname.split('|')[0]
return mapped_seqname
def process(self, results):
mapped_seqs = defaultdict(list)
failed_counts = defaultdict(int)
unmapped_counts = defaultdict(int)
for result in results:
barcode = result.get('barcode')
streamid = result.get('label', 'fail'), barcode
if result.get('sequence') is None or 'read_id' not in result:
failed_counts[barcode] += 1
else:
mapped = self.map_and_write(streamid, result['read_id'], *result['sequence'])
if mapped == '*':
unmapped_counts[barcode] += 1
else:
mapped_seqs[barcode].append(mapped)
return {'mapped': mapped_seqs, 'failed': failed_counts, 'unmapped': unmapped_counts}
|
the-stack_106_16328
|
import argparse
import numpy as np
import os
import torch
import torch.nn as nn
from torchvision import datasets, transforms
from models import *
# Prune settings
parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR prune')
parser.add_argument('--dataset', type=str, default='cifar10',
help='training dataset (default: cifar10)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--depth', type=int, default=16,
help='depth of the vgg')
parser.add_argument('--model', default='', type=str, metavar='PATH',
help='path to the model (default: none)')
parser.add_argument('--save', default='.', type=str, metavar='PATH',
help='path to save pruned model (default: none)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if not os.path.exists(args.save):
os.makedirs(args.save)
model = vgg(dataset=args.dataset, depth=args.depth)
if args.cuda:
model.cuda()
if args.model:
if os.path.isfile(args.model):
print("=> loading checkpoint '{}'".format(args.model))
checkpoint = torch.load(args.model)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
.format(args.model, checkpoint['epoch'], best_prec1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
print('Pre-processing Successful!')
# simple test model after Pre-processing prune (simple set BN scales to zeros)
def test(model):
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
if args.dataset == 'cifar10':
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
elif args.dataset == 'cifar100':
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data.cifar100', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
else:
raise ValueError("No valid dataset is given.")
model.eval()
correct = 0.
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
with torch.no_grad():
output = model(data)
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
print('\nTest set: Accuracy: {}/{} ({:.1f}%)\n'.format(
correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
return correct / float(len(test_loader.dataset))
acc = test(model)
cfg = [32, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 256, 256, 256, 'M', 256, 256, 256]
cfg_mask = []
layer_id = 0
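# Build a per-layer channel mask: for every conv layer, rank output channels by
# the L1 norm of their filter weights and keep only the top cfg[layer_id] channels.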
for m in model.modules():
if isinstance(m, nn.Conv2d):
out_channels = m.weight.data.shape[0]
if out_channels == cfg[layer_id]:
cfg_mask.append(torch.ones(out_channels))
layer_id += 1
continue
weight_copy = m.weight.data.abs().clone()
weight_copy = weight_copy.cpu().numpy()
L1_norm = np.sum(weight_copy, axis=(1, 2, 3))
arg_max = np.argsort(L1_norm)
arg_max_rev = arg_max[::-1][:cfg[layer_id]]
assert arg_max_rev.size == cfg[layer_id], "size of arg_max_rev not correct"
mask = torch.zeros(out_channels)
mask[arg_max_rev.tolist()] = 1
cfg_mask.append(mask)
layer_id += 1
elif isinstance(m, nn.MaxPool2d):
layer_id += 1
newmodel = vgg(dataset=args.dataset, cfg=cfg)
if args.cuda:
newmodel.cuda()
start_mask = torch.ones(3)
layer_id_in_cfg = 0
end_mask = cfg_mask[layer_id_in_cfg]
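# Copy the surviving weights into the new (smaller) model. start_mask selects the
# input channels kept from the previous layer, end_mask the output channels kept
# in the current layer; BatchNorm statistics and conv kernels are sliced accordingly.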
for [m0, m1] in zip(model.modules(), newmodel.modules()):
if isinstance(m0, nn.BatchNorm2d):
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
if idx1.size == 1:
idx1 = np.resize(idx1,(1,))
m1.weight.data = m0.weight.data[idx1.tolist()].clone()
m1.bias.data = m0.bias.data[idx1.tolist()].clone()
m1.running_mean = m0.running_mean[idx1.tolist()].clone()
m1.running_var = m0.running_var[idx1.tolist()].clone()
layer_id_in_cfg += 1
start_mask = end_mask
if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC
end_mask = cfg_mask[layer_id_in_cfg]
elif isinstance(m0, nn.Conv2d):
idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
print('In shape: {:d}, Out shape {:d}.'.format(idx0.size, idx1.size))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
if idx1.size == 1:
idx1 = np.resize(idx1, (1,))
w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()
w1 = w1[idx1.tolist(), :, :, :].clone()
m1.weight.data = w1.clone()
elif isinstance(m0, nn.Linear):
if layer_id_in_cfg == len(cfg_mask):
idx0 = np.squeeze(np.argwhere(np.asarray(cfg_mask[-1].cpu().numpy())))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
m1.weight.data = m0.weight.data[:, idx0].clone()
m1.bias.data = m0.bias.data.clone()
layer_id_in_cfg += 1
continue
m1.weight.data = m0.weight.data.clone()
m1.bias.data = m0.bias.data.clone()
elif isinstance(m0, nn.BatchNorm1d):
m1.weight.data = m0.weight.data.clone()
m1.bias.data = m0.bias.data.clone()
m1.running_mean = m0.running_mean.clone()
m1.running_var = m0.running_var.clone()
torch.save({'cfg': cfg, 'state_dict': newmodel.state_dict()}, os.path.join(args.save, 'pruned.pth.tar'))
print(newmodel)
model = newmodel
acc = test(model)
num_parameters = sum([param.nelement() for param in newmodel.parameters()])
with open(os.path.join(args.save, "prune.txt"), "w") as fp:
fp.write("Number of parameters: \n"+str(num_parameters)+"\n")
fp.write("Test accuracy: \n"+str(acc)+"\n")
|
the-stack_106_16335
|
#!/usr/bin/env python
# encoding: utf-8
import os
import sys
import subprocess
sys.path.append( os.path.join(os.path.dirname(__file__), '..') )
from plasma.client.client import Client
from plasma_tools.config import tools_config
def process_cmd(command, raise_exception=True):
command = "python plasma_tools/cli.py %s" % command
print("cmd: " + command)
status, output = subprocess.getstatusoutput(command)
if status != 0 and raise_exception:
raise Exception("None zero return code")
print(output)
return status, output
client = Client()
def main():
# process_cmd("submitblock 3bb369fecdc16b93b99514d8ed9c2e87c5824cf4a6a98d2e8e91b7dd0c063304")
utxos = client.get_utxo(sys.argv[2], "latest")
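    # Scan the sender's UTXOs for one denominated in the configured ERC20 contract
    # with at least 1 unit, then build a sendtx that pays 1 unit to the recipient
    # and returns any change to the sender.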
for blknum, txindex, oindex, contractaddress, amount, tokenid in utxos:
if contractaddress.lower() == tools_config["ERC20_CONTRACT_ADDRESS"][2:].lower():
if amount >= 1:
process_cmd("sendtx {0} {1} {2} 0 0 0 {4} {5} 1 0 {6} {8} {3} 0 {7} {7}".format(
blknum, txindex, oindex,
amount - 1, sys.argv[1],
tools_config["ERC20_CONTRACT_ADDRESS"],
sys.argv[2] if amount - 1 > 0 else "0x0", sys.argv[3],
tools_config["ERC20_CONTRACT_ADDRESS"] if amount - 1 > 0 else "0x0"
))
break
else:
raise ValueError("no available utxo")
if __name__ == '__main__':
main()
|
the-stack_106_16338
|
# -*- test-case-name: xquotient.test.historic.test_mta3to4 -*-
from axiom.test.historic.stubloader import saveStub
from axiom.dependency import installOn
from xquotient.mail import MailTransferAgent
def createDatabase(store):
"""
Create a MailTransferAgent with both SMTP and SMTP/SSL configured in the
given Store.
"""
mta = MailTransferAgent(
store=store,
portNumber=5025, securePortNumber=5465,
certificateFile='server.pem',
messageCount=432,
domain='example.net')
store.dbdir.child('server.pem').setContent('--- PEM ---\n')
installOn(mta, store)
if __name__ == '__main__':
saveStub(createDatabase, 11023)
|
the-stack_106_16340
|
from sqlalchemy.orm import mapper, relationship
from sqlalchemy import Table, Column, Date, Integer, String, MetaData, ForeignKey
from domain import model
metadata = MetaData()
order_lines = Table(
'order_lines', metadata,
Column('id', Integer, primary_key=True, autoincrement=True),
Column('orderid', String(255)),
Column('sku', String(255)),
Column('qty', Integer, nullable=False),
)
batches = Table(
'batches', metadata,
Column('id', Integer, primary_key=True, autoincrement=True),
Column('reference', String(255)),
Column('sku', String(255)),
Column('_purchased_quantity', Integer, nullable=False),
Column('eta', Date, nullable=True),
)
allocations = Table(
'allocations', metadata,
Column('id', Integer, primary_key=True, autoincrement=True),
Column('orderline_id', ForeignKey('order_lines.id')),
Column('batch_id', ForeignKey('batches.id')),
)
def start_mappers():
lines_mapper = mapper(model.OrderLine, order_lines)
mapper(model.Batch, batches, properties={
'_allocations': relationship(
lines_mapper,
secondary=allocations,
collection_class=set,
)
})
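# Usage sketch (illustrative, not part of the original module): call start_mappers()
# once at start-up, then bind the metadata to an engine before opening sessions.
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#
#   engine = create_engine("sqlite:///:memory:")
#   start_mappers()
#   metadata.create_all(engine)
#   session = sessionmaker(bind=engine)()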
|
the-stack_106_16341
|
from django.urls import NoReverseMatch
from django.utils import html
from corehq.apps.api.es import ReportCaseESView, ReportFormESView
from corehq.apps.es import filters
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.filters.base import BaseSingleOptionFilter
from corehq.apps.users.models import CommCareUser
from corehq.elastic import SIZE_LIMIT
from pact.enums import PACT_DOMAIN, PACT_HP_CHOICES, PACT_DOT_CHOICES, PACT_CASE_TYPE
from pact.reports import PactElasticTabularReportMixin
from pact.reports.dot import PactDOTReport
from pact.reports.patient import PactPatientInfoReport
from pact.utils import query_per_case_submissions_facet, get_base_case_es_query
class PactPrimaryHPField(BaseSingleOptionFilter):
slug = "primary_hp"
label = "PACT HPs"
default_text = "All CHWs"
@property
def options(self):
chws = list(self.get_chws())
return [(c['val'], c['text']) for c in chws]
@classmethod
def get_chws(cls):
users = CommCareUser.by_domain(PACT_DOMAIN)
for x in users:
#yield dict(val=x._id, text=x.raw_username)
yield dict(val=x.raw_username, text=x.raw_username)
# self.options = [dict(val=case['_id'], text="(%s) - %s" % (case['pactid'], case['name'])) for case in patient_cases]
class HPStatusField(BaseSingleOptionFilter):
slug = "hp_status"
label = "HP Status"
default_text = "All Active HP"
ANY_HP = "any_hp"
@property
def options(self):
options = [(self.ANY_HP, "All Active HP")]
options.extend(PACT_HP_CHOICES)
return options
class DOTStatus(BaseSingleOptionFilter):
slug = "dot_status"
label = "DOT Status"
default_text = "All"
ANY_DOT = "any_dot"
@property
def options(self):
options = [(self.ANY_DOT, "Any DOT")]
options.extend(PACT_DOT_CHOICES[:3])
return options
class PatientListDashboardReport(PactElasticTabularReportMixin):
name = "All Patients"
slug = "patients"
ajax_pagination = True
asynchronous = True
default_sort = {"pactid": "asc"}
report_template_path = "reports/tabular.html"
flush_layout = True
fields = [
'pact.reports.patient_list.PactPrimaryHPField',
'pact.reports.patient_list.HPStatusField',
'pact.reports.patient_list.DOTStatus',
]
case_es = ReportCaseESView(PACT_DOMAIN)
xform_es = ReportFormESView(PACT_DOMAIN)
def get_pact_cases(self):
query = (get_base_case_es_query(0, None)
.source(['_id', 'name', 'pactid.#value'])
.raw_query)
results = self.case_es.run_query(query)
for res in results['hits']['hits']:
yield res
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn("PACT ID", prop_name="pactid.#value"),
DataTablesColumn("Name", prop_name="name", sortable=False, span=3),
DataTablesColumn("Primary HP", prop_name="hp.#value"),
DataTablesColumn("Opened On", prop_name="opened_on"),
DataTablesColumn("Last Modified", prop_name="modified_on"),
DataTablesColumn("HP Status", prop_name="hp_status.#value"),
DataTablesColumn("DOT Status", prop_name='dot_status.#value'),
DataTablesColumn("Status", prop_name="closed"),
DataTablesColumn("Submissions", sortable=False),
)
return headers
def case_submits_facet_dict(self, limit):
query = query_per_case_submissions_facet(self.request.domain, limit=limit)
results = self.xform_es.run_query(query)
case_id_count_map = {}
for f in results['facets']['case_submissions']['terms']:
case_id_count_map[f['term']] = f['count']
return case_id_count_map
@property
def rows(self):
"""
Override this method to create a functional tabular report.
Returns 2D list of rows.
[['row1'],[row2']]
"""
def _format_row(row_field_dict):
yield row_field_dict.get("pactid.#value", '---').replace('_', ' ').title()
yield self.pact_case_link(row_field_dict['_id'], row_field_dict.get("name", "---")),
yield row_field_dict.get("hp.#value", "---")
yield self.format_date(row_field_dict.get("opened_on"))
yield self.format_date(row_field_dict.get("modified_on"))
yield self.render_hp_status(row_field_dict.get("hp_status.#value"))
yield self.pact_dot_link(row_field_dict['_id'], row_field_dict.get("dot_status.#value"))
#for closed on, do two checks:
if row_field_dict.get('closed', False):
#it's closed
yield "Closed (%s)" % self.format_date(row_field_dict.get('closed_on'))
else:
yield "Active"
yield facet_dict.get(row_field_dict['_id'], 0)
res = self.es_results
if 'error' in res:
pass
else:
#hack, do a facet query here
facet_dict = self.case_submits_facet_dict(SIZE_LIMIT)
for result in res['hits']['hits']:
yield list(_format_row(result))
@property
def es_results(self):
fields = [
"_id",
"name",
"pactid.#value",
"opened_on",
"modified_on",
"hp_status.#value",
"hp.#value",
"dot_status.#value",
"closed_on",
"closed"
]
full_query = (get_base_case_es_query(self.pagination.start, self.pagination.count)
.filter(filters.term('type', PACT_CASE_TYPE))
.source(fields)
)
def status_filtering(slug, field, prefix, any_field, default):
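            # Narrow the query by status: values carrying the status prefix (or the
            # catch-all "any" option) become a prefix query on the field, anything
            # else falls back to an exact lower-cased term match.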
            nonlocal full_query
            if self.request.GET.get(slug, None) is not None:
field_status_filter_query = self.request.GET[slug]
if field_status_filter_query == "":
#silly double default checker here - set default or the any depending on preference
field_status_filter_query = default
if field_status_filter_query is None:
return
else:
if field_status_filter_query.startswith(prefix):
field_status_prefix = field_status_filter_query
elif field_status_filter_query == any_field:
field_status_prefix = prefix
else:
field_status_prefix = None
full_query = full_query.filter(filters.term(field, field_status_filter_query.lower()))
if field_status_prefix is not None:
field_filter = {"prefix": {field: field_status_prefix.lower()}}
full_query = full_query.add_query(field_filter)
status_filtering(DOTStatus.slug, "dot_status.#value", "DOT", DOTStatus.ANY_DOT, None)
status_filtering(HPStatusField.slug, "hp_status.#value", "HP", HPStatusField.ANY_HP, HPStatusField.ANY_HP)
#primary_hp filter from the user filter
if self.request.GET.get(PactPrimaryHPField.slug, "") != "":
primary_hp_term = self.request.GET[PactPrimaryHPField.slug]
primary_hp_filter = {"term": {"hp.#value": primary_hp_term}}
full_query = full_query.filter(filters.term("hp.#value", primary_hp_term))
full_query['sort'] = self.get_sorting_block()
return self.case_es.run_query(full_query.raw_query)
def pact_case_link(self, case_id, name):
try:
return html.mark_safe("<a class='ajax_dialog' href='%s'>%s</a>" % (
html.escape(
PactPatientInfoReport.get_url(*[self.domain]) + "?patient_id=%s" % case_id),
html.escape(name),
))
except NoReverseMatch:
return "%s (bad ID format)" % name
def render_hp_status(self, status):
if status is None or status == '':
return ''
else:
if status.lower() == 'discharged':
css = 'label'
else:
css = 'label label-info'
return '<span class="%s">%s</span>' % (css, status)
def pact_dot_link(self, case_id, status):
if status is None or status == '':
return ''
try:
return html.mark_safe("<span class='label label-info'>%s</span> <a class='ajax_dialog' href='%s'>Report</a>" % (
html.escape(status),
html.escape(
PactDOTReport.get_url(*[self.domain]) + "?dot_patient=%s" % case_id),
))
except NoReverseMatch:
return "%s (bad ID format)" % status
|
the-stack_106_16342
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, opt, ntoken):
super(RNNModel, self).__init__()
self.opt = opt
# set hyperparameters from opt
rnn_type = opt['rnn_class']
ninp = opt['embeddingsize']
nhid = opt['hiddensize']
nlayers = opt['numlayers']
dropout = opt['dropout']
tie_weights = opt['emb_tied']
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp, padding_idx=0)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError(
"An invalid option for `--model` was supplied, options are "
"['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']"
)
self.rnn = nn.RNN(
ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout
)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for
# Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError(
'When using the tied flag, nhid must be equal to emsize'
)
self.decoder.weight = self.encoder.weight
# initialize the weights of the model
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def forward(self, input, hidden, no_pack=False):
emb = self.drop(self.encoder(input))
# if eval, pack padded sequence (we don't pack during training because
# we have no padding in our input samples)
if not self.training and not no_pack:
emb_lens = [x for x in torch.sum((input > 0).int(), dim=0).data]
emb_packed = pack_padded_sequence(emb, emb_lens, batch_first=False)
packed_output, hidden = self.rnn(emb_packed, hidden)
output, _ = pad_packed_sequence(packed_output, batch_first=False)
else:
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(
output.view(output.size(0) * output.size(1), output.size(2))
)
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
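# Usage sketch (illustrative only; the opt keys simply mirror the constructor above):
#
#   opt = {'rnn_class': 'LSTM', 'embeddingsize': 128, 'hiddensize': 256,
#          'numlayers': 2, 'dropout': 0.1, 'emb_tied': False}
#   model = RNNModel(opt, ntoken=1000)
#   hidden = model.init_hidden(bsz=4)
#   tokens = torch.ones(10, 4, dtype=torch.long)   # (seq_len, batch) of token ids
#   scores, hidden = model(tokens, hidden, no_pack=True)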
|
the-stack_106_16343
|
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from itertools import chain
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode
class Cliconf(CliconfBase):
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'asa'
reply = self.get(b'show version')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'Version (\S+),', data)
if match:
device_info['network_os_version'] = match.group(1)
match = re.search(r'^Model Id:\s+(.+) \(revision', data, re.M)
if match:
device_info['network_os_model'] = match.group(1)
match = re.search(r'^(.+) up', data, re.M)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
@enable_mode
def get_config(self, source='running', format='text'):
if source not in ('running', 'startup'):
return self.invalid_params("fetching configuration from %s is not supported" % source)
if source == 'running':
cmd = b'show running-config all'
else:
cmd = b'show startup-config'
return self.send_command(cmd)
@enable_mode
def edit_config(self, command):
for cmd in chain([b'configure terminal'], to_list(command), [b'end']):
self.send_command(cmd)
def get(self, command, prompt=None, answer=None, sendonly=False, check_all=False):
return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, check_all=check_all)
def get_capabilities(self):
result = {}
result['rpc'] = self.get_base_rpc()
result['network_api'] = 'cliconf'
result['device_info'] = self.get_device_info()
return json.dumps(result)
|
the-stack_106_16345
|
# -*- coding: utf-8 -*-
"""
Tests of DAK handling.
"""
import sys
import shutil
import time
import platform
import os
import json
import unittest
import datetime
# Disable all import warnings since the imports are pretty hacky
#pylint: disable=import-error,wrong-import-order,wrong-import-position
sys.path.append('../lib') # add lib to path for unit-testing
sys.path.append('..') # add parent to path for unit-testing
sys.path.append('./lib') # add lib to path for unit-testing
sys.path.append('.') # add current dir to path for unit-testing
import xmlschema
import jsonpickle
from openpyxl import load_workbook
from flask import Flask, render_template
from jsonreport import JsonReport
from dakdata import DakData, Deltagare, Sammankomst
if platform.system() == 'Windows':
# Add app engine paths on windows.
sys.path.append("C:/Program Files (x86)/Google/google_appengine")
sys.path.append("C:/Program Files (x86)/Google/google_appengine/lib")
sys.path.append("c:/Program Files (x86)/Google/google_appengine/google/appengine/api")
sys.path.append("c:/Program Files (x86)/Google/google_appengine/google/appengine")
elif platform.system() == 'Darwin': # i.e. MacOS
BASE = "/usr/local/Caskroom/google-cloud-sdk/latest/google-cloud-sdk/"
sys.path.append(BASE + "platform/google_appengine/lib/fancy_urllib")
sys.path.append(BASE + "platform/google_appengine/lib")
sys.path.append(BASE + "platform/google_appengine/api")
sys.path.append(BASE + "platform/google_appengine")
sys.path.append(BASE + "lib/third_party")
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from excelreport import ExcelReport
from data import Semester
class XmlValidator():
"XML validation"
def __init__(self, xsdPath):
self.xmlschema = xmlschema.XMLSchema(xsdPath)
def validate(self, xml):
"Run the validator"
self.xmlschema.validate(xml)
def create_dak_data():
"Creates test DAK data"
dak = DakData()
dak.foerenings_namn = u"Test Scoutkår"
dak.forenings_id = "1111"
dak.organisationsnummer = "556677-8899"
dak.kommun_id = "0"
dak.kort.namn_paa_kort = "Testavdelning"
dak.kort.naervarokort_nummer = "1"
dak.kort.aktivitet = u'Moete'
#
dak.kort.ledare.append(Deltagare("1234", "Adam", "Adamsson", u"198501011234", True, "[email protected]", "12345678", u"Göteborg"))
dak.kort.deltagare.append(Deltagare("1235", "Bertil", "Bertilsson", u"200501011234", False, "[email protected]", "12345678", u"Göteborg"))
dak.kort.deltagare.append(Deltagare("1236", "Ada", "Adasson", u"200601011244", False, "[email protected]", "12345679", u"Göteborg"))
dak.kort.deltagare.append(Deltagare("1237", "Ceda", "Cedasson", u"200701011244", False, "[email protected]", "12345679", u"Göteborg"))
sammankomst1 = Sammankomst(u"123", datetime.datetime(2019, 1, 1, 18, 30), 90, u"Möte")
sammankomst1.ledare.append(dak.kort.ledare[0])
sammankomst1.deltagare.append(dak.kort.deltagare[0])
sammankomst1.deltagare.append(dak.kort.deltagare[1])
sammankomst1.deltagare.append(dak.kort.deltagare[2])
dak.kort.sammankomster.append(sammankomst1)
sammankomst2 = Sammankomst(u"123", datetime.datetime(2019, 1, 7, 18, 30), 90, u"Möte")
sammankomst2.ledare.append(dak.kort.ledare[0])
sammankomst2.deltagare.append(dak.kort.deltagare[0])
sammankomst2.deltagare.append(dak.kort.deltagare[1])
sammankomst2.deltagare.append(dak.kort.deltagare[2])
dak.kort.sammankomster.append(sammankomst2)
sammankomst3 = Sammankomst(u"123", datetime.datetime(2019, 1, 14, 18, 30), 90, u"Möte")
sammankomst3.ledare.append(dak.kort.ledare[0])
sammankomst3.deltagare.append(dak.kort.deltagare[0])
sammankomst3.deltagare.append(dak.kort.deltagare[2])
dak.kort.sammankomster.append(sammankomst3)
return dak
def create_flask_app(cfg=None):
"Creates small app for the unit test"
template_path = os.path.join(os.path.dirname(__file__), '..', 'templates')
app = Flask(cfg, template_folder=template_path)
# app.config['DEBUG'] = True
# app.config['SERVER_NAME'] = 'localhost'
return app
class TestJsonReport(unittest.TestCase):
"DAK handling tests"
outputDir = 'derived'
expectedDir = 'expected'
@classmethod
def setUpClass(cls):
output_dir_full = os.path.join(os.path.dirname(__file__), cls.outputDir)
if not os.path.exists(output_dir_full):
os.makedirs(output_dir_full)
time.sleep(0.05)
@classmethod
def tearDownClass(cls):
output_dir_full = os.path.join(os.path.dirname(__file__), cls.outputDir)
os.rmdir(output_dir_full)
def save_and_check(self, generated_data, expected_file, generated_file, check_xsd=False, force=False):
"Save the data and read it back and check with expected file"
expected_path = os.path.join(os.path.dirname(__file__), TestJsonReport.expectedDir, expected_file)
generated_path = os.path.join(os.path.dirname(__file__), TestJsonReport.outputDir, generated_file)
with open(generated_path, "wb") as filep:
filep.write(generated_data)
if check_xsd:
self.validate.validate(generated_path)
if force:
shutil.copyfile(generated_path, expected_path)
with open(expected_path, "rb") as filep:
expected_text = filep.read()
with open(generated_path, "rb") as filep:
generated_text = filep.read()
self.assertEqual(expected_text, generated_text)
os.remove(generated_path)
def setUp(self):
# Google app engine testbed setup.
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
# Clear ndb's in-context cache between tests.
# This prevents data from leaking between tests.
# Alternatively, you could disable caching by
# using ndb.get_context().set_cache_policy(False)
ndb.get_context().clear_cache()
# Flask app test setup.
xsd_path = os.path.join(os.path.dirname(__file__), '..', 'templates', 'importSchema.xsd')
self.validate = XmlValidator(xsd_path)
self.dak = create_dak_data()
self.app = create_flask_app('mytest')
# self.app_context = self.app.app_context()
# self.app_context.push()
self.client = self.app.test_client()
@self.app.route("/")
def hello(): # pylint: disable=unused-variable
return render_template('dak.xml', dak=self.dak)
def tearDown(self):
self.testbed.deactivate()
def test_dak_json_export(self):
"Test json report of DAK data"
semester = Semester.create(2019, False) # 2019, vt
jsonpickle.set_encoder_options('json', indent=4)
jsonreport = JsonReport(self.dak, semester)
stream = jsonreport.get_json(unpicklable=False, warn=True)
data = json.loads(stream)
self.assertEqual(data[u'foerenings_namn'], u"Test Scoutkår")
self.assertEqual(data[u'forenings_id'], u"1111")
self.assertEqual(data[u'kort'][u'namn_paa_kort'], "Testavdelning")
self.assertEqual(data[u'kort'][u'sammankomster'][0]['deltagare'][0]['uid'], "1235")
self.assertEqual(data[u'kort'][u'sammankomster'][0]['deltagare'][1]['uid'], "1236")
self.assertEqual(data[u'kort'][u'sammankomster'][0]['ledare'][0]['uid'], "1234")
self.assertEqual(data[u'kort'][u'naervarokort_nummer'], "1")
self.save_and_check(stream, 'dak_json_export.json', 'dak_json_export.json')
stream = jsonreport.get_report_string()
data = jsonpickle.decode(stream)
self.assertEqual(data.foerenings_namn, u"Test Scoutkår")
self.assertEqual(data.forenings_id, u"1111")
self.assertEqual(data.kort.namn_paa_kort, "Testavdelning")
self.assertEqual(data.kort.sammankomster[0].deltagare[0].uid, "1235")
self.assertEqual(data.kort.sammankomster[0].deltagare[1].uid, "1236")
self.assertEqual(data.kort.sammankomster[0].ledare[0].uid, "1234")
self.assertEqual(data.kort.naervarokort_nummer, "1")
self.save_and_check(stream, 'dak_json_pickable_export.json', 'dak_json_pickable_export.json', force=True)
def test_dak_xml_export(self):
"Test XML report of DAK data"
response = self.client.get('/')
self.save_and_check(response.data, 'dak_xml_export.xml', 'dak_xml_export.xml', check_xsd=True)
def test_dak_excel_export(self):
"Test excel report of DAK data"
current_semester = Semester.create(2019, False) # 2019, vt
excel_report = ExcelReport(self.dak, current_semester)
result_bytes = excel_report.getFilledInExcelSpreadsheet()
generated_path = os.path.join(os.path.dirname(__file__), TestJsonReport.outputDir, 'dak_excel_export.xlsx')
with open(generated_path, "wb") as filep:
filep.write(result_bytes)
workbook = load_workbook(generated_path)
worksheets = workbook.worksheets[0]
self.assertEqual(self.dak.kort.naervarokort_nummer, worksheets['E1'].value)
self.assertEqual(current_semester.year, worksheets['I1'].value)
self.assertEqual(self.dak.kort.namn_paa_kort, worksheets['D2'].value)
self.assertEqual("Scouting", worksheets['D3'].value)
self.assertEqual(self.dak.kort.lokal, worksheets['D4'].value)
if current_semester.ht:
self.assertEqual(worksheets['C6'].value, None)
self.assertEqual(worksheets['C7'].value, 'X')
else:
self.assertEqual(worksheets['C6'].value, 'X')
self.assertEqual(worksheets['C7'].value, None)
row = 13
for deltagaren in self.dak.kort.deltagare:
self.assertEqual(deltagaren.foernamn + " " + deltagaren.efternamn, worksheets['B' + str(row)].value)
self.assertEqual('K' if deltagaren.is_female() else 'M', worksheets['H' + str(row)].value)
self.assertEqual(deltagaren.personnummer[0:8], worksheets['J' + str(row)].value)
self.assertEqual(1, worksheets['K' + str(row)].value)
self.assertEqual(1, worksheets['L' + str(row)].value)
row += 1
self.assertEqual(1, worksheets['M13'].value)
self.assertEqual(None, worksheets['M14'].value)
self.assertEqual(1, worksheets['M15'].value)
os.remove(generated_path)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_16346
|
import coremltools as ct
import numpy as np
import tensorflow as tf
path = r"C:\Users\Administrator\Desktop\yolov4-keras_stapler\pb\saved_model.pb"
# Load the protobuf file from the disk and parse it to retrieve the
# graph_def
with tf.io.gfile.GFile(path, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
# Import the graph_def into a new Graph
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name="")
ops = graph.get_operations()
N = len(ops)
# print all the placeholder ops, these would be the inputs
print("Inputs ops: ")
for op in ops:
if op.type == "Placeholder":
print("op name: {}, output shape : {}".
format(op.name, op.outputs[0].get_shape()))
# print all the tensors that are the first output of an op
# and do not feed into any other op
# these are prospective outputs
print("\nProspective output tensor(s): ", )
sink_ops = []
input_tensors = set()
for op in ops:
for x in op.inputs:
if x.name not in input_tensors:
input_tensors.add(x.name)
for op in ops:
if len(op.outputs) > 0:
x = op.outputs[0]
if x.name not in input_tensors:
print("tensor name: {}, tensor shape : {}, parent op type: {}"
.format(x.name, x.get_shape(), op.type))
x = np.random.rand(1, 224, 224, 3)
with tf.compat.v1.Session(graph=graph) as sess:
tf_out = sess.run('MobilenetV2/Predictions/Reshape_1:0',
feed_dict={'input:0': x})
mlmodel = ct.convert(graph,
inputs=[ct.TensorType(shape=x.shape)])
# Core ML model prediction
coreml_out_dict = mlmodel.predict({"input" : x}, useCPUOnly=True)
coreml_out = list(coreml_out_dict.values())[0]
np.testing.assert_allclose(tf_out, coreml_out, rtol=1e-3, atol=1e-2)
|
the-stack_106_16347
|
import asyncio
import logging
import types
from collections import defaultdict
from typing import Dict
logger = logging.getLogger(__name__)
class EventManager:
__subscribers: Dict = defaultdict(set)
@classmethod
def subscribe(cls, subscriber, event):
cls.__subscribers[event].add(subscriber)
@classmethod
async def trigger(cls, event):
async_tasks = []
for subscriber in cls.__subscribers.get(event.__class__, []):
task = asyncio.create_task(subscriber(event))
async_tasks.append(task)
await asyncio.gather(*async_tasks)
@classmethod
def _reset(cls):
"""Never call this outside tests!"""
cls.__subscribers = defaultdict(set)
@classmethod
def subscribers(cls) -> Dict:
return dict(cls.__subscribers)
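if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module):
    # subscribers are async callables registered per event *class*; triggering an
    # event instance runs every subscriber for that class concurrently.
    class UserCreated:
        pass
    async def send_welcome_email(event):
        logger.info("sending welcome email for %s", event)
    EventManager.subscribe(send_welcome_email, UserCreated)
    asyncio.run(EventManager.trigger(UserCreated()))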
|
the-stack_106_16349
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 19:35:05 2021
@author: Jireh Jam
"""
from __future__ import print_function, division
from keras.applications import VGG19
from keras.layers import Input, Dense, Flatten, Dropout, Concatenate, Multiply, Lambda, Add
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D,MaxPooling2D,Conv2DTranspose
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import multi_gpu_model
from keras import backend as K
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import datetime
import time
import gc
import random
class RMNETWGAN():
def __init__(self,config):
#Input shape
self.img_width=config.img_width
self.img_height=config.img_height
self.channels=config.channels
self.mask_channles = config.mask_channels
self.img_shape=(self.img_width, self.img_height, self.channels)
self.img_shape_mask=(self.img_width, self.img_height, self.mask_channles)
self.missing_shape = (self.img_width, self.img_height, self.channels)
self.num_epochs = config.num_epochs
self.batch_size = config.batch_size
self.start_time = time.time()
self.end_time = time.time()
self.sample_interval = config.sample_interval
        self.current_epoch = config.current_epoch
        self.epoch = 0  # running epoch counter, incremented by train()
self.last_trained_epoch = config.last_trained_epoch
#Folders
self.dataset_name = 'RMNet_WACV2021'
self.models_path = 'models'
#Configure Loader
self.img_dir = r'./images/train/celebA_HQ_train/'
self.masks_dir = r'./masks/train/qd_imd/train/'
self.imgs_in_path = os.listdir(self.img_dir)
self.masks_in_path = os.listdir(self.masks_dir)
# Number of filters in the first layer of G and D
self.gf = config.gf
self.df = config.gf
self.continue_train = True
#Optimizer
self.g_optimizer = Adam(lr=config.g_learning_rate,
beta_1=config.beta_1,
beta_2=config.beta_2,
epsilon=config.epsilon)
self.d_optimizer = Adam(lr=config.d_learning_rate,
beta_1=config.beta_1,
beta_2=config.beta_2,
epsilon=config.epsilon)
# =================================================================================== #
# 1. Build and compile the discriminator #
# =================================================================================== #
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss=[self.wasserstein_loss],
optimizer=self.d_optimizer,
metrics=['accuracy'])
# =================================================================================== #
# 2. Build the generator #
# =================================================================================== #
self.generator = self.build_generator()
# =================================================================================== #
# 3. The combined model (stacked generator and discriminator) #
# Trains the generator to fool the discriminator #
# =================================================================================== #
        # Build and compile the combined model first so it always exists, then try
        # to wrap it for multi-GPU training as a best effort.
        self.combined = self.build_gan(self.generator, self.discriminator)
        self.combined.compile(loss=[self.generator_loss, self.wasserstein_loss],
                              loss_weights=[1, 1e-3], optimizer=self.g_optimizer)
        try:
            self.multi_model = multi_gpu_model(self.combined, gpus=2)
            self.multi_model.compile(loss=[self.generator_loss, self.wasserstein_loss],
                                     loss_weights=[1.0, 1e-3], optimizer=self.g_optimizer)
        except Exception:
            self.multi_model = None
def build_gan(self, generator, discriminator):
#Generator takes mask and image as input
image = Input(shape=self.img_shape)
mask = Input(shape=self.img_shape_mask)
#Generator predicts image
gen_output = generator([image, mask])
#Train the generator only for the combined model
discriminator.trainable = False
#Descriminator validates the predicted image
# It takes generated images as input and determines validity
gen_img = Lambda(lambda x : x[:,:,:,0:3])(gen_output)
# print("this is generated image in shape {} ".format(gen_image.shape))
score = discriminator(gen_img)
model = Model([image, mask], [gen_output, score])
return model
# =================================================================================== #
# 4. Define the discriminator and generator losses #
# =================================================================================== #
def wasserstein_loss(self, y_true, y_pred):
return -K.mean(y_true * y_pred)
def generator_loss(self, y_true, y_pred):
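        # Perceptual loss: compare VGG19 block3_conv3 features of the predicted and
        # ground-truth images, plus the same comparison restricted to the region
        # selected by the reversed mask; the two terms are weighted 0.6 and 0.4.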
mask = Lambda(lambda x : x[:,:,:,3:])(y_true)
reversed_mask = Lambda(self.reverse_mask, output_shape=(self.img_shape_mask))(mask)
input_img = Lambda(lambda x : x[:,:,:,0:3])(y_true)
output_img = Lambda(lambda x : x[:,:,:,0:3])(y_pred)
vgg = VGG19(include_top=False, weights='imagenet', input_shape=self.img_shape)
loss_model = Model(inputs=vgg.input, outputs=vgg.get_layer('block3_conv3').output)
loss_model.trainable = False
p_loss = K.mean(K.square(loss_model(output_img) - loss_model(input_img)))
masking = Multiply()([reversed_mask,input_img])
predicting = Multiply()([reversed_mask, output_img])
reversed_mask_loss = (K.mean(K.square(loss_model(predicting) - loss_model(masking))))
new_loss = 0.6*(p_loss) + 0.4*reversed_mask_loss
return new_loss
# =================================================================================== #
# 5. Define the reverese mask #
# =================================================================================== #
def reverse_mask(self,x):
return 1-x
# =================================================================================== #
# 6. Define the generator #
# =================================================================================== #
def build_generator(self):
#compute inputs
input_img = Input(shape=(self.img_shape), dtype='float32', name='image_input')
input_mask = Input(shape=(self.img_shape_mask), dtype='float32',name='mask_input')
reversed_mask = Lambda(self.reverse_mask,output_shape=(self.img_shape_mask))(input_mask)
masked_image = Multiply()([input_img,reversed_mask])
#encoder
x =(Conv2D(self.gf,(5, 5), dilation_rate=2, input_shape=self.img_shape, padding="same",name="enc_conv_1"))(masked_image)
x =(LeakyReLU(alpha=0.2))(x)
x =(BatchNormalization(momentum=0.8))(x)
pool_1 = MaxPooling2D(pool_size=(2,2))(x)
x =(Conv2D(self.gf,(5, 5), dilation_rate=2, padding="same",name="enc_conv_2"))(pool_1)
x =(LeakyReLU(alpha=0.2))(x)
x =(BatchNormalization(momentum=0.8))(x)
pool_2 = MaxPooling2D(pool_size=(2,2))(x)
x =(Conv2D(self.gf*2, (5, 5), dilation_rate=2, padding="same",name="enc_conv_3"))(pool_2)
x =(LeakyReLU(alpha=0.2))(x)
x =(BatchNormalization(momentum=0.8))(x)
pool_3 = MaxPooling2D(pool_size=(2,2))(x)
x =(Conv2D(self.gf*4, (5, 5), dilation_rate=2, padding="same",name="enc_conv_4"))(pool_3)
x =(LeakyReLU(alpha=0.2))(x)
x =(BatchNormalization(momentum=0.8))(x)
pool_4 = MaxPooling2D(pool_size=(2,2))(x)
x =(Conv2D(self.gf*8, (5, 5), dilation_rate=2, padding="same",name="enc_conv_5"))(pool_4)
x =(LeakyReLU(alpha=0.2))(x)
x =(Dropout(0.5))(x)
#Decoder
x =(UpSampling2D(size=(2, 2), interpolation='bilinear'))(x)
x =(Conv2DTranspose(self.gf*8, (3, 3), padding="same",name="upsample_conv_1"))(x)
x = Lambda(lambda x: tf.pad(x,[[0,0],[0,0],[0,0],[0,0]],'REFLECT'))(x)
x =(Activation('relu'))(x)
x =(BatchNormalization(momentum=0.8))(x)
x =(UpSampling2D(size=(2, 2), interpolation='bilinear'))(x)
x = (Conv2DTranspose(self.gf*4, (3, 3), padding="same",name="upsample_conv_2"))(x)
x = Lambda(lambda x: tf.pad(x,[[0,0],[0,0],[0,0],[0,0]],'REFLECT'))(x)
x =(Activation('relu'))(x)
x =(BatchNormalization(momentum=0.8))(x)
x =(UpSampling2D(size=(2, 2), interpolation='bilinear'))(x)
x = (Conv2DTranspose(self.gf*2, (3, 3), padding="same",name="upsample_conv_3"))(x)
x = Lambda(lambda x: tf.pad(x,[[0,0],[0,0],[0,0],[0,0]],'REFLECT'))(x)
x =(Activation('relu'))(x)
x =(BatchNormalization(momentum=0.8))(x)
x =(UpSampling2D(size=(2, 2), interpolation='bilinear'))(x)
x = (Conv2DTranspose(self.gf, (3, 3), padding="same",name="upsample_conv_4"))(x)
x = Lambda(lambda x: tf.pad(x,[[0,0],[0,0],[0,0],[0,0]],'REFLECT'))(x)
x =(Activation('relu'))(x)
x =(BatchNormalization(momentum=0.8))(x)
x = (Conv2DTranspose(self.channels, (3, 3), padding="same",name="final_output"))(x)
x =(Activation('tanh'))(x)
decoded_output = x
reversed_mask_image = Multiply()([decoded_output, input_mask])
output_img = Add()([masked_image,reversed_mask_image])
concat_output_img = Concatenate()([output_img,input_mask])
model = Model(inputs = [input_img, input_mask], outputs = [concat_output_img])
print("====Generator Summary===")
model.summary()
return model
# =================================================================================== #
# 7. Define the discriminator #
# =================================================================================== #
def build_discriminator(self):
input_img = Input(shape=(self.missing_shape), dtype='float32', name='d_input')
dis = (Conv2D(self.df, kernel_size=3, strides=2, input_shape=self.missing_shape, padding="same"))(input_img)
dis = (LeakyReLU(alpha=0.2))(dis)
dis = (Dropout(0.25))(dis)
dis = (Conv2D(self.df*2, kernel_size=3, strides=2, padding="same"))(dis)
dis = (ZeroPadding2D(padding=((0,1),(0,1))))(dis)
dis = (BatchNormalization(momentum=0.8))(dis)
dis = (LeakyReLU(alpha=0.2))(dis)
dis = (Dropout(0.25))(dis)
dis = (Conv2D(self.df*4, kernel_size=3, strides=2, padding="same"))(dis)
dis = (BatchNormalization(momentum=0.8))(dis)
dis = (LeakyReLU(alpha=0.2))(dis)
dis = (Dropout(0.25))(dis)
dis = (Conv2D(self.df*8, kernel_size=3, strides=2, padding="same"))(dis)
dis = (BatchNormalization(momentum=0.8))(dis)
dis = (LeakyReLU(alpha=0.2))(dis)
dis = (Dropout(0.25))(dis)
dis = (Flatten())(dis)
dis = (Dense(1))(dis)
model = Model(inputs=[input_img], outputs=dis)
print("====Discriminator Summary===")
model.summary()
return model
# =================================================================================== #
# 8. Define the loading function #
# =================================================================================== #
def get_batch(self, imgs_index, batch_imgs):
if(imgs_index+batch_imgs) >= len(self.imgs_in_path):
batch_imgs = len(self.imgs_in_path)-imgs_index
real_imgs = np.zeros((batch_imgs, self.img_width, self.img_height,3))
masks = np.zeros((batch_imgs, self.img_width, self.img_height,1))
masked_imgs = np.zeros((batch_imgs, self.img_width, self.img_height,3))
masks_index = random.sample(range(1,len(self.masks_in_path)), batch_imgs)
maskindx = 0
for i in range(batch_imgs):
print("\rLoading image number "+ str(i) + " of " + str(len(self.imgs_in_path)), end = " ")
real_img = cv2.imread(self.img_dir + self.imgs_in_path[imgs_index], 1).astype('float')/ 127.5 -1
real_img = cv2.resize(real_img,(self.img_width, self.img_height))
#If masks bits are white, DO NOT subtract from 1.
#If masks bits are black, subtract from 1.
mask = 1-cv2.imread(self.masks_dir + self.masks_in_path[masks_index[maskindx]],0).astype('float')/ 255
mask = cv2.resize(mask,(self.img_width, self.img_height))
mask = np.reshape(mask,(self.img_width, self.img_height,1))
masks[i] = mask
real_imgs[i] = real_img
#masked_imgs[np.where((mask ==[1,1,1]).all(axis=2))]=[255,255,255]
masked_imgs[i][np.where(mask == 0)]=1
maskindx +=1
imgs_index +=1
if(imgs_index >= len(self.imgs_in_path)):
imgs_index = 0
# cv2.imwrite(os.path.join(path, 'mask_'+str(i)+'.jpg'),rawmask)
# cv2.imshow("mask",((masked_imgs[0]+1)* 127.5).astype("uint8"))
# cv2.waitKey(0 )
return imgs_index,real_imgs, masks,masked_imgs
# =================================================================================== #
# 8. Define the loading function #
# =================================================================================== #
def train(self):
# Ground truths for adversarial loss
valid = np.ones([self.batch_size, 1])
fake = -np.ones((self.batch_size, 1))
total_files= 27000
batch_imgs = 1000
imgs_index =0
dataLoads = total_files//batch_imgs
#self.generator.load_weights(r'./{}/{}/weight_{}.h5'.format(self.models_path, self.dataset_name, self.last_trained_epoch))
# print ( "Successfully loaded last check point" )
for epoch in range(1, self.num_epochs + 1):
for databatchs in range(dataLoads):
imgs_index,imgs, masks,masked_imgs = self.get_batch(imgs_index, batch_imgs)
batches = imgs.shape[0]//self.batch_size
global_step = 0
for batch in range(batches):
idx = np.random.permutation(imgs.shape[0])
idx_batches = idx[batch*self.batch_size:(batch+1)*self.batch_size]
gen_imgs=self.generator.predict([imgs[idx_batches],masks[idx_batches]], self.batch_size)
gen_imgs = gen_imgs[:,:,:,0:3]
# =================================================================================== #
# 8.2. Train the discriminator #
# =================================================================================== #
self.discriminator.trainable = True
d_loss_real = self.discriminator.train_on_batch(imgs[idx_batches], valid)
d_loss_fake = self.discriminator.train_on_batch(gen_imgs[:,:,:,0:3], fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# =================================================================================== #
# 8.3. Train the generator #
# =================================================================================== #
# Train the generator
self.discriminator.trainable = False
g_loss = self.combined.train_on_batch([imgs[idx_batches], masks[idx_batches]],
[K.stack([imgs[idx_batches], masks[idx_batches]], axis=-1),valid])
# =================================================================================== #
# 8.4. Plot the progress #
# =================================================================================== #
print ("Epoch: %d Batch: %d/%d dataloads: %d/%d [D loss: %f, op_acc: %.2f%%] [G loss: %f MSE loss: %f]" % (epoch+self.current_epoch,
batch, batches,databatchs,dataLoads, d_loss[0], 100*d_loss[1], g_loss[0], g_loss[1]))
idx_batches = idx[databatchs*self.batch_size:(databatchs+1)*self.batch_size]
imgs = imgs[idx]
masks = masks[idx]
input_img = np.expand_dims(imgs[0], 0)
input_mask = np.expand_dims(masks[0], 0)
if epoch % 1 == 0:
if not os.path.exists("{}/{}/".format(self.models_path, self.dataset_name)):
os.makedirs("{}/{}/".format(self.models_path, self.dataset_name))
name = "{}/{}/weight_{}.h5".format(self.models_path, self.dataset_name, epoch+self.current_epoch)
self.generator.save_weights(name)
if not os.path.exists(self.dataset_name):
os.makedirs(self.dataset_name,exist_ok=True)
predicted_img = self.generator.predict([input_img, input_mask])
self.sample_images(self.dataset_name, input_img, predicted_img[:,:,:,0:3],
input_mask, epoch)
print("Total Processing time:: {:4.2f}min" .format((self.end_time - self.start_time)/60))
self.epoch+=1
# =================================================================================== #
# 9. Sample images during training #
# =================================================================================== #
def sample_images(self, dataset_name,input_img, sample_pred, mask, epoch):
if not os.path.exists(self.dataset_name):
os.makedirs(self.dataset_name)
input_img = np.expand_dims(input_img[0], 0)
input_mask = np.expand_dims(mask[0], 0)
maskedImg = ((1 - input_mask)*input_img) + input_mask
img = np.concatenate((((maskedImg[0]+1)* 127.5).astype("uint8"),
((sample_pred[0]+1)* 127.5).astype("uint8"),
((input_img[0]+1)* 127.5).astype("uint8")),axis=1)
img_filepath = os.path.join(self.dataset_name, 'pred_{}.jpg'.format(epoch+self.current_epoch))
cv2.imwrite(img_filepath, img)
# =================================================================================== #
# 10. Plot the discriminator and generator losses #
# =================================================================================== #
def plot_logs(self,epoch, avg_d_loss, avg_g_loss):
if not os.path.exists("LogsUnet"):
os.makedirs("LogsUnet")
plt.figure()
plt.plot(range(len(avg_d_loss)), avg_d_loss,
color='red', label='Discriminator loss')
plt.plot(range(len(avg_g_loss)), avg_g_loss,
color='blue', label='Adversarial loss')
plt.title('Discriminator and Adversarial loss')
plt.xlabel('Iterations')
plt.ylabel('Loss (Adversarial/Discriminator)')
plt.legend()
plt.savefig("LogsUnet/{}_paper/log_ep{}.pdf".format(self.dataset_name, epoch+self.current_epoch))
|
the-stack_106_16350
|
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Main class for Senpy.
It orchestrates plugin (de)activation and analysis.
"""
from future import standard_library
standard_library.install_aliases()
from . import plugins, api
from .models import Error, AggregatedEvaluation
from .plugins import AnalysisPlugin
from .blueprints import api_blueprint, demo_blueprint, ns_blueprint
from threading import Thread
from functools import partial
import os
import copy
import errno
import logging
from . import gsitk_compat
logger = logging.getLogger(__name__)
class Senpy(object):
""" Default Senpy extension for Flask """
def __init__(self,
app=None,
plugin_folder=".",
data_folder=None,
default_plugins=False):
default_data = os.path.join(os.getcwd(), 'senpy_data')
self.data_folder = data_folder or os.environ.get('SENPY_DATA', default_data)
try:
os.makedirs(self.data_folder)
except OSError as e:
if e.errno == errno.EEXIST:
logger.debug('Data folder exists: {}'.format(self.data_folder))
else: # pragma: no cover
raise
self._default = None
self._plugins = {}
if plugin_folder:
self.add_folder(plugin_folder)
if default_plugins:
self.add_folder('plugins', from_root=True)
else:
# Add only conversion plugins
self.add_folder(os.path.join('plugins', 'postprocessing'),
from_root=True)
self.app = app
if app is not None:
self.init_app(app)
self._conversion_candidates = {}
def init_app(self, app):
""" Initialise a flask app to add plugins to its context """
"""
Note: I'm not particularly fond of adding self.app and app.senpy, but
I can't think of a better way to do it.
"""
app.senpy = self
# Use the newstyle teardown_appcontext if it's available,
# otherwise fall back to the request context
if hasattr(app, 'teardown_appcontext'):
app.teardown_appcontext(self.teardown)
else: # pragma: no cover
app.teardown_request(self.teardown)
app.register_blueprint(api_blueprint, url_prefix="/api")
app.register_blueprint(ns_blueprint, url_prefix="/ns")
app.register_blueprint(demo_blueprint, url_prefix="/")
def add_plugin(self, plugin):
self._plugins[plugin.name.lower()] = plugin
self._conversion_candidates = {}
def delete_plugin(self, plugin):
del self._plugins[plugin.name.lower()]
def plugins(self, plugin_type=None, is_activated=True, **kwargs):
""" Return the plugins registered for a given application. Filtered by criteria """
return sorted(plugins.pfilter(self._plugins,
plugin_type=plugin_type,
is_activated=is_activated,
**kwargs),
key=lambda x: x.id)
def get_plugin(self, name, default=None):
if name == 'default':
return self.default_plugin
elif name == 'conversion':
return None
if name.lower() in self._plugins:
return self._plugins[name.lower()]
results = self.plugins(id='endpoint:plugins/{}'.format(name.lower()),
plugin_type=None)
if results:
return results[0]
results = self.plugins(id=name,
plugin_type=None)
if results:
return results[0]
msg = ("Plugin not found: '{}'\n"
"Make sure it is ACTIVATED\n"
"Valid algorithms: {}").format(name,
self._plugins.keys())
raise Error(message=msg, status=404)
def get_plugins(self, name):
try:
name = name.split(',')
except AttributeError:
pass # Assume it is a tuple or a list
return tuple(self.get_plugin(n) for n in name)
def analysis_plugins(self, **kwargs):
""" Return only the analysis plugins that are active"""
candidates = self.plugins(**kwargs)
return list(plugins.pfilter(candidates, plugin_type=AnalysisPlugin))
def add_folder(self, folder, from_root=False):
""" Find plugins in this folder and add them to this instance """
if from_root:
folder = os.path.join(os.path.dirname(__file__), folder)
logger.debug("Adding folder: %s", folder)
if os.path.isdir(folder):
new_plugins = plugins.from_folder([folder],
data_folder=self.data_folder)
for plugin in new_plugins:
self.add_plugin(plugin)
else:
raise AttributeError("Not a folder or does not exist: %s", folder)
def _process(self, req, pending, done=None):
"""
Recursively process the entries with the first plugin in the list, and pass the results
to the rest of the plugins.
"""
done = done or []
if not pending:
return req
analysis = pending[0]
results = analysis.run(req)
results.activities.append(analysis)
done += analysis
return self._process(results, pending[1:], done)
def install_deps(self):
logger.info('Installing dependencies')
# If a plugin is activated, its dependencies should already be installed
# Otherwise, it would've failed to activate.
plugins.install_deps(*self.plugins(is_activated=False))
def analyse(self, request, analyses=None):
"""
Main method that analyses a request, either from CLI or HTTP.
It takes a processed request, provided by the user, as returned
by api.parse_call().
"""
if not self.plugins():
raise Error(
status=404,
message=("No plugins found."
" Please install one."))
if analyses is None:
plugins = self.get_plugins(request.parameters['algorithm'])
analyses = api.parse_analyses(request.parameters, plugins)
logger.debug("analysing request: {}".format(request))
results = self._process(request, analyses)
logger.debug("Got analysis result: {}".format(results))
results = self.postprocess(results, analyses)
logger.debug("Returning post-processed result: {}".format(results))
return results
def convert_emotions(self, resp, analyses):
"""
Conversion of all emotions in a response **in place**.
In addition to converting from one model to another, it has
to include the conversion plugin to the analysis list.
Needless to say, this is far from an elegant solution, but it works.
@todo refactor and clean up
"""
logger.debug("Converting emotions")
if 'parameters' not in resp:
logger.debug("NO PARAMETERS")
return resp
params = resp['parameters']
toModel = params.get('emotion-model', None)
if not toModel:
logger.debug("NO tomodel PARAMETER")
return resp
logger.debug('Asked for model: {}'.format(toModel))
output = params.get('conversion', None)
newentries = []
done = []
for i in resp.entries:
if output == "full":
newemotions = copy.deepcopy(i.emotions)
else:
newemotions = []
for j in i.emotions:
activity = j['prov:wasGeneratedBy']
act = resp.activity(activity)
if not act:
raise Error('Could not find the emotion model for {}'.format(activity))
fromModel = act.plugin['onyx:usesEmotionModel']
if toModel == fromModel:
continue
candidate = self._conversion_candidate(fromModel, toModel)
if not candidate:
e = Error(('No conversion plugin found for: '
'{} -> {}'.format(fromModel, toModel)),
status=404)
e.original_response = resp
e.parameters = params
raise e
analysis = candidate.activity(params)
done.append(analysis)
for k in candidate.convert(j, fromModel, toModel, params):
k.prov__wasGeneratedBy = analysis.id
if output == 'nested':
k.prov__wasDerivedFrom = j
newemotions.append(k)
i.emotions = newemotions
newentries.append(i)
resp.entries = newentries
return resp
def _conversion_candidate(self, fromModel, toModel):
if not self._conversion_candidates:
candidates = {}
for conv in self.plugins(plugin_type=plugins.EmotionConversion):
for pair in conv.onyx__doesConversion:
logging.debug(pair)
key = (pair['onyx:conversionFrom'], pair['onyx:conversionTo'])
if key not in candidates:
candidates[key] = []
candidates[key].append(conv)
self._conversion_candidates = candidates
key = (fromModel, toModel)
if key not in self._conversion_candidates:
return None
return self._conversion_candidates[key][0]
def postprocess(self, response, analyses):
'''
Transform the results from the analysis plugins.
It has some pre-defined post-processing like emotion conversion,
and it also allows plugins to auto-select themselves.
'''
response = self.convert_emotions(response, analyses)
for plug in self.plugins(plugin_type=plugins.PostProcessing):
if plug.check(response, response.activities):
activity = plug.activity(response.parameters)
response = plug.process(response, activity)
return response
def _get_datasets(self, request):
datasets_name = request.parameters.get('dataset', None).split(',')
for dataset in datasets_name:
if dataset not in gsitk_compat.datasets:
logger.debug(("The dataset '{}' is not valid\n"
"Valid datasets: {}").format(
dataset, gsitk_compat.datasets.keys()))
raise Error(
status=404,
message="The dataset '{}' is not valid".format(dataset))
return datasets_name
def evaluate(self, params):
logger.debug("evaluating request: {}".format(params))
results = AggregatedEvaluation()
results.parameters = params
datasets = self._get_datasets(results)
plugs = []
for plugname in params['algorithm']:
plugs = self.get_plugins(plugname)
for plug in plugs:
if not isinstance(plug, plugins.Evaluable):
                raise Exception('Plugin {} cannot be evaluated'.format(plug.id))
for eval in plugins.evaluate(plugs, datasets):
results.evaluations.append(eval)
if 'with-parameters' not in results.parameters:
del results.parameters
logger.debug("Returning evaluation result: {}".format(results))
return results
@property
def default_plugin(self):
if not self._default or not self._default.is_activated:
candidates = self.analysis_plugins()
if len(candidates) > 0:
self._default = candidates[0]
else:
self._default = None
logger.debug("Default: {}".format(self._default))
return self._default
@default_plugin.setter
def default_plugin(self, value):
if isinstance(value, plugins.Plugin):
if not value.is_activated:
raise AttributeError('The default plugin has to be activated.')
self._default = value
else:
self._default = self._plugins[value.lower()]
def activate_all(self, sync=True, allow_fail=False):
ps = []
for plug in self._plugins.keys():
try:
self.activate_plugin(plug, sync=sync)
except Exception as ex:
if not allow_fail:
raise
logger.error('Could not activate {}: {}'.format(plug, ex))
return ps
def deactivate_all(self, sync=True):
ps = []
for plug in self._plugins.keys():
ps.append(self.deactivate_plugin(plug, sync=sync))
return ps
def _activate(self, plugin):
success = False
with plugin._lock:
if plugin.is_activated:
return
plugin._activate()
msg = "Plugin activated: {}".format(plugin.name)
logger.info(msg)
success = plugin.is_activated
return success
def activate_plugin(self, plugin_name, sync=True):
plugin_name = plugin_name.lower()
if plugin_name not in self._plugins:
raise Error(
message="Plugin not found: {}".format(plugin_name), status=404)
plugin = self._plugins[plugin_name]
logger.info("Activating plugin: {}".format(plugin.name))
if sync or not getattr(plugin, 'async', True) or getattr(
plugin, 'sync', False):
return self._activate(plugin)
else:
th = Thread(target=partial(self._activate, plugin))
th.start()
return th
def _deactivate(self, plugin):
with plugin._lock:
if not plugin.is_activated:
return
plugin._deactivate()
logger.info("Plugin deactivated: {}".format(plugin.name))
def deactivate_plugin(self, plugin_name, sync=True):
plugin_name = plugin_name.lower()
if plugin_name not in self._plugins:
raise Error(
message="Plugin not found: {}".format(plugin_name), status=404)
plugin = self._plugins[plugin_name]
if sync or not getattr(plugin, 'async', True) or not getattr(
plugin, 'sync', False):
plugin._deactivate()
else:
th = Thread(target=plugin.deactivate)
th.start()
return th
def teardown(self, exception):
pass
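# Usage sketch (illustrative, not part of the original module; the import path
# and plugin folder are assumptions): embedding Senpy in a Flask application.
#
#     from flask import Flask
#     from senpy.extensions import Senpy
#     app = Flask(__name__)
#     senpy = Senpy(app, plugin_folder='plugins', default_plugins=True)
#     senpy.activate_all()
#     app.run(port=5000)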
|
the-stack_106_16352
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
ToKey
"""
__all__ = ["ToKey"]
from ...entrypoints.transforms_texttokeyconverter import \
transforms_texttokeyconverter
from ...utils.utils import trace
from ..base_pipeline_item import BasePipelineItem, DefaultSignature
class ToKey(BasePipelineItem, DefaultSignature):
"""
Text transforms that can be performed on data before training
a model.
.. remarks::
The ``ToKey`` transform converts a column of text to key values
using a dictionary. This operation can be reversed by using
:py:class:`FromKey <nimbusml.preprocessing.FromKey>` to obtain the
        original values.
:param max_num_terms: Maximum number of terms to keep per column when auto-
training.
:param term: List of terms.
:param sort: How items should be ordered when vectorized. By default, they
will be in the order encountered. If by value items are sorted
according to their default comparison, e.g., text sorting will be case
sensitive (e.g., 'A' then 'Z' then 'a').
:param text_key_values: Whether key value metadata should be text,
regardless of the actual input type.
:param params: Additional arguments sent to compute engine.
.. seealso::
:py:class:`FromKey <nimbusml.preprocessing.FromKey>`,
:py:class:`OneHotHashVectorizer
<nimbusml.feature_extraction.categorical.OneHotHashVectorizer>`,
:py:class:`OneHotVectorizer
<nimbusml.feature_extraction.categorical.OneHotVectorizer>`,
:py:class:`NGramFeaturizer
<nimbusml.feature_extraction.text.NGramFeaturizer>`.
.. index:: transform, preprocessing, text
Example:
.. literalinclude:: /../nimbusml/examples/ToKey.py
:language: python
"""
@trace
def __init__(
self,
max_num_terms=1000000,
term=None,
sort='Occurrence',
text_key_values=False,
**params):
BasePipelineItem.__init__(
self, type='transform', **params)
self.max_num_terms = max_num_terms
self.term = term
self.sort = sort
self.text_key_values = text_key_values
@property
def _entrypoint(self):
return transforms_texttokeyconverter
@trace
def _get_node(self, **all_args):
input_columns = self.input
if input_columns is None and 'input' in all_args:
input_columns = all_args['input']
if 'input' in all_args:
all_args.pop('input')
output_columns = self.output
if output_columns is None and 'output' in all_args:
output_columns = all_args['output']
if 'output' in all_args:
all_args.pop('output')
# validate input
if input_columns is None:
raise ValueError(
"'None' input passed when it cannot be none.")
if not isinstance(input_columns, list):
raise ValueError(
"input has to be a list of strings, instead got %s" %
type(input_columns))
# validate output
if output_columns is None:
output_columns = input_columns
if not isinstance(output_columns, list):
raise ValueError(
"output has to be a list of strings, instead got %s" %
type(output_columns))
algo_args = dict(
column=[
dict(
Source=i,
Name=o) for i,
o in zip(
input_columns,
output_columns)] if input_columns else None,
max_num_terms=self.max_num_terms,
term=self.term,
sort=self.sort,
text_key_values=self.text_key_values)
all_args.update(algo_args)
return self._entrypoint(**all_args)
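# Usage sketch (illustrative; mirrors the examples/ToKey.py file referenced in
# the docstring above — the public import path and the column-mapping syntax
# are assumptions, not taken from this module):
#
#     from nimbusml import Pipeline
#     from nimbusml.preprocessing import ToKey
#     pipe = Pipeline([ToKey(sort='Value') << {'group_key': 'group'}])
#     keyed = pipe.fit_transform(training_data)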
|
the-stack_106_16353
|
#!/usr/bin/env python
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from math import ceil
from time import time
import numpy as np
import cupy as cp
import fire
import h5py
from numba import cuda
from numba.cuda.random import create_xoroshiro128p_states
from tqdm import tqdm
from larndsim import consts
logo = """
_ _ _
| | | | (_)
| | __ _ _ __ _ __ __| |______ ___ _ _ __ ___
| |/ _` | '__| '_ \ / _` |______/ __| | '_ ` _ \\
| | (_| | | | | | | (_| | \__ \ | | | | | |
|_|\__,_|_| |_| |_|\__,_| |___/_|_| |_| |_|
"""
def cupy_unique_axis0(array):
# axis is still not supported for cupy.unique, this
# is a workaround
if len(array.shape) != 2:
raise ValueError("Input array must be 2D.")
sortarr = array[cp.lexsort(array.T[::-1])]
mask = cp.empty(array.shape[0], dtype=cp.bool_)
mask[0] = True
mask[1:] = cp.any(sortarr[1:] != sortarr[:-1], axis=1)
return sortarr[mask]
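# Example (illustrative): deduplicating rows of (x, y) pixel index pairs on the
# GPU with the workaround above.
#
#     pairs = cp.array([[1, 2], [3, 4], [1, 2]])
#     cupy_unique_axis0(pairs)  # -> array([[1, 2], [3, 4]])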
def run_simulation(input_filename,
pixel_layout,
detector_properties,
output_filename='',
n_tracks=100000):
"""
Command-line interface to run the simulation of a pixelated LArTPC
Args:
input_filename (str): path of the edep-sim input file
output_filename (str): path of the HDF5 output file. If not specified
the output is added to the input file.
pixel_layout (str): path of the YAML file containing the pixel
layout and connection details.
detector_properties (str): path of the YAML file containing
the detector properties
n_tracks (int): number of tracks to be simulated
"""
start_simulation = time()
from cupy.cuda.nvtx import RangePush, RangePop
RangePush("run_simulation")
print(logo)
print("**************************\nLOADING SETTINGS AND INPUT\n**************************")
print("Pixel layout file:", pixel_layout)
print("Detector propeties file:", detector_properties)
print("edep-sim input file:", input_filename)
RangePush("load_detector_properties")
consts.load_detector_properties(detector_properties, pixel_layout)
RangePop()
RangePush("load_larndsim_modules")
# Here we load the modules after loading the detector properties
# maybe can be implemented in a better way?
from larndsim import quenching, drifting, detsim, pixels_from_track, fee
RangePop()
RangePush("load_hd5_file")
# First of all we load the edep-sim output
# For this sample we need to invert $z$ and $y$ axes
with h5py.File(input_filename, 'r') as f:
tracks = np.array(f['segments'])
RangePop()
RangePush("slicing_and_swapping")
tracks = tracks[:n_tracks]
x_start = np.copy(tracks['x_start'] )
x_end = np.copy(tracks['x_end'])
x = np.copy(tracks['x'])
tracks['x_start'] = np.copy(tracks['z_start'])
tracks['x_end'] = np.copy(tracks['z_end'])
tracks['x'] = np.copy(tracks['z'])
tracks['z_start'] = x_start
tracks['z_end'] = x_end
tracks['z'] = x
RangePop()
TPB = 256
BPG = ceil(tracks.shape[0] / TPB)
print("*******************\nSTARTING SIMULATION\n*******************")
# We calculate the number of electrons after recombination (quenching module)
# and the position and number of electrons after drifting (drifting module)
print("Quenching electrons...",end='')
start_quenching = time()
RangePush("quench")
quenching.quench[BPG,TPB](tracks, consts.birks)
RangePop()
end_quenching = time()
print(f" {end_quenching-start_quenching:.2f} s")
print("Drifting electrons...",end='')
start_drifting = time()
RangePush("drift")
drifting.drift[BPG,TPB](tracks)
RangePop()
end_drifting = time()
print(f" {end_drifting-start_drifting:.2f} s")
step = 1
adc_tot_list = cp.empty((0,fee.MAX_ADC_VALUES))
adc_tot_ticks_list = cp.empty((0,fee.MAX_ADC_VALUES))
MAX_TRACKS_PER_PIXEL = 5
backtracked_id_tot = cp.empty((0,fee.MAX_ADC_VALUES,MAX_TRACKS_PER_PIXEL))
unique_pix_tot = cp.empty((0,2))
tot_events = 0
tot_evids = np.unique(tracks['eventID'])
# We divide the sample in portions that can be processed by the GPU
tracks_batch_runtimes = []
for ievd in tqdm(range(0, tot_evids.shape[0], step), desc='Simulating pixels...'):
start_tracks_batch = time()
first_event = tot_evids[ievd]
last_event = tot_evids[min(ievd+step, tot_evids.shape[0]-1)]
if first_event == last_event:
last_event += 1
evt_tracks = tracks[(tracks['eventID']>=first_event) & (tracks['eventID']<last_event)]
first_trk_id = np.where(tracks['eventID']==evt_tracks['eventID'][0])[0][0]
for itrk in range(0, evt_tracks.shape[0], 600):
selected_tracks = evt_tracks[itrk:itrk+600]
RangePush("event_id_map")
# Here we build a map between tracks and event IDs
event_ids = selected_tracks['eventID']
unique_eventIDs = np.unique(event_ids)
event_id_map = np.searchsorted(unique_eventIDs, event_ids)
RangePop()
# We find the pixels intersected by the projection of the tracks on
# the anode plane using the Bresenham's algorithm. We also take into
# account the neighboring pixels, due to the transverse diffusion of the charges.
RangePush("pixels_from_track")
longest_pix = ceil(max(selected_tracks["dx"])/consts.pixel_pitch)
max_radius = ceil(max(selected_tracks["tran_diff"])*5/consts.pixel_pitch)
MAX_PIXELS = int((longest_pix*4+6)*max_radius*1.5)
MAX_ACTIVE_PIXELS = int(longest_pix*1.5)
active_pixels = cp.full((selected_tracks.shape[0], MAX_ACTIVE_PIXELS, 2), -1, dtype=np.int32)
neighboring_pixels = cp.full((selected_tracks.shape[0], MAX_PIXELS, 2), -1, dtype=np.int32)
n_pixels_list = cp.zeros(shape=(selected_tracks.shape[0]))
threadsperblock = 128
blockspergrid = ceil(selected_tracks.shape[0] / threadsperblock)
if not active_pixels.shape[1]:
continue
pixels_from_track.get_pixels[blockspergrid,threadsperblock](selected_tracks,
active_pixels,
neighboring_pixels,
n_pixels_list,
max_radius+1)
RangePop()
RangePush("unique_pix")
shapes = neighboring_pixels.shape
joined = neighboring_pixels.reshape(shapes[0]*shapes[1],2)
unique_pix = cupy_unique_axis0(joined)
unique_pix = unique_pix[(unique_pix[:,0] != -1) & (unique_pix[:,1] != -1),:]
RangePop()
if not unique_pix.shape[0]:
continue
RangePush("time_intervals")
# Here we find the longest signal in time and we store an array with the start in time of each track
max_length = cp.array([0])
track_starts = cp.empty(selected_tracks.shape[0])
# d_track_starts = cuda.to_device(track_starts)
threadsperblock = 128
blockspergrid = ceil(selected_tracks.shape[0] / threadsperblock)
detsim.time_intervals[blockspergrid,threadsperblock](track_starts, max_length, event_id_map, selected_tracks)
RangePop()
RangePush("tracks_current")
# Here we calculate the induced current on each pixel
signals = cp.zeros((selected_tracks.shape[0],
neighboring_pixels.shape[1],
cp.asnumpy(max_length)[0]), dtype=np.float32)
threadsperblock = (1,1,64)
blockspergrid_x = ceil(signals.shape[0] / threadsperblock[0])
blockspergrid_y = ceil(signals.shape[1] / threadsperblock[1])
blockspergrid_z = ceil(signals.shape[2] / threadsperblock[2])
blockspergrid = (blockspergrid_x, blockspergrid_y, blockspergrid_z)
detsim.tracks_current[blockspergrid,threadsperblock](signals,
neighboring_pixels,
selected_tracks)
RangePop()
RangePush("pixel_index_map")
# Here we create a map between tracks and index in the unique pixel array
pixel_index_map = cp.full((selected_tracks.shape[0], neighboring_pixels.shape[1]), -1)
compare = neighboring_pixels[..., np.newaxis, :] == unique_pix
indices = cp.where(cp.logical_and(compare[..., 0], compare[..., 1]))
pixel_index_map[indices[0], indices[1]] = indices[2]
RangePop()
RangePush("sum_pixels_signals")
# Here we combine the induced current on the same pixels by different tracks
threadsperblock = (8,8,8)
blockspergrid_x = ceil(signals.shape[0] / threadsperblock[0])
blockspergrid_y = ceil(signals.shape[1] / threadsperblock[1])
blockspergrid_z = ceil(signals.shape[2] / threadsperblock[2])
blockspergrid = (blockspergrid_x, blockspergrid_y, blockspergrid_z)
pixels_signals = cp.zeros((len(unique_pix), len(consts.time_ticks)*3))
detsim.sum_pixel_signals[blockspergrid,threadsperblock](pixels_signals,
signals,
track_starts,
pixel_index_map)
RangePop()
RangePush("get_adc_values")
# Here we simulate the electronics response (the self-triggering cycle) and the signal digitization
time_ticks = cp.linspace(0, len(unique_eventIDs)*consts.time_interval[1]*3, pixels_signals.shape[1]+1)
integral_list = cp.zeros((pixels_signals.shape[0], fee.MAX_ADC_VALUES))
adc_ticks_list = cp.zeros((pixels_signals.shape[0], fee.MAX_ADC_VALUES))
TPB = 128
BPG = ceil(pixels_signals.shape[0] / TPB)
rng_states = create_xoroshiro128p_states(TPB * BPG, seed=ievd)
fee.get_adc_values[BPG,TPB](pixels_signals,
time_ticks,
integral_list,
adc_ticks_list,
consts.time_interval[1]*3*tot_events,
rng_states)
adc_list = fee.digitize(integral_list)
RangePop()
RangePush("track_pixel_map")
# Mapping between unique pixel array and track array index
track_pixel_map = cp.full((unique_pix.shape[0], MAX_TRACKS_PER_PIXEL), -1)
TPB = 32
BPG = ceil(unique_pix.shape[0] / TPB)
detsim.get_track_pixel_map[BPG, TPB](track_pixel_map, unique_pix, neighboring_pixels)
RangePop()
RangePush("backtracking")
# Here we backtrack the ADC counts to the Geant4 tracks
TPB = 128
BPG = ceil(adc_list.shape[0] / TPB)
backtracked_id = cp.full((adc_list.shape[0], adc_list.shape[1], MAX_TRACKS_PER_PIXEL), -1)
detsim.backtrack_adcs[BPG,TPB](selected_tracks,
adc_list,
adc_ticks_list,
track_pixel_map,
event_id_map,
unique_eventIDs,
backtracked_id,
first_trk_id+itrk)
RangePop()
adc_tot_list = cp.concatenate((adc_tot_list, adc_list), axis=0)
adc_tot_ticks_list = cp.concatenate((adc_tot_ticks_list, adc_ticks_list), axis=0)
unique_pix_tot = cp.concatenate((unique_pix_tot, unique_pix), axis=0)
backtracked_id_tot = cp.concatenate((backtracked_id_tot, backtracked_id), axis=0)
tot_events += step
end_tracks_batch = time()
tracks_batch_runtimes.append(end_tracks_batch - start_tracks_batch)
print(f"- total time: {sum(tracks_batch_runtimes):.2f} s")
if len(tracks_batch_runtimes) > 1:
print(f"- excluding first iteration: {sum(tracks_batch_runtimes[1:]):.2f} s")
RangePush("Exporting to HDF5")
# Here we export the result in a HDF5 file.
fee.export_to_hdf5(cp.asnumpy(adc_tot_list),
cp.asnumpy(adc_tot_ticks_list),
cp.asnumpy(unique_pix_tot),
cp.asnumpy(backtracked_id_tot),
output_filename)
RangePop()
with h5py.File(output_filename, 'a') as f:
f.create_dataset("tracks", data=tracks)
print("Output saved in:", output_filename)
RangePop()
end_simulation = time()
print(f"run_simulation elapsed time: {end_simulation-start_simulation:.2f} s")
if __name__ == "__main__":
fire.Fire(run_simulation)
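# Example invocation (illustrative; the script and file names are placeholders,
# positional arguments follow the run_simulation signature exposed via Fire):
#   python simulate_pixels.py input_edepsim.h5 pixel_layout.yaml detector.yaml \
#       --output_filename=output.h5 --n_tracks=1000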
|
the-stack_106_16355
|
# Note: this script needs the headless Chrome browser; install it yourself first.
# Tutorial: https://www.jianshu.com/p/11d519e2d0cb
import os
import re
import requests
from tkinter import *
from lxml import etree
# Import the headless Chrome browser driver
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
'''
What the function below does:
1. Get the latest (largest) chapter of the book to download, needed when iterating over chapters later.
2. Get the name and URL of each chapter.
3. Iterate over each chapter URL and get how many pages (episodes) each chapter contains.
4. Call url_key(url) to get the image URL of the latest chapter and analyse its pattern.
5. Call save_img(ZhangJie, pn_high, book_names, html) to save the images.
'''
def all_all(url, book_names):
headers = {
'User-Agent': 'Mozilla/5.0(Windows NT 10.0; Win64; x64)AppleWebKit/537.36(KHTML,like Gecko)Chrome/70.0.3538.77Safari/537.36',
}
req = requests.get(url, headers=headers) # .content#proxies=
html = etree.HTML(req.text)
    pn_list = []  # latest chapter numbers found, i.e. the largest chapter
    url_keys = []
    for i in range(1, 5):  # look at the first few chapters in the list
        pn2 = html.xpath('//*[@id="chapterList"]/li[{}]/a/@title'.format(i))
        pn2 = re.findall('\d+', str(pn2))
        if len(pn2) > 0:
            # print(pn2[0],333333333333)
            url_k = html.xpath('//*[@id="chapterList"]/li[{}]/a/@href'.format(i))  # URL of the largest chapter
            url_keys.append(url_k[0])  # collect the chapter URL
            pn_list.append(pn2[0])  # collect the chapter number
    pn = max(pn_list)  # the largest chapter number
    confirmLabel.delete(0, END)  # clear the listbox
    confirmLabel.insert(END, '{}一共有{}章'.format(book_names, pn), '\n', )  # show in the GUI
    confirmLabel.insert(END, '图片将保存在本程序运行的文件夹,请注意查看哦', '\n')  # show in the GUI
    window.update()  # refresh the listbox contents
    htmls = url_key(url + url_keys[pn_list.index(pn)])  # find the index of pn in the list and take the matching URL
    save_img(int(pn), url, book_names, htmls, html)
# Get the image URL of the latest chapter and analyse its pattern.
def url_key(url):
chrome_options = Options()
chrome_options.add_argument("--headless")
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get(url)
webpage = driver.page_source
driver.close()
    return webpage  # return the page source of the latest chapter, used to inspect the URL pattern
# Function that saves the images
def save_img(ZhangJie, url, book_names, htmls, html):
htmlsf = etree.HTML(htmls)
req = htmlsf.xpath('/html/body/div[1]/div[1]/div[2]/div[1]/img/@src')
urls = str(req)
s = re.findall('%E8%AF%9D.*%2F1', urls)
url_key = re.sub('%E8%AF%9D|%2F1|\[\'|\'|\]', '', str(s))
url_tou = re.sub('2F\d+\S+|\[\'', '', urls)
for i in range(1, ZhangJie + 1):
        # for pn in range(1, int(pn_high[i - 1]) + 1):
        # get the name and URL of each chapter
pnx = html.xpath(
'//*[@id="chapterList"]/li[{}]/a/@title|//*[@id="chapterList"]/li[{}]/a/@href'.format(i, i))
if str(pnx).find('话') >= 0:
req = requests.get(url + pnx[0])
htmlsa = etree.HTML(req.text)
url_f = htmlsa.xpath('//*[@class="totalPage"]') # 获取到章节最大页码
# pn_high.append(url_f[0].text)
pnp = url_f[0].text
for pn in range(1, int(pnp) + 1):
                # with key, without .webp
urlz = 'https:{}2F{}%E8%AF%9D{}%2F{}.jpg-zymk.middle'.format(url_tou, ZhangJie + 1 - i, url_key, pn)
                # with key, with .webp
urlz1 = 'https:{}2F{}%E8%AF%9D{}%2F{}.jpg-zymk.middle.webp'.format(url_tou, ZhangJie + 1 - i, url_key,pn)
                # without key, with .webp
urlz2 = 'https:{}2F{}%E8%AF%9D{}%2F{}.jpg-zymk.middle.webp'.format(url_tou, ZhangJie + 1 - i, '', pn)
                # without key, without .webp
url_s = 'https:{}2F{}%E8%AF%9D%2F{}.jpg-zymk.middle'.format(url_tou, ZhangJie, pn)
# print(ZhangJie + 1 - i, url_key)
# print(urlz)
req_baocun = requests.get(urlz)
if req_baocun.status_code == 200:
# print(url)
with open('{}\第{}话-{}节.jpg'.format(book_names, ZhangJie + 1 - i, pn), 'wb') as f:
f.write(req_baocun.content)
# print('第{}话-{}节-保存完成'.format(ZhangJie+1-i, pn))
confirmLabel.insert(END, '{}第{}话-第{}节-保存完成'.format(book_names, ZhangJie + 1 - i, pn))
                    confirmLabel.see(END)  # scroll to the end
                    window.update()  # refresh the listbox contents
elif requests.get(urlz1).status_code == 200:
req_baocun = requests.get(urlz1)
# print(urlz1)
with open('{}\第{}话-{}节.jpg'.format(book_names, ZhangJie + 1 - i, pn), 'wb') as f:
f.write(req_baocun.content)
# print('第{}话-{}节-保存完成'.format(ZhangJie+1-i, pn))
confirmLabel.insert(END, '{}第{}话-第{}节-保存完成'.format(book_names, ZhangJie + 1 - i, pn))
                    confirmLabel.see(END)  # scroll to the end
                    window.update()  # refresh the listbox contents
elif requests.get(urlz2).status_code == 200:
req_baocun = requests.get(urlz2)
# print(urlz2)
with open('{}\第{}话-{}节.jpg'.format(book_names, ZhangJie + 1 - i, pn), 'wb') as f:
f.write(req_baocun.content)
# print('第{}话-{}节-保存完成'.format(ZhangJie+1-i, pn))
confirmLabel.insert(END, '{}第{}话-第{}节-保存完成'.format(book_names, ZhangJie + 1 - i, pn))
                    confirmLabel.see(END)  # scroll to the end
                    window.update()  # refresh the listbox contents
elif requests.get(url_s).status_code == 200:
# print("失败,重新拼接URL")
req_baocun_s = requests.get(url_s).content
print(url_s)
with open('{}\第{}话-{}节.jpg'.format(book_names, ZhangJie + 1 - i, pn), 'wb') as f:
f.write(req_baocun_s)
# print('第{}话-{}节-保存完成'.format(ZhangJie+1-i, pn))
confirmLabel.insert(END, '{}第{}话-第{}节-保存完成'.format(book_names, ZhangJie + 1 - i, pn))
                    confirmLabel.see(END)  # scroll to the end
                    window.update()  # refresh the listbox contents
print('已经全部保存完成')
confirmLabel.insert(END, '已经全部保存完成')
    confirmLabel.see(END)  # scroll to the end
    window.update()  # refresh the listbox contents
# Function to look up the book id
def book_name(namee):
n = namee
url = 'https://www.zymk.cn/api/getsortlist/?callback=getsortlistCb&key={}&topnum=20&client=pc'.format(n)
req = requests.get(url) # .content
res = re.findall('"comic_id":\d+', str(req.text))
res = re.findall('\d+', str(res))
res1 = re.findall('"comic_name":"\s*?\S*?"', str(req.text))
res1 = re.findall(':"\s*\S*"', str(res1))
res2 = re.sub(r'[\/\\\:\*\?"\<\>\|\[\]\.]', '', str(res1))
res2 = re.findall(r"'(.+?)'", res2)
zidian = dict(zip(res, res2))
# print('查找到以下内容:', '\n', zidian)
return zidian
# Function to choose the book to download
def namee():
    confirmLabel.delete(0, END)  # clear the listbox
    namee = namee_Entry.get()
    zidian = book_name(namee)  # call the book-name search function
    if len(zidian) != 0:  # if the returned dict is empty, ask for new input; otherwise continue normally
confirmLabel.insert(END, '\t', ' 请双击要下载的漫画:', '\t')
for i in zidian.items():
confirmLabel.insert(END, i)
        # confirmLabel.see(END)  # scroll to the end
else:
# print('请输入要下载的漫画:')
confirmLabel.insert(END, '请输入要下载的漫画:' + '\t')
def xuanze(event):  # choose the comic to download
    zidian = confirmLabel.get(confirmLabel.curselection())
    # print(zidian)
    if type(zidian) == tuple:  # check whether the clicked item is a search result
        confirmLabel.delete(0, END)  # clear the listbox
confirmLabel.insert(END, '开始下载:', '\n', zidian[1])
# print('开始下载 {}'.format(zidian[1]))
isExists = os.path.exists('./{}'.format(zidian[1]))
if not isExists:
os.mkdir(zidian[1])
# print('https://www.zymk.cn/{}/'.format(zidian[0]), zidian[1])
all_all('https://www.zymk.cn/{}/'.format(zidian[0]), zidian[1])
window = Tk()
window.geometry('600x600+500+200')  # window size and position
window.title('漫画下载--本程序将搜索知音漫客网站信息')
taitouLabel = Label(window, text="请输入要下载的漫画: ", height=4, width=30, font=("Times", 20, "bold"), fg='red')
namee_Entry = Entry(window, width=25, font=("Times", 20, "bold"))
button = Button(window, text="搜索", command=namee, ) # .grid_location(33,44)
GunDongTiao = Scrollbar(window)  # scrollbar widget
confirmLabel = Listbox(window, height=15, width=55, font=("Times", 15, "bold"), fg='red', bg='#EEE5DE',
                       yscrollcommand=GunDongTiao.set)  # hook the Scrollbar's set() method into the Listbox
# window.iconbitmap('timg.ico')  # set the window icon
confirmLabel.bind('<Double-Button-1>', xuanze)  # double-click to select an item in the listbox
GunDongTiao.config(command=confirmLabel.yview)  # set the Scrollbar's command option to the Listbox's yview() method
taitouLabel.grid(column=1)
namee_Entry.grid(row=1, column=1, sticky=N + S)
button.grid(row=1, column=1, sticky=E)
confirmLabel.grid(row=3, column=1, sticky=E)
GunDongTiao.grid(row=3, column=2, sticky=N + S + W)  # position of the vertical scrollbar
window.mainloop()
|
the-stack_106_16356
|
#!/usr/bin/env python3
import argparse
import io
import json
import os
import sys
import copy
from datetime import datetime
from decimal import Decimal
from tempfile import NamedTemporaryFile, mkstemp
from joblib import Parallel, delayed, parallel_backend
from jsonschema import Draft4Validator, FormatChecker
from singer import get_logger
from target_postgres.db_sync import DbSync
LOGGER = get_logger('target_postgres')
DEFAULT_BATCH_SIZE_ROWS = 100000
DEFAULT_PARALLELISM = 0 # 0 The number of threads used to flush tables
DEFAULT_MAX_PARALLELISM = 16 # Don't use more than this number of threads by default when flushing streams in parallel
class RecordValidationException(Exception):
"""Exception to raise when record validation failed"""
class InvalidValidationOperationException(Exception):
"""Exception to raise when internal JSON schema validation process failed"""
def float_to_decimal(value):
"""Walk the given data structure and turn all instances of float into
double."""
if isinstance(value, float):
return Decimal(str(value))
if isinstance(value, list):
return [float_to_decimal(child) for child in value]
if isinstance(value, dict):
return {k: float_to_decimal(v) for k, v in value.items()}
return value
def add_metadata_columns_to_schema(schema_message):
"""Metadata _sdc columns according to the stitch documentation at
https://www.stitchdata.com/docs/data-structure/integration-schemas#sdc-columns
    Metadata columns give information about data injections
"""
extended_schema_message = schema_message
extended_schema_message['schema']['properties']['_sdc_extracted_at'] = {'type': ['null', 'string'],
'format': 'date-time'}
extended_schema_message['schema']['properties']['_sdc_batched_at'] = {'type': ['null', 'string'],
'format': 'date-time'}
extended_schema_message['schema']['properties']['_sdc_deleted_at'] = {'type': ['null', 'string']}
return extended_schema_message
def add_metadata_values_to_record(record_message):
"""Populate metadata _sdc columns from incoming record message
The location of the required attributes are fixed in the stream
"""
extended_record = record_message['record']
extended_record['_sdc_extracted_at'] = record_message.get('time_extracted')
extended_record['_sdc_batched_at'] = datetime.now().isoformat()
extended_record['_sdc_deleted_at'] = record_message.get('record', {}).get('_sdc_deleted_at')
return extended_record
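# Example of a record after the metadata columns are populated (illustrative values):
#   {'id': 1,
#    '_sdc_extracted_at': '2021-05-01T10:00:00+00:00',
#    '_sdc_batched_at': '2021-05-01T10:00:05.123456',
#    '_sdc_deleted_at': None}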
def emit_state(state):
"""Emit state message to standard output then it can be
consumed by other components"""
if state is not None:
line = json.dumps(state)
LOGGER.debug('Emitting state %s', line)
sys.stdout.write("{}\n".format(line))
sys.stdout.flush()
# pylint: disable=too-many-locals,too-many-branches,too-many-statements,invalid-name,consider-iterating-dictionary
def persist_lines(config, lines) -> None:
"""Read singer messages and process them line by line"""
state = None
flushed_state = None
schemas = {}
key_properties = {}
validators = {}
records_to_load = {}
row_count = {}
stream_to_sync = {}
total_row_count = {}
batch_size_rows = config.get('batch_size_rows', DEFAULT_BATCH_SIZE_ROWS)
parallelism = config.get("parallelism", -1)
# Loop over lines from stdin
for line in lines:
try:
o = json.loads(line)
except json.decoder.JSONDecodeError:
LOGGER.error('Unable to parse:\n%s', line)
raise
if 'type' not in o:
raise Exception("Line is missing required key 'type': {}".format(line))
t = o['type']
if t == 'RECORD':
if 'stream' not in o:
raise Exception("Line is missing required key 'stream': {}".format(line))
if o['stream'] not in schemas:
raise Exception(
"A record for stream {} was encountered before a corresponding schema".format(o['stream']))
# Get schema for this record's stream
stream = o['stream']
# Validate record
if config.get('validate_records'):
try:
validators[stream].validate(float_to_decimal(o['record']))
except Exception as ex:
if type(ex).__name__ == "InvalidOperation":
raise InvalidValidationOperationException(
f"Data validation failed and cannot load to destination. RECORD: {o['record']}\n"
"multipleOf validations that allows long precisions are not supported (i.e. with 15 digits"
"or more) Try removing 'multipleOf' methods from JSON schema.")
raise RecordValidationException(f"Record does not pass schema validation. RECORD: {o['record']}")
primary_key_string = stream_to_sync[stream].record_primary_key_string(o['record'])
if not primary_key_string:
primary_key_string = 'RID-{}'.format(total_row_count[stream])
if stream not in records_to_load:
records_to_load[stream] = {}
# increment row count only when a new PK is encountered in the current batch
if primary_key_string not in records_to_load[stream]:
row_count[stream] += 1
total_row_count[stream] += 1
# append record
if config.get('add_metadata_columns') or config.get('hard_delete'):
records_to_load[stream][primary_key_string] = add_metadata_values_to_record(o)
else:
records_to_load[stream][primary_key_string] = o['record']
row_count[stream] = len(records_to_load[stream])
if row_count[stream] >= batch_size_rows:
# flush all streams, delete records if needed, reset counts and then emit current state
if config.get('flush_all_streams'):
filter_streams = None
else:
filter_streams = [stream]
# Flush and return a new state dict with new positions only for the flushed streams
flushed_state = flush_streams(records_to_load,
row_count,
stream_to_sync,
config,
state,
flushed_state,
filter_streams=filter_streams)
# emit last encountered state
emit_state(copy.deepcopy(flushed_state))
elif t == 'STATE':
LOGGER.debug('Setting state to %s', o['value'])
state = o['value']
# Initially set flushed state
if not flushed_state:
flushed_state = copy.deepcopy(state)
elif t == 'SCHEMA':
if 'stream' not in o:
raise Exception("Line is missing required key 'stream': {}".format(line))
stream = o['stream']
schemas[stream] = float_to_decimal(o['schema'])
validators[stream] = Draft4Validator(schemas[stream], format_checker=FormatChecker())
# flush records from previous stream SCHEMA
if row_count.get(stream, 0) > 0:
flushed_state = flush_streams(records_to_load, row_count, stream_to_sync, config, state, flushed_state)
# emit latest encountered state
emit_state(flushed_state)
# key_properties key must be available in the SCHEMA message.
if 'key_properties' not in o:
raise Exception("key_properties field is required")
# Log based and Incremental replications on tables with no Primary Key
# cause duplicates when merging UPDATE events.
# Stop loading data by default if no Primary Key.
#
# If you want to load tables with no Primary Key:
# 1) Set ` 'primary_key_required': false ` in the target-postgres config.json
# or
# 2) Use fastsync [postgres-to-postgres, mysql-to-postgres, etc.]
if config.get('primary_key_required', True) and len(o['key_properties']) == 0:
LOGGER.critical("Primary key is set to mandatory but not defined in the [%s] stream", stream)
raise Exception("key_properties field is required")
key_properties[stream] = o['key_properties']
if config.get('add_metadata_columns') or config.get('hard_delete'):
stream_to_sync[stream] = DbSync(config, add_metadata_columns_to_schema(o))
else:
stream_to_sync[stream] = DbSync(config, o)
stream_to_sync[stream].create_schema_if_not_exists()
stream_to_sync[stream].sync_table()
row_count[stream] = 0
total_row_count[stream] = 0
elif t == 'ACTIVATE_VERSION':
LOGGER.debug('ACTIVATE_VERSION message')
# Initially set flushed state
if not flushed_state:
flushed_state = copy.deepcopy(state)
else:
raise Exception("Unknown message type {} in message {}"
.format(o['type'], o))
# if some bucket has records that need to be flushed but haven't reached batch size
# then flush all buckets.
if sum(row_count.values()) > 0:
# flush all streams one last time, delete records if needed, reset counts and then emit current state
flushed_state = flush_streams(records_to_load, row_count, stream_to_sync, config, state, flushed_state)
# emit latest state
emit_state(copy.deepcopy(flushed_state))
# pylint: disable=too-many-arguments
def flush_streams(
streams,
row_count,
stream_to_sync,
config,
state,
flushed_state,
filter_streams=None):
"""
Flushes all buckets and resets records count to 0 as well as empties records to load list
:param streams: dictionary with records to load per stream
:param row_count: dictionary with row count per stream
:param stream_to_sync: Postgres db sync instance per stream
:param config: dictionary containing the configuration
:param state: dictionary containing the original state from tap
:param flushed_state: dictionary containing updated states only when streams got flushed
:param filter_streams: Keys of streams to flush from the streams dict. Default is every stream
:return: State dict with flushed positions
"""
parallelism = config.get("parallelism", DEFAULT_PARALLELISM)
max_parallelism = config.get("max_parallelism", DEFAULT_MAX_PARALLELISM)
# Parallelism 0 means auto parallelism:
#
# Auto parallelism trying to flush streams efficiently with auto defined number
# of threads where the number of threads is the number of streams that need to
# be loaded but it's not greater than the value of max_parallelism
if parallelism == 0:
n_streams_to_flush = len(streams.keys())
if n_streams_to_flush > max_parallelism:
parallelism = max_parallelism
else:
parallelism = n_streams_to_flush
# Select the required streams to flush
if filter_streams:
streams_to_flush = filter_streams
else:
streams_to_flush = streams.keys()
# Single-host, thread-based parallelism
with parallel_backend('threading', n_jobs=parallelism):
Parallel()(delayed(load_stream_batch)(
stream=stream,
records_to_load=streams[stream],
row_count=row_count,
db_sync=stream_to_sync[stream],
delete_rows=config.get('hard_delete'),
temp_dir=config.get('temp_dir')
) for stream in streams_to_flush)
# reset flushed stream records to empty to avoid flushing same records
for stream in streams_to_flush:
streams[stream] = {}
# Update flushed streams
if filter_streams:
# update flushed_state position if we have state information for the stream
if state is not None and stream in state.get('bookmarks', {}):
# Create bookmark key if not exists
if 'bookmarks' not in flushed_state:
flushed_state['bookmarks'] = {}
# Copy the stream bookmark from the latest state
flushed_state['bookmarks'][stream] = copy.deepcopy(state['bookmarks'][stream])
# If we flush every bucket use the latest state
else:
flushed_state = copy.deepcopy(state)
# Return with state message with flushed positions
return flushed_state
# pylint: disable=too-many-arguments
def load_stream_batch(stream, records_to_load, row_count, db_sync, delete_rows=False, temp_dir=None):
"""Load a batch of records and do post load operations, like creating
or deleting rows"""
# Load into Postgres
if row_count[stream] > 0:
flush_records(stream, records_to_load, row_count[stream], db_sync, temp_dir)
# Load finished, create indices if required
db_sync.create_indices(stream)
# Delete soft-deleted, flagged rows - where _sdc_deleted at is not null
if delete_rows:
db_sync.delete_rows(stream)
# reset row count for the current stream
row_count[stream] = 0
# pylint: disable=unused-argument
def flush_records(stream, records_to_load, row_count, db_sync, temp_dir=None):
"""Take a list of records and load into database"""
if temp_dir:
temp_dir = os.path.expanduser(temp_dir)
os.makedirs(temp_dir, exist_ok=True)
size_bytes = 0
csv_fd, csv_file = mkstemp(suffix='.csv', prefix=f'{stream}_', dir=temp_dir)
with open(csv_fd, 'w+b') as f:
for record in records_to_load.values():
csv_line = db_sync.record_to_csv_line(record)
f.write(bytes(csv_line + '\n', 'UTF-8'))
size_bytes = os.path.getsize(csv_file)
db_sync.load_csv(csv_file, row_count, size_bytes)
# Delete temp file
os.remove(csv_file)
def main():
"""Main entry point"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-c', '--config', help='Config file')
args = arg_parser.parse_args()
if args.config:
with open(args.config) as config_input:
config = json.load(config_input)
else:
config = {}
# Consume singer messages
singer_messages = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
persist_lines(config, singer_messages)
LOGGER.debug("Exiting normally")
if __name__ == '__main__':
main()
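# Typical invocation (illustrative): Singer messages are piped in from a tap,
#   some-singer-tap | target-postgres --config config.json
# where config.json holds the keys referenced above plus connection settings
# (omitted here), e.g.
#   {"batch_size_rows": 100000, "parallelism": 0,
#    "add_metadata_columns": true, "hard_delete": false}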
|
the-stack_106_16357
|
#!/usr/bin/env python3
#
# Copyright (c) 2018, Nicola Coretti
# All rights reserved.
import abc
import enum
import numbers
import functools
MAJOR_VERSION = 0
MINOR_VERSION = 4
PATCH_VERSION = 1
VERSION_TEMPLATE = '{major}.{minor}.{patch}'
LIBRARY_VERSION = VERSION_TEMPLATE.format(major=MAJOR_VERSION, minor=MINOR_VERSION, patch=PATCH_VERSION)
__author__ = 'Nicola Coretti'
__email__ = '[email protected]'
__version__ = LIBRARY_VERSION
class AbstractCrcRegister(metaclass=abc.ABCMeta):
"""
Abstract base class / Interface a crc register needs to implement.
Workflow:
1. The Crc-Register needs to be initialized. 1 time (init)
2. Data is feed into the crc register. 1..n times (update)
3. Final result is calculated. 1 time (digest)
"""
@abc.abstractmethod
def init(self):
"""
Initializes the crc register.
"""
pass
@abc.abstractmethod
def update(self, data):
"""
Feeds the provided data into the crc register.
        :param bytes data: a bytes like object or an object which can be converted to a bytes
like object using the built in bytes() function.
:return: the current value of the crc register.
"""
pass
@abc.abstractmethod
def digest(self):
"""
Final crc checksum will be calculated.
:return: the final crc checksum.
:rtype: int.
"""
pass
@abc.abstractmethod
def reverse(self):
"""
Calculates the reversed value of the crc register.
        :return: the reversed value of the crc register.
"""
pass
class Configuration(object):
"""
A Configuration provides all settings necessary to determine the concrete
implementation of a specific crc algorithm/register.
"""
def __init__(self, width, polynomial, init_value=0, final_xor_value=0, reverse_input=False, reverse_output=False):
self._width = width
self._polynomial = polynomial
self._init_value = init_value
self._final_xor_value = final_xor_value
self._reverse_input = reverse_input
self._reverse_output = reverse_output
@property
def width(self):
return self._width
@property
def polynomial(self):
return self._polynomial
@property
def init_value(self):
return self._init_value
@property
def final_xor_value(self):
return self._final_xor_value
@property
def reverse_input(self):
return self._reverse_input
@property
def reverse_output(self):
return self._reverse_output
class CrcRegisterBase(AbstractCrcRegister):
"""
    Implements the common crc algorithm, assuming a user of this base
    class will provide an override for the _process_byte method.
"""
def __init__(self, configuration):
"""
Create a new CrcRegisterBase.
:param configuration: used for the crc algorithm.
"""
if isinstance(configuration, enum.Enum):
configuration = configuration.value
self._topbit = 1 << (configuration.width - 1)
self._bitmask = 2 ** configuration.width - 1
self._config = configuration
self._register = configuration.init_value & self._bitmask
def __len__(self):
"""
Returns the length (width) of the register.
:return: the register size/width in bytes.
"""
return self._config.width // 8
def __getitem__(self, index):
"""
Gets a single byte of the register.
:param index: byte which shall be returned.
:return: the byte at the specified index.
:raises IndexError: if the index is out of bounce.
"""
if index >= (self._config.width / 8) or index < 0:
raise IndexError
shift_offset = index * 8
return (self.register & (0xFF << shift_offset)) >> shift_offset
def init(self):
"""
See AbstractCrcRegister.init
"""
self.register = self._config.init_value
def update(self, data):
"""
See AbstractCrcRegister.update
"""
for byte in data:
byte = Byte(byte)
if self._config.reverse_input:
byte = byte.reversed()
self._register = self._process_byte(byte)
return self.register
@abc.abstractmethod
def _process_byte(self, byte):
"""
        Processes an entire byte fed to the crc register.
        :param byte: the byte which shall be processed by the crc register.
        :return: the new value the crc register will have after the byte has been processed.
"""
pass
def digest(self):
"""
See AbstractCrcRegister.digest
"""
if self._config.reverse_output:
self.register = self.reverse()
return self.register ^ self._config.final_xor_value
def reverse(self):
"""
        See AbstractCrcRegister.reverse
"""
index = 0
reversed_value = 0
for byte in reversed(self):
reversed_value += int(Byte(byte).reversed()) << index
index += 8
return reversed_value
def _is_division_possible(self):
return (self.register & self._topbit) > 0
@property
def register(self):
return self._register & self._bitmask
@register.setter
def register(self, value):
self._register = value & self._bitmask
class CrcRegister(CrcRegisterBase):
"""
    Simple crc register, which will process one bit at a time.
    .. note::
        If performance is an important issue for the crc calculation, use a
        table based register.
"""
def __init__(self, configuration):
super().__init__(configuration)
def _process_byte(self, byte):
"""
See CrcRegisterBase._process_byte
"""
self.register ^= int(byte) << (self._config.width - 8)
for bit in byte:
if self._is_division_possible():
self.register = (self.register << 1) ^ self._config.polynomial
else:
self.register <<= 1
return self.register
class TableBasedCrcRegister(CrcRegisterBase):
"""
Lookup table based crc register.
.. note::
this register type will be much faster than a simple bit by bit based crc register.
(e.g. CrcRegister)
"""
def __init__(self, configuration):
"""
Creates a new table based crc register.
:param configuration: used for the crc algorithm.
:attention: creating a table based register initaliy might take some extra time, due to the
fact that some lookup tables need to be calculated/initialized .
"""
super().__init__(configuration)
if isinstance(configuration, enum.Enum):
configuration = configuration.value
self._lookup_table = create_lookup_table(configuration.width, configuration.polynomial)
def _process_byte(self, byte):
"""
See CrcRegisterBase._process_byte
"""
index = int(byte) ^ (self.register >> (self._config.width - 8))
self.register = self._lookup_table[index] ^ (self.register << 8)
return self.register
class Byte(numbers.Number):
BIT_LENGTH = 8
BIT_MASK = 0xFF
def __init__(self, value=0x00):
self._value = value & Byte.BIT_MASK
def __add__(self, other):
if not isinstance(other, Byte):
other = Byte(other)
return Byte(self.value + other.value)
def __radd__(self, other):
return self + other
def __iadd__(self, other):
result = self + other
self.value = result.value
return self
def __eq__(self, other):
if not isinstance(other, Byte):
raise TypeError('unsupported operand')
return self.value == other.value
def __hash__(self):
return hash(self.value)
def __len__(self):
return Byte.BIT_LENGTH
def __getitem__(self, index):
if index >= Byte.BIT_LENGTH or index < 0:
raise IndexError
return (self.value & (1 << index)) >> index
def __int__(self):
return self.value
@property
def value(self):
return self._value & Byte.BIT_MASK
@value.setter
def value(self, value):
self._value = value & Byte.BIT_MASK
def reversed(self):
value = 0
index = 0
for bit in reversed(self):
value += bit << index
index += 1
return Byte(value)
@functools.lru_cache()
def create_lookup_table(width, polynom):
"""
Creates a crc lookup table.
:param int width: of the crc checksum.
    :param int polynom: which is used for the crc calculation.
"""
config = Configuration(width=width, polynomial=polynom)
crc_register = CrcRegister(config)
lookup_table = list()
for index in range(0, 256):
crc_register.init()
data = bytes((index).to_bytes(1, byteorder='big'))
crc_register.update(data)
lookup_table.append(crc_register.digest())
return lookup_table
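# Example (illustrative): create_lookup_table(8, 0x07) returns a 256-entry list
# where entry 0x01 equals 0x07, i.e. the 8-bit CRC of the single byte 0x01.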
class CrcCalculator(object):
def __init__(self, configuration, table_based=False):
"""
Creates a new CrcCalculator.
        :param configuration: for the crc algorithm.
        :param table_based: if true a table based register will be used for the calculations.
        :attention: initializing a table based calculator might take some extra time, due to the
            fact that the lookup table needs to be initialized.
"""
if table_based:
self._crc_register = TableBasedCrcRegister(configuration)
else:
self._crc_register = CrcRegister(configuration)
def calculate_checksum(self, data):
self._crc_register.init()
self._crc_register.update(data)
return self._crc_register.digest()
def verify_checksum(self, data, expected_checksum):
return self.calculate_checksum(data) == expected_checksum
@enum.unique
class Crc8(enum.Enum):
CCITT = Configuration(
width=8,
polynomial=0x07,
init_value=0x00,
final_xor_value=0x00,
reverse_input=False,
reverse_output=False
)
SAEJ1850 = Configuration(
width=8,
polynomial=0x1D,
init_value=0x00,
final_xor_value=0x00,
reverse_input=False,
reverse_output=False
)
AUTOSAR = Configuration(
width=8,
polynomial=0x2F,
init_value=0xFF,
final_xor_value=0xFF,
reverse_input=False,
reverse_output=False
)
BLUETOOTH = Configuration(
width=8,
polynomial=0xA7,
init_value=0x00,
final_xor_value=0x00,
reverse_input=True,
reverse_output=True
)
@enum.unique
class Crc16(enum.Enum):
CCITT = Configuration(
width=16,
polynomial=0x1021,
init_value=0x0000,
final_xor_value=0x0000,
reverse_input=False,
reverse_output=False
)
GSM = Configuration(
width=16,
polynomial=0x1021,
init_value=0x0000,
final_xor_value=0xFFFF,
reverse_input=False,
reverse_output=False
)
PROFIBUS = Configuration(
width=16,
polynomial=0x1DCF,
init_value=0xFFFF,
final_xor_value=0xFFFF,
reverse_input=False,
reverse_output=False
)
@enum.unique
class Crc32(enum.Enum):
CRC32 = Configuration(
width=32,
polynomial=0x04C11DB7,
init_value=0xFFFFFFFF,
final_xor_value=0xFFFFFFFF,
reverse_input=True,
reverse_output=True
)
AUTOSAR = Configuration(
width=32,
polynomial=0xF4ACFB13,
init_value=0xFFFFFFFF,
final_xor_value=0xFFFFFFFF,
reverse_input=True,
reverse_output=True
)
BZIP2 = Configuration(
width=32,
polynomial=0x04C11DB7,
init_value=0xFFFFFFFF,
final_xor_value=0xFFFFFFFF,
reverse_input=False,
reverse_output=False
)
POSIX = Configuration(
width=32,
polynomial=0x04C11DB7,
init_value=0x00000000,
final_xor_value=0xFFFFFFFF,
reverse_input=False,
reverse_output=False
)
@enum.unique
class Crc64(enum.Enum):
CRC64 = Configuration(
width=64,
polynomial=0x42F0E1EBA9EA3693,
init_value=0x0000000000000000,
final_xor_value=0x0000000000000000,
reverse_input=False,
reverse_output=False
)
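if __name__ == '__main__':
    # Usage sketch (illustrative, not part of the original module): compute and
    # verify a CRC-8 CCITT checksum over the classic check input.
    data = b'123456789'
    crc_calculator = CrcCalculator(Crc8.CCITT, table_based=True)
    checksum = crc_calculator.calculate_checksum(data)
    assert crc_calculator.verify_checksum(data, checksum)
    print('CRC-8/CCITT of {}: 0x{:02X}'.format(data, checksum))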
|
the-stack_106_16358
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d\-_]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def build_address(address):
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
if l > 63:
return None
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
def build_request(address, qtype):
request_id = os.urandom(2)
header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return request_id + header + addr + qtype_qclass
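# Example (illustrative): build_request(b'example.com', QTYPE_A) yields a random
# 2-byte request id, the fixed flags/counts packed above, the encoded name
# b'\x07example\x03com\x00' and a 4-byte QTYPE/QCLASS trailer.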
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
def parse_name(data, offset):
p = offset
labels = []
l = common.ord(data[p])
while l > 0:
if (l & (128 + 64)) == (128 + 64):
# pointer
pointer = struct.unpack('!H', data[p:p + 2])[0]
pointer &= 0x3FFF
r = parse_name(data, pointer)
labels.append(r[1])
p += 2
# pointer is the end
return p - offset, b'.'.join(labels)
else:
labels.append(data[p + 1:p + 1 + l])
p += 1 + l
l = common.ord(data[p])
return p - offset + 1, b'.'.join(labels)
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
nlen, name = parse_name(data, offset)
if not question:
record_type, record_class, record_ttl, record_rdlength = struct.unpack(
'!HHiH', data[offset + nlen:offset + nlen + 10]
)
ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
return nlen + 10 + record_rdlength, \
(name, ip, record_type, record_class, record_ttl)
else:
record_type, record_class = struct.unpack(
'!HH', data[offset + nlen:offset + nlen + 4]
)
return nlen + 4, (name, None, record_type, record_class, None, None)
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
res_qr = header[1] & 128
res_tc = header[1] & 2
res_ra = header[2] & 128
res_rcode = header[2] & 15
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
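# Illustrative sketch, not part of the original module: the header layout in
# the diagram above can be sanity-checked by building a query with
# build_request() and feeding its first 12 bytes back into parse_header().
# The hostname used here is only an example value.
def _demo_header_roundtrip():
    req = build_request(b'example.com', QTYPE_A)
    header = parse_header(req)
    res_id, res_qr, res_qdcount, res_ancount = \
        header[0], header[1], header[5], header[6]
    # A freshly built query has QR == 0 (query), one question and no answers.
    assert res_qr == 0 and res_qdcount == 1 and res_ancount == 0
    return res_id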
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
qds = []
ans = []
offset = 12
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
shell.print_exception(e)
return None
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
    if hostname[-1:] == b'.':
hostname = hostname[:-1]
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each: (addr, type, class)
self.answers = [] # each: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_FIRST = 0
STATUS_SECOND = 1
class DNSResolver(object):
def __init__(self, server_list=None, prefer_ipv6=False):
self._loop = None
self._hosts = {}
self._hostname_status = {}
self._hostname_to_cb = {}
self._cb_to_hostname = {}
self._cache = lru_cache.LRUCache(timeout=300)
self._sock = None
if server_list is None:
self._servers = None
self._parse_resolv()
else:
self._servers = server_list
if prefer_ipv6:
self._QTYPES = [QTYPE_AAAA, QTYPE_A]
else:
self._QTYPES = [QTYPE_A, QTYPE_AAAA]
self._parse_hosts()
# TODO monitor hosts change and reload hosts
# TODO parse /etc/gai.conf and follow its rules
def _parse_resolv(self):
self._servers = []
try:
with open('/etc/resolv.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if not (line and line.startswith(b'nameserver')):
continue
parts = line.split()
if len(parts) < 2:
continue
server = parts[1]
if common.is_ip(server) == socket.AF_INET:
if type(server) != str:
server = server.decode('utf8')
self._servers.append(server)
except IOError:
pass
if not self._servers:
self._servers = ['8.8.4.4', '8.8.8.8']
def _parse_hosts(self):
etc_path = '/etc/hosts'
if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
parts = line.split()
if len(parts) < 2:
continue
ip = parts[0]
if not common.is_ip(ip):
continue
for i in range(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
self._hosts['localhost'] = '127.0.0.1'
def add_to_loop(self, loop):
if self._loop:
raise Exception('already add to loop')
self._loop = loop
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
loop.add(self._sock, eventloop.POLL_IN, self)
loop.add_periodic(self.handle_periodic)
def _call_callback(self, hostname, ip, error=None):
callbacks = self._hostname_to_cb.get(hostname, [])
for callback in callbacks:
if callback in self._cb_to_hostname:
del self._cb_to_hostname[callback]
if ip or error:
callback((hostname, ip), error)
else:
callback((hostname, None),
Exception('unknown hostname %s' % hostname))
if hostname in self._hostname_to_cb:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _handle_data(self, data):
response = parse_response(data)
if response and response.hostname:
hostname = response.hostname
ip = None
for answer in response.answers:
if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
answer[2] == QCLASS_IN:
ip = answer[0]
break
if not ip and self._hostname_status.get(hostname, STATUS_SECOND) \
== STATUS_FIRST:
self._hostname_status[hostname] = STATUS_SECOND
self._send_req(hostname, self._QTYPES[1])
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) \
== STATUS_SECOND:
for question in response.questions:
if question[1] == self._QTYPES[1]:
self._call_callback(hostname, None)
break
def handle_event(self, sock, fd, event):
if sock != self._sock:
return
if event & eventloop.POLL_ERR:
logging.error('dns socket err')
self._loop.remove(self._sock)
self._sock.close()
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
self._loop.add(self._sock, eventloop.POLL_IN, self)
else:
data, addr = sock.recvfrom(1024)
if addr[0] not in self._servers:
logging.warn('received a packet other than our dns')
return
self._handle_data(data)
def handle_periodic(self):
self._cache.sweep()
def remove_callback(self, callback):
hostname = self._cb_to_hostname.get(callback)
if hostname:
del self._cb_to_hostname[callback]
arr = self._hostname_to_cb.get(hostname, None)
if arr:
arr.remove(callback)
if not arr:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _send_req(self, hostname, qtype):
req = build_request(hostname, qtype)
for server in self._servers:
logging.debug('resolving %s with type %d using server %s',
hostname, qtype, server)
self._sock.sendto(req, (server, 53))
def resolve(self, hostname, callback):
if type(hostname) != bytes:
hostname = hostname.encode('utf8')
if not hostname:
callback(None, Exception('empty hostname'))
elif common.is_ip(hostname):
callback((hostname, hostname), None)
elif hostname in self._hosts:
logging.debug('hit hosts: %s', hostname)
ip = self._hosts[hostname]
callback((hostname, ip), None)
elif hostname in self._cache:
logging.debug('hit cache: %s', hostname)
ip = self._cache[hostname]
callback((hostname, ip), None)
else:
if not is_valid_hostname(hostname):
callback(None, Exception('invalid hostname: %s' % hostname))
return
arr = self._hostname_to_cb.get(hostname, None)
if not arr:
self._hostname_status[hostname] = STATUS_FIRST
self._send_req(hostname, self._QTYPES[0])
self._hostname_to_cb[hostname] = [callback]
self._cb_to_hostname[callback] = hostname
else:
arr.append(callback)
# TODO send again only if waited too long
self._send_req(hostname, self._QTYPES[0])
def close(self):
if self._sock:
if self._loop:
self._loop.remove_periodic(self.handle_periodic)
self._loop.remove(self._sock)
self._sock.close()
self._sock = None
def test():
dns_resolver = DNSResolver()
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
global counter
counter = 0
def make_callback():
global counter
def callback(result, error):
global counter
# TODO: what can we assert?
print(result, error)
counter += 1
if counter == 9:
dns_resolver.close()
loop.stop()
a_callback = callback
return a_callback
assert(make_callback() != make_callback())
dns_resolver.resolve(b'google.com', make_callback())
dns_resolver.resolve('google.com', make_callback())
dns_resolver.resolve('example.com', make_callback())
dns_resolver.resolve('ipv6.google.com', make_callback())
dns_resolver.resolve('www.facebook.com', make_callback())
dns_resolver.resolve('ns2.google.com', make_callback())
dns_resolver.resolve('invalid.@!#$%^&[email protected]', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
loop.run()
if __name__ == '__main__':
test()
|
the-stack_106_16359
|
import dlib
import numpy as np
import cv2
from PIL import Image
def interpolate_latents(latent_A, latent_B, ratio):
return latent_A + ratio * (latent_B - latent_A)
def interpolate_styles(style_A, style_B, ratio):
if style_A is None or style_B is None:
return
style = []
for s_A, s_B in zip(style_A, style_B):
style.append(s_A + ratio * (s_B - s_A))
return style
def interpolate_weights_deltas(weights_deltas_A, weights_deltas_B, ratio):
weights_deltas = []
for dw_A, dw_B in zip(weights_deltas_A, weights_deltas_B):
if dw_A is None or dw_B is None:
weights_deltas.append(None)
else:
weights_deltas.append(dw_A + ratio * (dw_B - dw_A))
return weights_deltas
def interpolation(p_A, p_B, n_frames):
pts = []
ratios = np.linspace(0, 1, n_frames).tolist()
latent_A, weights_deltas_A = p_A["latent"], p_A["weights_deltas"]
latent_B, weights_deltas_B = p_B["latent"], p_B["weights_deltas"]
for ratio in ratios:
latent = interpolate_latents(latent_A, latent_B, ratio)
weights_deltas = interpolate_weights_deltas(weights_deltas_A, weights_deltas_B, ratio)
pts.append({"latent": latent, "weights_deltas": weights_deltas})
return pts
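# Illustrative usage sketch, not part of the original module. The array shapes
# below are arbitrary placeholders just to show the expected structure; real
# inputs would come from an inversion/encoder step.
def _demo_interpolation():
    p_A = {"latent": np.zeros((1, 18, 512)),
           "weights_deltas": [None, np.zeros((3, 3))]}
    p_B = {"latent": np.ones((1, 18, 512)),
           "weights_deltas": [None, np.ones((3, 3))]}
    frames = interpolation(p_A, p_B, n_frames=5)
    # The middle frame sits halfway between the two endpoints.
    assert np.allclose(frames[2]["latent"], 0.5)
    return frames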
# The code below is adapted from https://github.com/Azmarie/Face-Morphing
class NoFaceFound(Exception):
"""Raised when there is no face found"""
pass
def calculate_margin_help(img1,img2):
size1 = img1.shape
size2 = img2.shape
diff0 = abs(size1[0]-size2[0])//2
diff1 = abs(size1[1]-size2[1])//2
avg0 = (size1[0]+size2[0])//2
avg1 = (size1[1]+size2[1])//2
return [size1,size2,diff0,diff1,avg0,avg1]
def crop_image(img1,img2):
[size1,size2,diff0,diff1,avg0,avg1] = calculate_margin_help(img1,img2)
if(size1[0] == size2[0] and size1[1] == size2[1]):
return [img1,img2]
elif(size1[0] <= size2[0] and size1[1] <= size2[1]):
scale0 = size1[0]/size2[0]
scale1 = size1[1]/size2[1]
if(scale0 > scale1):
res = cv2.resize(img2,None,fx=scale0,fy=scale0,interpolation=cv2.INTER_AREA)
else:
res = cv2.resize(img2,None,fx=scale1,fy=scale1,interpolation=cv2.INTER_AREA)
return crop_image_help(img1,res)
elif(size1[0] >= size2[0] and size1[1] >= size2[1]):
scale0 = size2[0]/size1[0]
scale1 = size2[1]/size1[1]
if(scale0 > scale1):
res = cv2.resize(img1,None,fx=scale0,fy=scale0,interpolation=cv2.INTER_AREA)
else:
res = cv2.resize(img1,None,fx=scale1,fy=scale1,interpolation=cv2.INTER_AREA)
return crop_image_help(res,img2)
elif(size1[0] >= size2[0] and size1[1] <= size2[1]):
return [img1[diff0:avg0,:],img2[:,-diff1:avg1]]
else:
return [img1[:,diff1:avg1],img2[-diff0:avg0,:]]
def crop_image_help(img1,img2):
[size1,size2,diff0,diff1,avg0,avg1] = calculate_margin_help(img1,img2)
if(size1[0] == size2[0] and size1[1] == size2[1]):
return [img1,img2]
elif(size1[0] <= size2[0] and size1[1] <= size2[1]):
return [img1,img2[-diff0:avg0,-diff1:avg1]]
elif(size1[0] >= size2[0] and size1[1] >= size2[1]):
return [img1[diff0:avg0,diff1:avg1],img2]
elif(size1[0] >= size2[0] and size1[1] <= size2[1]):
return [img1[diff0:avg0,:],img2[:,-diff1:avg1]]
else:
return [img1[:,diff1:avg1],img2[diff0:avg0,:]]
def generate_face_correspondences(theImage1, theImage2):
# Detect the points of face.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('pretrained_models/shape_predictor_68_face_landmarks.dat')
corresp = np.zeros((68,2))
imgList = crop_image(theImage1,theImage2)
list1 = []
list2 = []
j = 1
for img in imgList:
size = (img.shape[0],img.shape[1])
if(j == 1):
currList = list1
else:
currList = list2
# Ask the detector to find the bounding boxes of each face. The 1 in the
# second argument indicates that we should upsample the image 1 time. This
# will make everything bigger and allow us to detect more faces.
dets = detector(img, 1)
try:
if len(dets) == 0:
raise NoFaceFound
except NoFaceFound:
print("Sorry, but I couldn't find a face in the image.")
j=j+1
for k, rect in enumerate(dets):
# Get the landmarks/parts for the face in rect.
shape = predictor(img, rect)
# corresp = face_utils.shape_to_np(shape)
for i in range(0,68):
x = shape.part(i).x
y = shape.part(i).y
currList.append((x, y))
corresp[i][0] += x
corresp[i][1] += y
# cv2.circle(img, (x, y), 2, (0, 255, 0), 2)
# Add back the background
currList.append((1,1))
currList.append((size[1]-1,1))
currList.append(((size[1]-1)//2,1))
currList.append((1,size[0]-1))
currList.append((1,(size[0]-1)//2))
currList.append(((size[1]-1)//2,size[0]-1))
currList.append((size[1]-1,size[0]-1))
currList.append(((size[1]-1),(size[0]-1)//2))
# Add back the background
narray = corresp/2
narray = np.append(narray,[[1,1]],axis=0)
narray = np.append(narray,[[size[1]-1,1]],axis=0)
narray = np.append(narray,[[(size[1]-1)//2,1]],axis=0)
narray = np.append(narray,[[1,size[0]-1]],axis=0)
narray = np.append(narray,[[1,(size[0]-1)//2]],axis=0)
narray = np.append(narray,[[(size[1]-1)//2,size[0]-1]],axis=0)
narray = np.append(narray,[[size[1]-1,size[0]-1]],axis=0)
narray = np.append(narray,[[(size[1]-1),(size[0]-1)//2]],axis=0)
return [size,imgList[0],imgList[1],list1,list2,narray]
# Check if a point is inside a rectangle
def rect_contains(rect, point):
if point[0] < rect[0]:
return False
elif point[1] < rect[1]:
return False
elif point[0] > rect[2]:
return False
elif point[1] > rect[3]:
return False
return True
# Write the delaunay triangles into a file
def draw_delaunay(f_w, f_h, subdiv, dictionary1):
list4 = []
triangleList = subdiv.getTriangleList()
r = (0, 0, f_w, f_h)
for t in triangleList :
pt1 = (int(t[0]), int(t[1]))
pt2 = (int(t[2]), int(t[3]))
pt3 = (int(t[4]), int(t[5]))
if rect_contains(r, pt1) and rect_contains(r, pt2) and rect_contains(r, pt3) :
list4.append((dictionary1[pt1],dictionary1[pt2],dictionary1[pt3]))
dictionary1 = {}
return list4
def make_delaunay(f_w, f_h, theList, img1, img2):
# Make a rectangle.
rect = (0, 0, f_w, f_h)
# Create an instance of Subdiv2D.
subdiv = cv2.Subdiv2D(rect)
# Make a points list and a searchable dictionary.
theList = theList.tolist()
points = [(int(x[0]),int(x[1])) for x in theList]
dictionary = {x[0]:x[1] for x in list(zip(points, range(76)))}
# Insert points into subdiv
for p in points :
subdiv.insert(p)
# Make a delaunay triangulation list.
list4 = draw_delaunay(f_w, f_h, subdiv, dictionary)
# Return the list.
return list4
# Apply affine transform calculated using srcTri and dstTri to src and
# output an image of size.
def apply_affine_transform(src, srcTri, dstTri, size) :
# Given a pair of triangles, find the affine transform.
warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))
# Apply the Affine Transform just found to the src image
dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
return dst
# Warps and alpha blends triangular regions from img1 and img2 to img
def morph_triangle(img1, img2, img, t1, t2, t, alpha) :
# Find bounding rectangle for each triangle
r1 = cv2.boundingRect(np.float32([t1]))
r2 = cv2.boundingRect(np.float32([t2]))
r = cv2.boundingRect(np.float32([t]))
# Offset points by left top corner of the respective rectangles
t1Rect = []
t2Rect = []
tRect = []
for i in range(0, 3):
tRect.append(((t[i][0] - r[0]),(t[i][1] - r[1])))
t1Rect.append(((t1[i][0] - r1[0]),(t1[i][1] - r1[1])))
t2Rect.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))
# Get mask by filling triangle
mask = np.zeros((r[3], r[2], 3), dtype = np.float32)
cv2.fillConvexPoly(mask, np.int32(tRect), (1.0, 1.0, 1.0), 16, 0)
# Apply warpImage to small rectangular patches
img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
img2Rect = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]
size = (r[2], r[3])
warpImage1 = apply_affine_transform(img1Rect, t1Rect, tRect, size)
warpImage2 = apply_affine_transform(img2Rect, t2Rect, tRect, size)
# Alpha blend rectangular patches
imgRect = (1.0 - alpha) * warpImage1 + alpha * warpImage2
# Copy triangular region of the rectangular patch to the output image
img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] = img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] * ( 1 - mask ) + imgRect * mask
def generate_morph_sequence(duration,frame_rate,img1,img2,points1,points2,tri_list,size,output):
# num_images = int(duration*frame_rate)
# p = Popen(['ffmpeg', '-y', '-f', 'image2pipe', '-r', str(frame_rate),'-s',str(size[1])+'x'+str(size[0]), '-i', '-', '-c:v', 'libx264', '-crf', '25','-vf','scale=trunc(iw/2)*2:trunc(ih/2)*2','-pix_fmt','yuv420p', output], stdin=PIPE)
for j in range(0, 1):
# Convert Mat to float data type
img1 = np.float32(img1)
img2 = np.float32(img2)
# Read array of corresponding points
points = []
# alpha = j/(num_images-1)
alpha = 0
# Compute weighted average point coordinates
for i in range(0, len(points1)):
x = (1 - alpha) * points1[i][0] + alpha * points2[i][0]
y = (1 - alpha) * points1[i][1] + alpha * points2[i][1]
points.append((x,y))
# Allocate space for final output
morphed_frame = np.zeros(img1.shape, dtype = img1.dtype)
for i in range(len(tri_list)):
x = int(tri_list[i][0])
y = int(tri_list[i][1])
z = int(tri_list[i][2])
t1 = [points1[x], points1[y], points1[z]]
t2 = [points2[x], points2[y], points2[z]]
t = [points[x], points[y], points[z]]
# Morph one triangle at a time.
morph_triangle(img1, img2, morphed_frame, t1, t2, t, alpha)
pt1 = (int(t[0][0]), int(t[0][1]))
pt2 = (int(t[1][0]), int(t[1][1]))
pt3 = (int(t[2][0]), int(t[2][1]))
cv2.line(morphed_frame, pt1, pt2, (255, 255, 255), 1, 8, 0)
cv2.line(morphed_frame, pt2, pt3, (255, 255, 255), 1, 8, 0)
cv2.line(morphed_frame, pt3, pt1, (255, 255, 255), 1, 8, 0)
# res = Image.fromarray(cv2.cvtColor(np.uint8(morphed_frame), cv2.COLOR_RGB2BGR))
res = Image.fromarray(np.uint8(morphed_frame)).convert("RGB")
# res.show()
# p.stdin.close()
# p.wait()
return res
def doMorphing(img1, img2, duration, frame_rate, output):
[size, img1, img2, points1, points2, list3] = generate_face_correspondences(img1, img2)
tri = make_delaunay(size[1], size[0], list3, img1, img2)
image = generate_morph_sequence(duration, frame_rate, img1, img2, points1, points2, tri, size, output)
return image
def getMesh(image):
img1 = np.array(image)
img2 = np.array(image)
duration = 2
frame_rate = 1
output = ".mp4"
image = doMorphing(img1, img2, duration, frame_rate, output)
return image
|
the-stack_106_16361
|
"""
Create a new file inside the actions application directory and name it utils.py
You need to define a shortcut function that will allow you to create new Action
objects in a simple way.
"""
import datetime
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from .models import Action
def create_action(user, verb, target=None):
"""
The create_action function allows you to create actions that optionally
include a target object. You can use this function anywhere in your code as
a shortcut to add new actions to the activity stream.
"""
# check for any similar action made in the last minute
now = timezone.now()
last_minute = now - datetime.timedelta(seconds=60)
similar_actions = Action.objects.filter(user_id=user.id,
verb=verb,
created__gte=last_minute)
if target:
target_ct = ContentType.objects.get_for_model(target)
similar_actions = similar_actions.filter(target_ct=target_ct,
target_id=target.id)
if not similar_actions:
# no existing actions found
action = Action(user=user, verb=verb, target=target)
action.save()
return True
return False
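# Illustrative usage sketch (not from the tutorial itself); the view context
# and the `image` object below are placeholders. A view would typically call
# create_action() right after the event it records:
#
#   create_action(request.user, 'likes', image)   # True on the first call
#   create_action(request.user, 'likes', image)   # False within 60 seconds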
|
the-stack_106_16365
|
# Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from .gltf2_blender_pbrMetallicRoughness import BlenderPbr
from .gltf2_blender_KHR_materials_pbrSpecularGlossiness import BlenderKHR_materials_pbrSpecularGlossiness
from .gltf2_blender_KHR_materials_unlit import BlenderKHR_materials_unlit
from .gltf2_blender_map_emissive import BlenderEmissiveMap
from .gltf2_blender_map_normal import BlenderNormalMap
from .gltf2_blender_map_occlusion import BlenderOcclusionMap
from ..com.gltf2_blender_material_helpers import get_output_surface_input
from ..com.gltf2_blender_material_helpers import get_preoutput_node_output
from ..com.gltf2_blender_material_helpers import get_base_color_node
from ...io.com.gltf2_io import MaterialPBRMetallicRoughness
class BlenderMaterial():
"""Blender Material."""
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def create(gltf, material_idx, vertex_color):
"""Material creation."""
pymaterial = gltf.data.materials[material_idx]
if vertex_color is None:
if pymaterial.name is not None:
name = pymaterial.name
else:
name = "Material_" + str(material_idx)
else:
if pymaterial.name is not None:
name = pymaterial.name + "_" + vertex_color
else:
name = "Material_" + str(material_idx) + "_" + vertex_color
mat = bpy.data.materials.new(name)
pymaterial.blender_material[vertex_color] = mat.name
if bpy.app.version < (2, 80, 0):
pass # Blender 2.79 did not have a per-material double-sided flag.
else:
            mat.use_backface_culling = not pymaterial.double_sided
ignore_map = False
if pymaterial.extensions is not None :
if 'KHR_materials_unlit' in pymaterial.extensions.keys():
ignore_map = True
BlenderKHR_materials_unlit.create(
gltf, material_idx,
pymaterial.extensions['KHR_materials_unlit'],
mat.name,
vertex_color
)
elif 'KHR_materials_pbrSpecularGlossiness' in pymaterial.extensions.keys():
BlenderKHR_materials_pbrSpecularGlossiness.create(
gltf, pymaterial.extensions['KHR_materials_pbrSpecularGlossiness'], mat.name, vertex_color
)
else:
# create pbr material
if pymaterial.pbr_metallic_roughness is None:
# If no pbr material is set, we need to apply all default of pbr
pbr = {}
pbr["baseColorFactor"] = [1.0, 1.0, 1.0, 1.0]
pbr["metallicFactor"] = 1.0
pbr["roughnessFactor"] = 1.0
pymaterial.pbr_metallic_roughness = MaterialPBRMetallicRoughness.from_dict(pbr)
pymaterial.pbr_metallic_roughness.color_type = gltf.SIMPLE
pymaterial.pbr_metallic_roughness.metallic_type = gltf.SIMPLE
BlenderPbr.create(gltf, pymaterial.pbr_metallic_roughness, mat.name, vertex_color)
        if not ignore_map:
# add emission map if needed
if pymaterial.emissive_texture is not None:
BlenderEmissiveMap.create(gltf, material_idx, vertex_color)
elif pymaterial.emissive_factor is not None:
# add emissive factor only if there is not emissive texture
BlenderEmissiveMap.create(gltf, material_idx, vertex_color, factor_only=True)
# add normal map if needed
if pymaterial.normal_texture is not None:
BlenderNormalMap.create(gltf, material_idx, vertex_color)
# add occlusion map if needed
            # will be packed, but not used
if pymaterial.occlusion_texture is not None:
BlenderOcclusionMap.create(gltf, material_idx, vertex_color)
if pymaterial.alpha_mode is not None and pymaterial.alpha_mode != 'OPAQUE':
BlenderMaterial.blender_alpha(gltf, material_idx, vertex_color, pymaterial.alpha_mode)
@staticmethod
def set_uvmap(gltf, material_idx, prim, obj, vertex_color):
"""Set UV Map."""
pymaterial = gltf.data.materials[material_idx]
node_tree = bpy.data.materials[pymaterial.blender_material[vertex_color]].node_tree
uvmap_nodes = [node for node in node_tree.nodes if node.type in ['UVMAP', 'NORMAL_MAP']]
for uvmap_node in uvmap_nodes:
if uvmap_node["gltf2_texcoord"] in prim.blender_texcoord.keys():
uvmap_node.uv_map = prim.blender_texcoord[uvmap_node["gltf2_texcoord"]]
@staticmethod
def blender_alpha(gltf, material_idx, vertex_color, alpha_mode):
"""Set alpha."""
pymaterial = gltf.data.materials[material_idx]
material = bpy.data.materials[pymaterial.blender_material[vertex_color]]
# Set alpha value in material
if bpy.app.version < (2, 80, 0):
material.game_settings.alpha_blend = 'ALPHA'
else:
if alpha_mode == 'BLEND':
material.blend_method = 'BLEND'
elif alpha_mode == "MASK":
material.blend_method = 'CLIP'
alpha_cutoff = pymaterial.alpha_cutoff if pymaterial.alpha_cutoff is not None else 0.5
material.alpha_threshold = alpha_cutoff
node_tree = material.node_tree
# Add nodes for basic transparency
# Add mix shader between output and Principled BSDF
trans = node_tree.nodes.new('ShaderNodeBsdfTransparent')
trans.location = 750, -500
mix = node_tree.nodes.new('ShaderNodeMixShader')
mix.location = 1000, 0
output_surface_input = get_output_surface_input(node_tree)
preoutput_node_output = get_preoutput_node_output(node_tree)
link = output_surface_input.links[0]
node_tree.links.remove(link)
# PBR => Mix input 1
node_tree.links.new(preoutput_node_output, mix.inputs[1])
# Trans => Mix input 2
node_tree.links.new(trans.outputs['BSDF'], mix.inputs[2])
# Mix => Output
node_tree.links.new(mix.outputs['Shader'], output_surface_input)
# alpha blend factor
add = node_tree.nodes.new('ShaderNodeMath')
add.operation = 'ADD'
add.location = 750, -250
diffuse_factor = 1.0
if pymaterial.extensions is not None and 'KHR_materials_pbrSpecularGlossiness' in pymaterial.extensions:
diffuse_factor = pymaterial.extensions['KHR_materials_pbrSpecularGlossiness']['diffuseFactor'][3]
elif pymaterial.pbr_metallic_roughness:
diffuse_factor = pymaterial.pbr_metallic_roughness.base_color_factor[3]
add.inputs[0].default_value = abs(1.0 - diffuse_factor)
add.inputs[1].default_value = 0.0
node_tree.links.new(add.outputs['Value'], mix.inputs[0])
# Take diffuse texture alpha into account if any
diffuse_texture = get_base_color_node(node_tree)
if diffuse_texture:
inverter = node_tree.nodes.new('ShaderNodeInvert')
inverter.location = 250, -250
inverter.inputs[1].default_value = (1.0, 1.0, 1.0, 1.0)
node_tree.links.new(diffuse_texture.outputs['Alpha'], inverter.inputs[0])
mult = node_tree.nodes.new('ShaderNodeMath')
mult.operation = 'MULTIPLY' if pymaterial.alpha_mode == 'BLEND' else 'GREATER_THAN'
mult.location = 500, -250
# Note that `1.0 - pymaterial.alpha_cutoff` is used due to the invert node above.
alpha_cutoff = 1.0 if pymaterial.alpha_mode == 'BLEND' else \
1.0 - pymaterial.alpha_cutoff if pymaterial.alpha_cutoff is not None else 0.5
mult.inputs[1].default_value = alpha_cutoff
node_tree.links.new(inverter.outputs['Color'], mult.inputs[0])
node_tree.links.new(mult.outputs['Value'], add.inputs[0])
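# Illustrative call sequence (an assumption, not taken from the importer): the
# mesh-import code would typically resolve a vertex-color variant first and
# then call, with its own runtime objects in place of the placeholders here,
#
#   BlenderMaterial.create(gltf, material_idx, vertex_color=None)
#   BlenderMaterial.set_uvmap(gltf, material_idx, prim, obj, vertex_color=None)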
|
the-stack_106_16368
|
from __future__ import print_function
import os
from simpleflow import Workflow, activity
@activity.with_attributes(task_list='quickstart', version='example')
def repeat50k(s):
return s * 50000
@activity.with_attributes(task_list='quickstart', version='example')
def length(x):
return len(x)
class JumboFieldsWorkflow(Workflow):
"""
This workflow demonstrates how you can use simpleflow jumbo fields, e.g.
how simpleflow can automatically store input/results on S3 if their length
crosses the SWF limits (32KB for input/results).
"""
name = 'basic'
version = 'example'
task_list = 'example'
def run(self, string):
if 'SIMPLEFLOW_JUMBO_FIELDS_BUCKET' not in os.environ:
print("Please define SIMPLEFLOW_JUMBO_FIELDS_BUCKET to run this example (see documentation).")
            raise ValueError("SIMPLEFLOW_JUMBO_FIELDS_BUCKET is not set")
long_string = self.submit(repeat50k, str(string))
string_length = self.submit(length, long_string)
print('{} * 50k has a length of: {}'.format(
string, string_length.result))
|
the-stack_106_16369
|
'''
grammar.py: an object-oriented implementation of formal grammar
I attempted to create a general-purpose framework for defining formal grammars.
See the example notebook where I create a formal grammar that mimmicks
Barsalou's 1999 Perceptual Symbol Systems framework.
Author: Matthew A. Turner
Date: 2/24/2017
'''
import random
class Grammar:
"""
    All we need to define a grammar is its set of production rules, since the
    terminal and non-terminal symbols can be inferred from them.
Arguments:
production_rules (list(ProductionRule)): list of production rules
that describe the allowed transitions
"""
def __init__(self, production_rules):
super(Grammar, self).__init__()
self.production_rules = production_rules
all_output_states = set(
output_state
for pr in production_rules
for output_state in pr.output_states
)
self.nonterminal_symbols = set(
pr.input_state for pr in production_rules
)
self.terminal_symbols = set(
output_state
for output_state in all_output_states
if output_state not in self.nonterminal_symbols
)
potential_sentence_symbol = set(
ns
for ns in self.nonterminal_symbols
if ns not in all_output_states
)
self.sentence_symbol = potential_sentence_symbol.pop()
if len(potential_sentence_symbol) > 0:
raise RuntimeError('more than one sentence symbol detected')
def print_latex(self):
return _latex_format_grammar(self)
def _latex_format_grammar(grammar):
from string import Template
s = Template(
r'''
\begin{equation}
\begin{array}{ll}
( & \\
& S = \{\textrm{ $sentence_symbol }\}, \\
& N = \{ $formatted_nonterminals \}, \\
& \Sigma = \{ $formatted_terminals \}, \\
&P = \{ \\
& \begin{array}{ll}
$formatted_production_rules
\end{array} \\
& \} \\
) &
\end{array}
\end{equation}
'''
)
def format_symbol_list(symbols):
return ', '.join(
r'\textrm{' + nonterm + r'}'
for nonterm in symbols
)
sentence_symbol = grammar.sentence_symbol
formatted_nonterminals = format_symbol_list(grammar.nonterminal_symbols)
formatted_terminals = format_symbol_list(grammar.terminal_symbols)
def format_production_rule(production_rule):
preamble = r'& ' + production_rule.input_state + r'\rightarrow'
output_states = production_rule.output_states
return preamble + '~|~'.join(
r'\textrm{' + term + r'}' for term in output_states
)
formatted_production_rules = '\n'.join(
format_production_rule(pr) + r' \\' for pr in grammar.production_rules
)
return s.substitute(sentence_symbol=sentence_symbol,
formatted_terminals=formatted_terminals,
formatted_nonterminals=formatted_nonterminals,
formatted_production_rules=formatted_production_rules)
class Language:
"""
Language takes as input a grammar and produces grammatical statements.
"""
def __init__(self, grammar):
self.production_state_lookup = {
ps.input_state: ps for ps in grammar.production_rules
}
self.root_production_rule =\
self.production_state_lookup[grammar.sentence_symbol]
self.grammar = grammar
def productions(self):
while True:
root = self.root_production_rule
next_pr = [root]
all_terminals = False
terminals = []
while not all_terminals:
output_states =\
[os for pr in next_pr for os in pr.produce()]
terminals.extend([
os
for os in output_states
if os in self.grammar.terminal_symbols
])
nonterminals = [
os
for os in output_states
if os not in terminals
]
next_pr = [
self.production_state_lookup[os]
for os in nonterminals
]
all_terminals = len(nonterminals) == 0
production = ' '.join(terminals)
yield production
class ProductionRule:
def __init__(self, input_state, output_states, n_outputs='one'):
"""
Arguments:
input_state (str): a single input state
output_states (str or list): either a single output state or a
set of output states
"""
self.input_state = input_state
self.n_outputs = n_outputs
if type(output_states) is str:
self.output_states = [output_states]
elif type(output_states) is list:
self.output_states = output_states
else:
raise RuntimeError('output_states must be type str or list')
self.transition_probabilities = [
(os, 1.0/len(self.output_states)) for os in self.output_states
]
def produce(self):
if self.n_outputs == 'one':
return [random.choice(self.output_states)]
elif self.n_outputs == 'many':
# choose a random number of outputs from 1 to all
return random.sample(
self.output_states,
random.choice(
range(1, len(self.output_states) + 1)
)
)
elif self.n_outputs == 'all':
return self.output_states
else:
raise RuntimeError('n_outputs must be one, many or all')
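# Minimal usage sketch, not part of the original module; the toy grammar below
# is a made-up example. 'S' is inferred as the sentence symbol because it never
# appears on a right-hand side.
def _demo_grammar():
    rules = [
        ProductionRule('S', ['NP', 'VP'], n_outputs='all'),
        ProductionRule('NP', ['the cat', 'a dog']),
        ProductionRule('VP', ['sleeps', 'barks']),
    ]
    language = Language(Grammar(rules))
    gen = language.productions()
    # e.g. ['the cat sleeps', 'a dog barks', 'the cat barks']
    return [next(gen) for _ in range(3)]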
|
the-stack_106_16371
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Quicksilver(MakefilePackage):
"""Quicksilver is a proxy application that represents some elements of the
Mercury workload.
"""
tags = ['proxy-app']
homepage = "https://codesign.llnl.gov/quicksilver.php"
url = "https://github.com/LLNL/Quicksilver/tarball/V1.0"
git = "https://github.com/LLNL/Quicksilver.git"
maintainers = ['richards12']
version('master', branch='master')
version('1.0', sha256='83371603b169ec75e41fb358881b7bd498e83597cd251ff9e5c35769ef22c59a')
variant('openmp', default=True, description='Build with OpenMP support')
variant('mpi', default=True, description='Build with MPI support')
depends_on('mpi', when="+mpi")
build_directory = 'src'
@property
def build_targets(self):
targets = []
spec = self.spec
targets.append('CXXFLAGS={0}'.format(self.compiler.cxx11_flag))
if '+mpi' in spec:
targets.append('CXX={0}'.format(spec['mpi'].mpicxx))
else:
targets.append('CXX={0}'.format(spack_cxx))
if '+openmp+mpi' in spec:
targets.append('CPPFLAGS=-DHAVE_MPI -DHAVE_OPENMP {0}'.format(
self.compiler.openmp_flag))
elif '+openmp' in spec:
targets.append('CPPFLAGS=-DHAVE_OPENMP {0}'.format(
self.compiler.openmp_flag))
elif '+mpi' in spec:
targets.append('CPPFLAGS=-DHAVE_MPI')
if '+openmp' in self.spec:
targets.append('LDFLAGS={0}'.format(self.compiler.openmp_flag))
return targets
def install(self, spec, prefix):
mkdir(prefix.bin)
mkdir(prefix.doc)
install("src/qs", prefix.bin)
install('LICENSE.md', prefix.doc)
install('README.md', prefix.doc)
install_tree('Examples', prefix.Examples)
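# Illustrative usage (an assumption about a typical invocation, not part of the
# package file): with this recipe on a Spack repo path, a build with both
# variants enabled would be requested roughly as
#
#   spack install quicksilver +openmp +mpi
#
# which exercises the MPI/OpenMP branches of build_targets above.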
|
the-stack_106_16374
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NDHWC", False), ("NDHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCDHW" format is only supported on CUDA.
test_configs += [("NCDHW", True)]
return test_configs
class Conv3DTest(test.TestCase):
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
padding, data_format, use_gpu):
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if data_format == "NCDHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv3d(t1, t2, strides, padding=padding,
data_format=data_format)
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
return conv
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
results = []
for data_format, use_gpu in GetTestConfigs():
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_format,
use_gpu=use_gpu)
results.append(result)
tolerance = 1e-2 if use_gpu else 1e-5
with self.test_session() as sess:
values = sess.run(results)
for value in values:
print("expected = ", expected)
print("actual = ", value)
self.assertAllClose(expected, value.flatten(), atol=tolerance,
rtol=1e-6)
def testConv3D1x1x1Filter(self):
expected_output = [
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
]
# These are equivalent to the Conv2D1x1 case.
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 1, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 1, 2, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
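  # Cross-check sketch (not part of the original test): for a 1x1x1x3x3 filter
  # the convolution reduces to a per-voxel channel matmul, so the expected
  # block above can be reproduced with plain numpy:
  #
  #   import numpy as np
  #   x = np.arange(1, 19, dtype=np.float64).reshape(6, 3)  # 6 voxels, 3 ch
  #   w = np.arange(1, 10, dtype=np.float64).reshape(3, 3)  # (c_in, c_out)
  #   (x @ w).ravel()[:3]                                   # -> [30. 36. 42.]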
# Expected values computed using scipy's correlate function.
def testConv3D2x2x2Filter(self):
expected_output = [
19554., 19962., 20370., 22110., 22590., 23070., 34890., 35730., 36570.,
37446., 38358., 39270., 50226., 51498., 52770., 52782., 54126., 55470.
]
# expected_shape = [1, 3, 1, 2, 5]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin
filter_in_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout
stride=1,
padding="VALID",
expected=expected_output)
def testConv3DStrides(self):
    expected_output = [
        102., 151., 172., 193., 214., 235., 142.,
        438., 592., 613., 634., 655., 676., 394.,
        774., 1033., 1054., 1075., 1096., 1117., 646.,
        1894., 2503., 2524., 2545., 2566., 2587., 1486.,
        2230., 2944., 2965., 2986., 3007., 3028., 1738.,
        2566., 3385., 3406., 3427., 3448., 3469., 1990.,
        3686., 4855., 4876., 4897., 4918., 4939., 2830.,
        4022., 5296., 5317., 5338., 5359., 5380., 3082.,
        4358., 5737., 5758., 5779., 5800., 5821., 3334.,
    ]
self._VerifyValues(
tensor_in_sizes=[1, 5, 8, 7, 1],
filter_in_sizes=[1, 2, 3, 1, 1],
stride=[2, 3, 1], # different stride for each spatial dimension
padding="SAME",
expected=expected_output)
def testConv3D2x2x2FilterStride2(self):
expected_output = [19554., 19962., 20370., 50226., 51498., 52770.]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="VALID",
expected=expected_output)
def testConv3DStride3(self):
expected_output = [
36564., 38022., 39480., 37824., 39354., 40884., 39084., 40686., 42288.,
46644., 48678., 50712., 47904., 50010., 52116., 49164., 51342., 53520.,
107124., 112614., 118104., 108384., 113946., 119508., 109644., 115278.,
120912., 117204., 123270., 129336., 118464., 124602., 130740., 119724.,
125934., 132144.
]
self._VerifyValues(
tensor_in_sizes=[1, 6, 7, 8, 2],
filter_in_sizes=[3, 2, 1, 2, 3],
stride=3,
padding="VALID",
expected=expected_output)
def testConv3D2x2x2FilterStride2Same(self):
expected_output = [
19554., 19962., 20370., 10452., 10710., 10968., 50226., 51498., 52770.,
23844., 24534., 25224.
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="SAME",
expected=expected_output)
def testKernelSmallerThanStride(self):
expected_output = [1., 3., 7., 9., 19., 21., 25., 27.]
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="SAME",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="VALID",
expected=expected_output)
expected_output = [
1484., 1592., 770., 2240., 2348., 1106., 1149., 1191., 539., 6776.,
6884., 3122., 7532., 7640., 3458., 3207., 3249., 1421., 3005., 3035.,
1225., 3215., 3245., 1309., 1013., 1022., 343.
]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="SAME",
expected=expected_output)
expected_output = [1484., 1592., 2240., 2348., 6776., 6884., 7532., 7640.]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="VALID",
expected=expected_output)
def testKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 2, 1],
filter_in_sizes=[2, 1, 2, 1, 2],
stride=1,
padding="VALID",
expected=[50, 60])
def _ConstructAndTestGradientForConfig(
self, batch, input_shape, filter_shape, in_depth, out_depth, stride,
padding, test_input, data_format, use_gpu):
input_planes, input_rows, input_cols = input_shape
filter_planes, filter_rows, filter_cols = filter_shape
input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
filter_shape = [
filter_planes, filter_rows, filter_cols, in_depth, out_depth
]
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if padding == "VALID":
output_planes = int(
math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
output_rows = int(
math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
output_cols = int(
math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
else:
output_planes = int(math.ceil(float(input_planes) / strides[1]))
output_rows = int(math.ceil(float(input_rows) / strides[2]))
output_cols = int(math.ceil(float(input_cols) / strides[3]))
output_shape = [batch, output_planes, output_rows, output_cols, out_depth]
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
if test.is_gpu_available() and use_gpu:
data_type = dtypes.float32
      # TODO(mjanusz): Modify gradient_checker to also provide max relative
# error and synchronize the tolerance levels between the tests for forward
# and backward computations.
if test.is_gpu_available():
tolerance = 5e-3
else:
# As of Aug 2016, higher tolerance is needed for some CPU architectures.
# Runs on a single machine can also generate slightly different errors
# because of multithreading.
tolerance = 8e-3
else:
data_type = dtypes.float64
tolerance = 1e-8
with self.test_session(use_gpu=use_gpu):
orig_input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=data_type, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=data_type, name="filter")
if data_format == "NCDHW":
input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
strides = test_util.NHWCToNCHW(strides)
else:
input_tensor = orig_input_tensor
conv = nn_ops.conv3d(
input_tensor, filter_tensor, strides, padding,
data_format=data_format, name="conv")
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
if test_input:
err = gradient_checker.compute_gradient_error(orig_input_tensor,
input_shape,
conv, output_shape)
else:
err = gradient_checker.compute_gradient_error(filter_tensor,
filter_shape, conv,
output_shape)
print("conv3d gradient error = ", err)
self.assertLess(err, tolerance)
def ConstructAndTestGradient(self, **kwargs):
for data_format, use_gpu in GetTestConfigs():
self._ConstructAndTestGradientForConfig(data_format=data_format,
use_gpu=use_gpu, **kwargs)
def testInputGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 5, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(4, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
def testInputGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 5),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(7, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=False)
def testInputGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 7, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(4, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=False)
def testInputGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 2, 2),
filter_shape=(3, 2, 1),
in_depth=2,
out_depth=1,
stride=1,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(7, 3, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 3, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=True)
def testFilterGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
def testInputGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
def disabledtestFilterGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=False)
if __name__ == "__main__":
test.main()
|
the-stack_106_16375
|
from requests import Response
class ResponseError(Exception):
"""Raised for all non 200 response code from API methods"""
def __init__(self, response: Response) -> None:
self.response = response
@property
def headers(self):
headers_str = ''
if self.response.headers:
for header, value in self.response.headers.items():
headers_str += f'{header}: {value}\n'
return headers_str
def __str__(self):
response_str = """
Response Failed
==========================
Status Code:
{status_code}
==========================
Headers:
{headers}
==========================
Response:
{response_text}
==========================
"""
        # strip only the leading indentation so headings keep their spacing
        response_str = '\n'.join(
            line.strip() for line in response_str.splitlines())
response_str = response_str.replace('{status_code}',
str(self.response.status_code))
response_str = response_str.replace('{headers}', self.headers)
response_str = response_str.replace('{response_text}',
self.response.text)
return response_str
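# Illustrative raise pattern (an assumption, not taken from the library); the
# session and url names are placeholders:
#
#   response = session.get(url)
#   if response.status_code >= 400:
#       raise ResponseError(response)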
class PayloadTooLargeError(Exception):
"""Raised if POST Payload is above 60KB"""
def __init__(self, size):
self.size = size
def __str__(self):
return f'Maximal size for payload is 60KB. Input was: {self.size} KB'
|
the-stack_106_16376
|
import argparse
import ast
import logging
import sys
import traceback
import zmq
import vistrails.core.db.io
from vistrails.core.db.locator import UntitledLocator, FileLocator
from vistrails.core.vistrail.controller import VistrailController
from mldebugger.utils import record_python_run
from mldebugger.pipeline import Pipeline
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
is_initialized = False
_application = None
is_not_sync = True
def initialize():
"""Initializes VisTrails.
You don't have to call this directly. Initialization will happen when you
start using the API.
"""
global is_initialized
global _application
if is_initialized:
return False
# Creates a core application
_application = vistrails.core.application.init(
options_dict={
'installBundles': False,
'loadPackages': False,
'enablePackagesSilently': True},
args=[])
is_initialized = True
return True
initialize()
parser = argparse.ArgumentParser()
parser.add_argument("--server", type=str, help="host responsible for execution requests")
parser.add_argument("--receive", type=str, help="port to receive messages on")
parser.add_argument("--send", type=str, help="port to send messages to")
args = parser.parse_args()
if args.server:
HOST = args.server
else:
HOST = 'localhost'
if args.receive:
RECEIVE = args.receive
else:
RECEIVE = '5557'
if args.send:
SEND = args.send
else:
SEND = '5558'
context = zmq.Context()
# Socket to receive messages on
receiver = context.socket(zmq.PULL)
receiver.connect("tcp://{0}:{1}".format(HOST, RECEIVE))
# Socket to send messages to
sender = context.socket(zmq.PUSH)
sender.connect("tcp://{0}:{1}".format(HOST, SEND))
while True:
# Receiving pipeline instance configuration
data = receiver.recv()
logging.debug('Receiving: ' + data)
fields = data.split("|")
filename = fields[0]
parameter_list = ast.literal_eval(fields[1])
inputs = ast.literal_eval(fields[2])
outputs = ast.literal_eval(fields[3])
locator = FileLocator(filename)
loaded_objs = vistrails.core.db.io.load_vistrail(locator)
controller = VistrailController(loaded_objs[0], locator,
*loaded_objs[1:])
controller.do_version_switch(
controller.get_latest_version_in_graph())
pipeline = Pipeline(controller)
kwargs = {}
for i in range(len(parameter_list)):
kwargs[inputs[i]] = parameter_list[i]
try:
#Executing pipeline instance and retieving the result
result = pipeline.execute(**kwargs)
for output in outputs:
parameter_list.append(str(result.output_port(output)))
except:
traceback.print_exc(file=sys.stdout)
parameter_list.append(str(False))
kwargs['result'] = parameter_list[-1]
record_python_run(kwargs, filename)
logging.debug('Pipeline result: ' + parameter_list[-1])
#Sending the instance result back to the Algorithm
sender.send_string(str(parameter_list))
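# Illustrative request format, inferred from the parsing above rather than from
# any coordinator code; the path and port names are placeholders. A producer
# bound to the same PUSH/PULL ports would send one pipe-delimited message per
# pipeline instance:
#
#   message = "|".join([
#       "/path/to/workflow.vt",            # VisTrails file
#       str([0.1, 10]),                    # parameter values
#       str(["learning_rate", "epochs"]),  # input port names
#       str(["accuracy"]),                 # output port names
#   ])
#   producer.send_string(message)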
|
the-stack_106_16377
|
from sharpy.plans.acts import *
from sharpy.plans.acts.terran import *
from sharpy.plans.require import *
from sharpy.plans.tactics import *
from sharpy.plans.tactics.terran import *
from sharpy.plans import BuildOrder, Step, StepBuildGas
from sc2 import UnitTypeId, Race
from sc2.ids.upgrade_id import UpgradeId
from sharpy.knowledges import KnowledgeBot
class TwoBaseTanks(KnowledgeBot):
def __init__(self):
super().__init__("Two base tanks")
async def create_plan(self) -> BuildOrder:
build_steps_scv = [
Step(None, ActUnit(UnitTypeId.SCV, UnitTypeId.COMMANDCENTER, 16 + 6),
skip=RequiredUnitExists(UnitTypeId.COMMANDCENTER, 2)),
Step(None, ActUnit(UnitTypeId.SCV, UnitTypeId.COMMANDCENTER, 32 + 12))
]
build_steps_buildings = [
Step(RequiredSupply(13), GridBuilding(UnitTypeId.SUPPLYDEPOT, 1)),
Step(RequiredUnitReady(UnitTypeId.SUPPLYDEPOT, 0.95),
GridBuilding(UnitTypeId.BARRACKS, 1)),
StepBuildGas(1, RequiredSupply(16)),
ActExpand(2),
Step(RequiredSupply(16), GridBuilding(UnitTypeId.SUPPLYDEPOT, 2)),
StepBuildGas(2, RequiredUnitExists(UnitTypeId.MARINE, 1, include_pending=True)),
Step(None, GridBuilding(UnitTypeId.FACTORY, 1),
skip_until=RequiredUnitReady(UnitTypeId.BARRACKS, 1)),
Step(None, ActBuildAddon(UnitTypeId.FACTORYTECHLAB, UnitTypeId.FACTORY, 1),
skip_until=RequiredUnitReady(UnitTypeId.FACTORY, 1)),
Step(RequiredSupply(28), GridBuilding(UnitTypeId.SUPPLYDEPOT, 4)),
Step(None, GridBuilding(UnitTypeId.FACTORY, 2)),
Step(None, ActBuildAddon(UnitTypeId.FACTORYTECHLAB, UnitTypeId.FACTORY, 2)),
Step(RequiredSupply(38), GridBuilding(UnitTypeId.SUPPLYDEPOT, 5)),
Step(None, ActExpand(3), skip_until=RequireCustom(self.should_expand)),
Step(None, ActExpand(4), skip_until=RequiredAll([RequireCustom(self.should_expand),
RequiredUnitReady(UnitTypeId.COMMANDCENTER, 3)])),
# BuildStep(None, GridBuilding(UnitTypeId.FACTORY, 3)),
StepBuildGas(3),
Step(RequiredSupply(45), GridBuilding(UnitTypeId.SUPPLYDEPOT, 8)),
Step(None, GridBuilding(UnitTypeId.BARRACKS, 2)),
Step(None, ActBuildAddon(UnitTypeId.BARRACKSTECHLAB, UnitTypeId.BARRACKS, 1)),
Step(None, ActTech(UpgradeId.SHIELDWALL, UnitTypeId.BARRACKSTECHLAB)),
StepBuildGas(4),
# BuildStep(None, GridBuilding(UnitTypeId.ARMORY, 1)),
Step(RequiredSupply(75), GridBuilding(UnitTypeId.SUPPLYDEPOT, 10)),
Step(None, GridBuilding(UnitTypeId.BARRACKS, 5)),
Step(None, ActBuildAddon(UnitTypeId.BARRACKSREACTOR, UnitTypeId.BARRACKS, 3)),
Step(None, GridBuilding(UnitTypeId.FACTORY, 3)),
Step(None, ActBuildAddon(UnitTypeId.FACTORYTECHLAB, UnitTypeId.FACTORY, 3)),
Step(RequiredSupply(85), GridBuilding(UnitTypeId.SUPPLYDEPOT, 14)),
]
build_steps_mech = [
# Step(RequiredUnitExists(UnitTypeId.FACTORY, 1), ActUnit(UnitTypeId.HELLION, UnitTypeId.FACTORY, 2)),
Step(RequiredUnitReady(UnitTypeId.FACTORYTECHLAB, 1), ActUnit(UnitTypeId.SIEGETANK, UnitTypeId.FACTORY, 20))
]
build_steps_marines = [
Step(RequiredUnitReady(UnitTypeId.BARRACKS, 1), ActUnit(UnitTypeId.MARINE, UnitTypeId.BARRACKS, 2)),
Step(RequiredMinerals(250), ActUnit(UnitTypeId.MARINE, UnitTypeId.BARRACKS, 100))
]
build_order = BuildOrder([
build_steps_scv,
build_steps_buildings,
build_steps_mech,
Step(None, MorphOrbitals(), skip_until=RequiredUnitReady(UnitTypeId.BARRACKS, 1)),
build_steps_marines,
ActBuildAddon(UnitTypeId.FACTORYTECHLAB, UnitTypeId.FACTORY, 99)
])
scout = Step(None, WorkerScout(), skip_until=RequiredUnitExists(UnitTypeId.BARRACKS, 1))
self.attack = PlanZoneAttack(60)
tactics = [
PlanCancelBuilding(),
LowerDepots(),
PlanZoneDefense(),
scout,
ScanEnemy(120),
CallMule(),
PlanDistributeWorkers(),
Repair(),
ContinueBuilding(),
PlanZoneGatherTerran(),
self.attack,
PlanFinishEnemy(),
]
return BuildOrder([
build_order,
tactics
])
def should_expand(self, knowledge):
count = 0
for zone in self.knowledge.our_zones:
            if zone.our_townhall is not None:
count += zone.our_townhall.surplus_harvesters
return count > 5
class LadderBot(TwoBaseTanks):
@property
def my_race(self):
return Race.Terran
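# Illustrative launch sketch (an assumption about the python-sc2 entry point,
# not part of this bot file); the map name and opponent settings are
# placeholders:
#
#   import sc2
#   from sc2 import Race, Difficulty
#   from sc2.player import Bot, Computer
#
#   sc2.run_game(sc2.maps.get("AbyssalReefLE"),
#                [Bot(Race.Terran, TwoBaseTanks()),
#                 Computer(Race.Zerg, Difficulty.Hard)],
#                realtime=False)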
|
the-stack_106_16378
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PreAuthorizedApplicationPermission(Model):
"""Contains information about the pre-authorized permissions.
:param direct_access_grant: Indicates whether the permission set is
DirectAccess or impersonation.
:type direct_access_grant: bool
:param access_grants: The list of permissions.
:type access_grants: list[str]
"""
_attribute_map = {
'direct_access_grant': {'key': 'directAccessGrant', 'type': 'bool'},
'access_grants': {'key': 'accessGrants', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(PreAuthorizedApplicationPermission, self).__init__(**kwargs)
self.direct_access_grant = kwargs.get('direct_access_grant', None)
self.access_grants = kwargs.get('access_grants', None)
|
the-stack_106_16379
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import sys
import time
from collections import namedtuple
from random import choice
from pysensu_yelp import Status
from paasta_tools import monitoring_tools
from paasta_tools.chronos_tools import compose_check_name_for_service_instance
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import get_services_for_cluster
from paasta_tools.utils import load_system_paasta_config
try:
from scribereader import scribereader
except ImportError:
scribereader = None
OOM_EVENTS_STREAM = 'tmp_paasta_oom_events'
OOMEvent = namedtuple('OOMEvent', ['hostname', 'container_id', 'process_name'])
def parse_args(args):
parser = argparse.ArgumentParser(description=(
'Check the %s stream and report to Sensu if'
' there are any OOM events.' % OOM_EVENTS_STREAM
))
parser.add_argument(
'-d', '--soa-dir', dest="soa_dir", default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
'-r', '--realert-every', dest="realert_every", type=int, default=1,
help="Sensu 'realert_every' to use.",
)
parser.add_argument(
'-s', '--superregion', dest="superregion", required=True,
help="The superregion to read OOM events from.",
)
return parser.parse_args(args)
def read_oom_events_from_scribe(cluster, superregion, num_lines=1000):
"""Read the latest 'num_lines' lines from OOM_EVENTS_STREAM and iterate over them."""
host_port = choice(scribereader.get_default_scribe_hosts(tail=True))
stream = scribereader.get_stream_tailer(
stream_name=OOM_EVENTS_STREAM,
tailing_host=host_port['host'],
tailing_port=host_port['port'],
use_kafka=True,
lines=num_lines,
superregion=superregion,
)
for line in stream:
try:
j = json.loads(line)
if j.get('cluster', '') == cluster:
yield j
except json.decoder.JSONDecodeError:
pass
def latest_oom_events(cluster, superregion, interval=60):
"""
:returns: {(service, instance): [OOMEvent, OOMEvent,...] }
if the number of events > 0
"""
start_timestamp = int(time.time()) - interval
res = {}
for e in read_oom_events_from_scribe(cluster, superregion):
if e['timestamp'] > start_timestamp:
key = (e['service'], e['instance'])
res.setdefault(key, []).append(
OOMEvent(
hostname=e.get('hostname', ''),
container_id=e.get('container_id', ''),
process_name=e.get('process_name', ''),
),
)
return res
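# Illustrative sketch only (not part of the original script): the scribe lines consumed by
# read_oom_events_from_scribe() are JSON objects, and latest_oom_events() groups them per
# (service, instance). The concrete values below are hypothetical; only the field names are
# taken from the code above.
#
#   {"timestamp": 1466000000, "cluster": "example-cluster", "service": "my_service",
#    "instance": "main", "hostname": "host1", "container_id": "abc123",
#    "process_name": "uwsgi"}
#
# would be returned as:
#
#   {("my_service", "main"): [OOMEvent(hostname="host1", container_id="abc123",
#                                      process_name="uwsgi")]}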
def compose_sensu_status(instance, oom_events, is_check_enabled):
"""
:param instance: InstanceConfig
:param oom_events: a list of OOMEvents
:param is_check_enabled: boolean to indicate whether the check enabled for the instance
"""
if not is_check_enabled:
return (
Status.OK, 'This check is disabled for {}.{}.'.format(
instance.service,
instance.instance,
),
)
if len(oom_events) == 0:
return (
Status.OK, 'No oom events for %s.%s in the last minute.' %
(instance.service, instance.instance),
)
else:
return (
Status.CRITICAL, 'The Out Of Memory killer killed %d processes (%s) '
'in the last minute in %s.%s containers.' % (
len(oom_events),
','.join(sorted({e.process_name for e in oom_events if e.process_name})),
instance.service,
instance.instance,
),
)
def send_sensu_event(instance, oom_events, args):
"""
:param instance: InstanceConfig
:param oom_events: a list of OOMEvents
"""
check_name = compose_check_name_for_service_instance(
'oom-killer',
instance.service,
instance.instance,
)
monitoring_overrides = instance.get_monitoring()
status = compose_sensu_status(
instance=instance,
oom_events=oom_events,
is_check_enabled=monitoring_overrides.get('check_oom_events', True),
)
monitoring_overrides.update({
'page': False,
'ticket': False,
'alert_after': '0m',
'realert_every': args.realert_every,
'runbook': 'y/check-oom-events',
'tip': 'Try bumping the memory limit past %dMB' % instance.get_mem(),
})
return monitoring_tools.send_event(
service=instance.service,
check_name=check_name,
overrides=monitoring_overrides,
status=status[0],
output=status[1],
soa_dir=instance.soa_dir,
)
def main(sys_argv):
args = parse_args(sys_argv[1:])
cluster = load_system_paasta_config().get_cluster()
victims = latest_oom_events(cluster, args.superregion)
for (service, instance) in get_services_for_cluster(cluster, soa_dir=args.soa_dir):
try:
instance_config = get_instance_config(
service=service,
instance=instance,
cluster=cluster,
load_deployments=False,
soa_dir=args.soa_dir,
)
oom_events = victims.get((service, instance), [])
send_sensu_event(instance_config, oom_events, args)
except NotImplementedError: # When instance_type is not supported by get_instance_config
pass
if __name__ == '__main__':
main(sys.argv)
|
the-stack_106_16380
|
#####################################################################
# #
# camera_server.py #
# #
# Copyright 2016, Monash University #
# #
# This file is part of the labscript suite (see #
# http://labscriptsuite.org) and is licensed under the Simplified #
# BSD License. See the license.txt file in the root of the project #
# for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
str = unicode
import sys
import time
import zprocess
from labscript_utils import check_version
import labscript_utils.shared_drive
# importing this wraps zlock calls around HDF file openings and closings:
import labscript_utils.h5_lock
import h5py
import numpy as np
check_version('zprocess', '1.3.3', '3.0')
# This file implements the protocol for a camera server, that is, a program
# that BLACS can interface with to control cameras. It contains a class that
# acts as a camera server and communicates with BLACS over zeromq. The
# protocol is as below. A user need not implement this protocol themselves,
# they instead should subclass CameraServer and override the
# transition_to_buffered(), transition_to_static(), and abort() methods. An
# example is shown at the bottom of this file. Note that the filepath sent from
# BLACS to the camera server has a 'network agnostic' prefix - it is assumed
# that BLACS and the camera server may not have the same path to the location
# of the HDF5 file, it may be on a shared drive with different drive
# letters/mount points on the two computers. So BLACS calls
# labscript_utils.shared_drive.path_to_agnostic() on the filepath before
# sending it, and the camera server should call
# labscript_utils.shared_drive.path_to_local() once receiving it. If you
# subclass CameraServer, you don't have to worry about this step, so long as
# the shared drive path is correctly declared in your labconfig file.
#
# All communications are as utf-8 encoded strings.
#
# Ping, can occur at any time:
# BLACS sends: 'hello'
# CameraServer responds: 'hello'
#
# transition_to_buffered, occurs when BLACS is preparing to start a shot:
# BLACS sends: '<utf8-encoded-path-of-h5-file->.h5'
# CameraServer responds: 'ok'
# BLACS sends: '' (empty string)
# (Camera server calls self.transition_to_buffered(), to do any processing
# it needs to do to set up the shot)
# CameraServer responds: 'done'
# OR, if exception encountered calling self.transition_to_buffered(), camera
# server calls self.abort() and then responds with the exception text.
#
# transition_to_static, occurs when BLACS has completed a shot:
# BLACS sends: 'done'
# CameraServer responds: 'ok'
# BLACS sends: '' (empty string)
# (Camera server calls self.transition_to_static(), to do any processing it
# needs to do at the end of the shot)
# CameraServer responds: 'done'
# OR, if exception encountered calling self.transition_to_static(), camera
# server calls self.abort() and then responds with the exception text.
#
# abort, can occur at any time:
# BLACS sends 'abort'
# (Camera server calls self.abort(), to return things to a sensible state
# where transition_to_buffered can be called again )
# CameraServer responds: 'done'
# OR, if exception encountered calling self.abort(), camera server responds
# with the exception text.
#
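# Illustrative client-side sketch of the exchange above (not part of labscript). BLACS
# actually speaks to the server through zprocess, but the same string protocol can be
# demonstrated with a plain pyzmq REQ socket; the port and filepath below are hypothetical.
#
#   import zmq
#
#   ctx = zmq.Context.instance()
#   sock = ctx.socket(zmq.REQ)
#   sock.connect('tcp://localhost:8765')
#
#   sock.send_string('hello')                 # ping
#   assert sock.recv_string() == 'hello'
#
#   sock.send_string(r'Z:\shots\example.h5')  # request transition_to_buffered
#   assert sock.recv_string() == 'ok'
#   sock.send_string('')
#   print(sock.recv_string())                 # 'done', or the exception text on failure
#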
class CameraServer(zprocess.ZMQServer):
def __init__(self, port):
zprocess.ZMQServer.__init__(self, port, type='string')
self._h5_filepath = None
def handler(self, request_data):
try:
print(request_data)
if request_data == 'hello':
return 'hello'
elif request_data.endswith('.h5'):
self._h5_filepath = labscript_utils.shared_drive.path_to_local(request_data)
self.send('ok')
self.recv()
self.transition_to_buffered(self._h5_filepath)
return 'done'
elif request_data == 'done':
self.send('ok')
self.recv()
self.transition_to_static(self._h5_filepath)
self._h5_filepath = None
return 'done'
elif request_data == 'abort':
self.abort()
self._h5_filepath = None
return 'done'
else:
raise ValueError('invalid request: %s'%request_data)
except Exception:
if self._h5_filepath is not None and request_data != 'abort':
try:
self.abort()
except Exception as e:
sys.stderr.write('Exception in self.abort() while handling another exception:\n{}\n'.format(str(e)))
self._h5_filepath = None
raise
def transition_to_buffered(self, h5_filepath):
"""To be overridden by subclasses. Do any preparatory processing
before a shot, eg setting exposure times, readying cameras to receive
triggers etc."""
print('transition to buffered')
def transition_to_static(self, h5_filepath):
"""To be overridden by subclasses. Do any post processing after a
shot, eg computing optical depth, fits, displaying images, saving
images and results to the h5 file, returning cameras to an idle
state."""
print('transition to static')
def abort(self):
"""To be overridden by subclasses. Return cameras and any other state
to one in which transition_to_buffered() can be called again. abort()
will be called if there was an exception in either
        transition_to_buffered() or transition_to_static(), and so should
ideally be written to return things to a sensible state even if those
methods did not complete. Like any cleanup function, abort() should
proceed to further cleanups even if earlier cleanups fail. As such it
should make liberal use of try: except: blocks, so that an exception
in performing one cleanup operation does not stop it from proceeding
to subsequent cleanup operations"""
print('abort')
# A minimalistic example of how to subclass a CameraServer:
class TubingenCameraServer(CameraServer):
"""Minimalistic camera server. Transition to buffered and abort are not
implemented, because we don't need to do anything in those cases. This
    camera server simply writes to the h5 file the images that an external
    program has already saved to disk during each shot."""
def transition_to_buffered(self, h5_filepath):
# Our minimalistic example doesn't need to implement this method,
# since the camera we used simply saved images to disk every time
# it received a trigger, and didn't need any per-shot
# configuration. But here is where you would put code to get the
# camera ready for a shot, with its configuration possibly
# depending on the contents of the h5 file, such as the globals in
# h5_file['globals'].attrs.
pass
def transition_to_static(self, h5_filepath):
"""Read FITS images from file saved by an external program, and save
them to the h5 file"""
import pyfits
start_time = time.time()
with h5py.File(h5_filepath) as f:
group = f['devices']['camera']
            if 'EXPOSURES' not in group:
print('no images taken this shot')
return
group = f.create_group('images').create_group('side').create_group('absorption')
with pyfits.open(r'C:\CameraControl\images\1_0_0.fits') as fits_images:
image_array = np.array(fits_images[0].data, dtype=float)
group.create_dataset('atoms',data=image_array)
with pyfits.open(r'C:\CameraControl\images\1_0_1.fits') as fits_images:
image_array = np.array(fits_images[0].data, dtype=float)
group.create_dataset('flat',data=image_array)
with pyfits.open(r'C:\CameraControl\images\1_0_2.fits') as fits_images:
image_array = np.array(fits_images[0].data, dtype=float)
group.create_dataset('dark',data=image_array)
# Copy over the effective pixel size to a spot that lyse
# automatically grabs params from:
effective_pixel_size = f['/devices/camera'].attrs['effective_pixel_size']
f['images/side'].attrs['effective_pixel_size'] = effective_pixel_size
print('image saving time: %s s' %str(time.time() - start_time))
def abort(self):
# Our minimalistic example doesn't need to implement this method,
# since the camera we used was always ready and didn't need to be
# 'reset' to be ready for a new shot. But here is where you would
# put cleanup code to do so. Likely this would be very similar to
# transition_to_static, except without saving any data to a h5 file.
pass
if __name__ == '__main__':
# How to run a camera server:
port = 8765
print('starting camera server on port %d...' % port)
server = CameraServer(port)
server.shutdown_on_interrupt()
|
the-stack_106_16382
|
"""
DEPRECATED soon. (I haven't tested this code since 2014)
This plugin allows you to receive test notifications through HipChat.
Mentions only occur during normal business hours. (Can be changed)
By default, only failure notifications will be sent.
"""
import os
import requests
import logging
import datetime
from nose.plugins import Plugin
from seleniumbase.config import settings
HIPCHAT_URL = 'https://api.hipchat.com/v1/rooms/message'
HIPCHAT_AUTH_TOKEN = settings.HIPCHAT_AUTH_TOKEN
class HipchatReporting(Plugin):
'''
Usage: --with-hipchat_reporting --hipchat_room_id=[HIPCHAT ROOM ID]
--hipchat_owner_to_mention=[HIPCHAT @NAME]
'''
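    # Example invocation, assuming the suite is run with nosetests (all values below are
    # hypothetical and only illustrate the flags defined in options() further down):
    #
    #   nosetests --with-hipchat_reporting \
    #             --hipchat_room_id=123456 \
    #             --hipchat_owner_to_mention=jdoe \
    #             --hipchat_notify_on_success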
name = 'hipchat_reporting'
def __init__(self):
super(HipchatReporting, self).__init__()
self.hipchat_room_id = None
self.hipchat_owner_to_mention = None
self.hipchat_notify_on_success = False
self.build_url = os.environ.get('BUILD_URL')
self.successes = []
self.failures = []
self.errors = []
def options(self, parser, env):
super(HipchatReporting, self).options(parser, env=env)
parser.add_option(
'--hipchat_room_id', action='store',
dest='hipchat_room_id',
help='The hipchat room ID notifications will be sent to.',
default=None)
parser.add_option(
'--hipchat_owner_to_mention', action='store',
dest='hipchat_owner_to_mention',
help='The hipchat username to @mention in notifications.',
default=None)
parser.add_option(
'--hipchat_notify_on_success', action='store_true',
default=False,
dest='hipchat_notify_on_success',
help='''Flag for including success notifications.
If not specified, only notifies on errors/failures
by default.''')
def configure(self, options, conf):
super(HipchatReporting, self).configure(options, conf)
if not self.enabled:
return
if not options.hipchat_room_id:
raise Exception('''A hipchat room ID to notify must be specified
when using the hipchat reporting plugin.''')
else:
self.hipchat_room_id = options.hipchat_room_id
self.hipchat_owner_to_mention = (
options.hipchat_owner_to_mention or None)
self.hipchat_notify_on_success = options.hipchat_notify_on_success
def addSuccess(self, test, capt):
self.successes.append(test.id())
def addError(self, test, err, capt=None):
self.errors.append("ERROR: " + test.id())
def addFailure(self, test, err, capt=None, tbinfo=None):
self.failures.append("FAILED: " + test.id())
def finalize(self, result):
message = ''
success = True
if not result.wasSuccessful():
success = False
if (self.hipchat_owner_to_mention and
self._is_during_business_hours()):
message += "@" + self.hipchat_owner_to_mention + '\n'
if self.failures:
message += "\n".join(self.failures)
if self.errors:
message += '\n'
if self.errors:
message += "\n".join(self.errors)
if self.build_url:
message += '\n' + self.build_url
elif self.hipchat_notify_on_success and self.successes:
message = "SUCCESS! The following tests ran successfully:\n+ "
message += "\n+ ".join(self.successes)
if message:
self._send_hipchat_notification(message, success=success)
def _is_during_business_hours(self):
now = datetime.datetime.now()
# Mon - Fri, 9am-6pm
return now.weekday() <= 4 and now.hour >= 9 and now.hour <= 18
def _send_hipchat_notification(self, message, success=True,
sender='Selenium'):
response = requests.post(HIPCHAT_URL, params={
'auth_token': HIPCHAT_AUTH_TOKEN,
'room_id': self.hipchat_room_id,
'from': sender,
'message': message,
'message_format': 'text',
'color': 'green' if success else 'red',
'notify': '0',
'format': 'json'
})
if response.status_code == 200:
logging.debug("Notification sent to room %s", self.hipchat_room_id)
return True
else:
logging.error("Failed to send notification to room %s",
self.hipchat_room_id)
return False
|
the-stack_106_16383
|
import adafruit_tlc5947
import board
import busio
import digitalio
import time
SCK = board.SCK
MOSI = board.MOSI
LATCH = digitalio.DigitalInOut(board.D22)
number_of_boards = 3
number_of_channels = number_of_boards * 24
spi = busio.SPI(clock=SCK, MOSI=MOSI)
tlc5947 = adafruit_tlc5947.TLC5947(spi, LATCH,num_drivers=number_of_boards)
pins = [0]*(number_of_channels)
print("number of pins=", len(pins))
for channel in range(len(pins)):
print(channel)
pins[channel] = tlc5947.create_pwm_out(channel)
groups = {
"TRAIL_ROLLOVER_RIGHT" : [16,15,14,13,12],
"TRAIL_ROLLOVER_LEFT" : [19,20,21,22,23],
"TRAIL_SLING_RIGHT" : [11,10,9],
"TRAIL_SLING_LEFT" : [0,1,2],
"TRAIL_POP_LEFT" : [66,65,64,63,62,61,60],
"TRAIL_POP_CENTER" : [69,68,67],
"TRAIL_POP_RIGHT" : [36,37,38],
"TRAIL_SPINNER" : [39,40,41,42,43,44,45,46,47],
"PIE_ROLLOVER_RIGHT" : [27,28,29],
"PIE_ROLLOVER_LEFT" : [54,55,56],
"PIE_SLING_RIGHT" : [24,25,26],
"PIE_SLING_LEFT" : [57,58,59],
"PIE_POP_LEFT" : [51,52,52],
"PIE_POP_CENTER" : [48,49,50],
"PIE_POP_RIGHT" : [33,34,35],
"PIE_SPINNER" : [30,31,32],
"SIGN_ARROW_LEFT" : [5,4,3],
"SIGN_ARROW_RIGHT" : [6,7,8],
"SIGN_BOTTOM_LEFT" : [17],
"SIGN_BOTTOM_RIGHT" : [18],
"SIGN_TOP" : [70,71],
}
group_names = [
"PIE_ROLLOVER_RIGHT",
"PIE_ROLLOVER_LEFT",
"PIE_SLING_RIGHT",
"PIE_SLING_LEFT",
"PIE_SPINNER",
"PIE_POP_RIGHT",
"PIE_POP_CENTER",
"PIE_POP_LEFT",
"SIGN_ARROW_RIGHT",
"SIGN_ARROW_LEFT",
"SIGN_BOTTOM_RIGHT",
"SIGN_BOTTOM_LEFT",
"SIGN_TOP",
"TRAIL_ROLLOVER_RIGHT",
"TRAIL_ROLLOVER_LEFT",
"TRAIL_SLING_RIGHT",
"TRAIL_SLING_LEFT",
"TRAIL_SPINNER",
"TRAIL_POP_RIGHT",
"TRAIL_POP_CENTER",
"TRAIL_POP_LEFT",
"TRAIL_SLING_LEFT"
]
while True:
for group_name in group_names:
led_group = groups[group_name]
for led in led_group:
pins[led].duty_cycle = 40000
time.sleep(1)
for led in led_group:
pins[led].duty_cycle = 0
#time.sleep(1)
|
the-stack_106_16384
|
# coding: utf-8
"""
Swagger Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class OuterNumber(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self):
"""
OuterNumber - a model defined in Swagger
"""
self.discriminator = None
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, OuterNumber):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
the-stack_106_16385
|
import csv
import numpy as np
def getDataSource(data_path):
size_of_tv = []
Average_time_spent = []
with open(data_path) as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
size_of_tv.append(float(row["Size of TV"]))
Average_time_spent.append(float(row["\tAverage time spent watching TV in a week (hours)"]))
return {"x" : size_of_tv, "y": Average_time_spent}
def findCorrelation(datasource):
correlation = np.corrcoef(datasource["x"], datasource["y"])
print("Correlation between Size of Tv and Average time spent watching Tv in a week :- \n--->",correlation[0,1])
def setup():
data_path = "./data/Size of TV,Average time spent watching TV in a week (hours).csv"
datasource = getDataSource(data_path)
findCorrelation(datasource)
setup()
|
the-stack_106_16387
|
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from api.authentication.serializers import RegisterSerializer
class RegisterViewSet(viewsets.ModelViewSet):
http_method_names = ["post"]
permission_classes = (AllowAny,)
serializer_class = RegisterSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
return Response(
{
"success": True,
"userID": user.id,
"msg": "The user was successfully registered",
},
status=status.HTTP_201_CREATED,
)
|
the-stack_106_16389
|
import importlib
import pathlib
import os
import pandas as pd
import numpy as np
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
# constants = importlib.import_module("apps.dash-oil-gas-ternary.constants")
import constants
# app initialize
app = dash.Dash(
__name__,
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1.0"}
],
)
server = app.server
app.config["suppress_callback_exceptions"] = True
# mapbox
mapbox_access_token = "pk.eyJ1IjoieWNhb2tyaXMiLCJhIjoiY2p1MDR5c3JmMzJsbjQ1cGlhNHA3MHFkaCJ9.xb3lXp5JskCYFewsv5uU1w"
# Load data
APP_PATH = str(pathlib.Path(__file__).parent.resolve())
df = pd.read_csv(os.path.join(APP_PATH, os.path.join("data", "test_composition.csv")))
df_prod = pd.read_csv(
os.path.join(APP_PATH, os.path.join("data", "YearlyProduction_table_1.csv"))
)
# Assign color to legend
colormap = {}
for ind, formation_name in enumerate(df["fm_name"].unique().tolist()):
colormap[formation_name] = constants.colors[ind]
def build_banner():
return html.Div(
id="banner",
className="banner",
children=[
html.Img(src=app.get_asset_url("dash-logo.png")),
html.H6("Oil and gas ternary map"),
],
)
def build_graph_title(title):
return html.P(className="graph-title", children=title)
def generate_production_plot(processed_data):
"""
    :param processed_data: Dict with two parallel lists: "well_id" holding the selected well IDs and "formation"
    holding the rock formation type associated with each well
:return: Figure object
"""
layout = dict(
xaxis=dict(title="Year"), yaxis=dict(title="GAS Production (mcf)", type="log")
)
data = []
for well_id, formation in list(
zip(processed_data["well_id"], processed_data["formation"])
):
well_prod = df_prod[df_prod["RecordNumber"] == well_id]
new_trace = dict(
x=well_prod["Year"],
y=well_prod["VolumeMCF"],
name=str(well_id),
mode="lines+markers",
hoverinfo="x+y+name",
marker=dict(
symbol="hexagram-open", line={"width": "0.5"}, color=colormap[formation]
),
line=dict(shape="spline"),
showlegend=True,
)
data.append(new_trace)
return {"data": data, "layout": layout}
def generate_well_map(dff, selected_data, style):
"""
Generate well map based on selected data.
:param dff: dataframe for generate plot.
:param selected_data: Processed dictionary for plot generation with defined selected points.
:param style: mapbox visual style.
:return: Plotly figure object.
"""
layout = go.Layout(
clickmode="event+select",
dragmode="lasso",
showlegend=True,
autosize=True,
hovermode="closest",
margin=dict(l=0, r=0, t=0, b=0),
mapbox=go.layout.Mapbox(
accesstoken=mapbox_access_token,
bearing=0,
center=go.layout.mapbox.Center(lat=37.497562, lon=-82.755728),
pitch=0,
zoom=8,
style=style,
),
legend=dict(
bgcolor="#1f2c56",
orientation="h",
font=dict(color="white"),
x=0,
y=0,
yanchor="bottom",
),
)
formations = dff["fm_name"].unique().tolist()
data = []
for formation in formations:
selected_index = None
if formation in selected_data:
selected_index = selected_data[formation]
text_list = list(
map(
lambda item: "Well ID:" + str(int(item)),
dff[dff["fm_name"] == formation]["RecordNumber"],
)
)
op_list = dff[dff["fm_name"] == formation]["op"].tolist()
text_list = [op_list[i] + "<br>" + text_list[i] for i in range(len(text_list))]
new_trace = go.Scattermapbox(
lat=dff[dff["fm_name"] == formation]["nlat83"],
lon=dff[dff["fm_name"] == formation]["wlon83"],
mode="markers",
marker={"color": colormap[formation], "size": 9},
text=text_list,
name=formation,
selectedpoints=selected_index,
customdata=dff[dff["fm_name"] == formation]["RecordNumber"],
)
data.append(new_trace)
return {"data": data, "layout": layout}
def generate_ternary_map(dff, selected_data, contour_visible, marker_visible):
"""
Generate ternary plot based on selected data.
:param dff: dataframe for generate plot.
:param selected_data: Processed dictionary for plot generation with defined selected points.
:param contour_visible: Contour trace visibility.
:param marker_visible: Marker trace visibility.
:return: ternary map figure object.
"""
# Generate contour
contour_traces = []
for ind, key in enumerate(constants.ternary_contour.keys()):
trace = dict(
name=key,
type="scatterternary",
a=[k["Quartz"] for k in constants.ternary_contour[key]],
b=[k["Carbonate"] for k in constants.ternary_contour[key]],
c=[k["Clay"] for k in constants.ternary_contour[key]],
mode="lines",
line=dict(color="#444", width=0.5),
fill="toself",
fillcolor=constants.ternary_color[ind],
opacity=0.8,
hoverinfo="none",
showlegend=False,
visible=contour_visible,
)
contour_traces.append(trace)
contour_text = generate_contour_text_layer(contour_visible)
# Layout
layout = {
"dragmode": "lasso",
"ternary": {
"sum": 100,
"aaxis": {
"title": {
"text": "Quartz",
"font": {"family": "Open Sans", "size": 15, "color": "white"},
},
"min": -2,
"linewidth": 1.5,
"ticks": "outside",
},
"baxis": {
"title": {
"text": "Carbonate",
"font": {"family": "Open Sans", "size": 15, "color": "white"},
},
"min": -2,
"linewidth": 1.5,
"ticks": "outside",
},
"caxis": {
"title": {
"text": "Clay",
"font": {"family": "Open Sans", "size": 15, "color": "white"},
},
"min": -2,
"linewidth": 1.5,
"ticks": "outside",
},
},
"margin": dict(l=110, r=50, t=50, b=50),
"paper_bgcolor": "#192444",
"plot_bgcolor": "#192444",
"showLegend": False,
"font": {"color": "white"},
"annotations": {"visible": False},
"autosize": True,
}
hovertemplate = "<b> %{text}</b><br><br> Quartz: %{a:.0f}<br>Carbonate : %{b:.0f}<br> Clay: %{c:.0f}<extra></extra>"
formations = dff["fm_name"].unique().tolist()
data_traces = []
for key in formations:
if selected_data:
select_indices = selected_data[key]
else:
select_indices = None
new_data_trace = dict(
text=list(
map(
lambda item: "Well ID:" + str(int(item)),
dff[dff["fm_name"] == key]["RecordNumber"],
)
),
name=key,
customdata=dff[dff["fm_name"] == key]["RecordNumber"],
type="scatterternary",
a=dff[dff["fm_name"] == key]["Quartz"],
b=dff[dff["fm_name"] == key]["Carbonate"],
c=dff[dff["fm_name"] == key]["Clay"],
mode="markers",
hovertemplate=hovertemplate,
showlegend=False,
marker={
"color": colormap[key],
"size": 8,
"line": {"color": "#000000", "width": 0.2},
},
selectedpoints=select_indices,
visible=marker_visible,
)
data_traces.append(new_data_trace)
return {"data": contour_traces + contour_text + data_traces, "layout": layout}
def generate_contour_text_layer(contour_visible):
layer = []
for key, value in constants.ternary_contour.items():
a = np.mean([i["Quartz"] for i in value])
b = np.mean([i["Carbonate"] for i in value])
c = np.mean([i["Clay"] for i in value])
key_br = key.replace(" ", "<br>")
new_trace = generate_contour_text(a, b, c, key, key_br, contour_visible)
layer.append(new_trace)
return layer
def generate_contour_text(a, b, c, name, text, visible):
return dict(
type="scatterternary",
a=[a],
b=[b],
c=[c],
name=name,
text=text,
mode="text",
hoverinfo="none",
textposition="middle center",
textfont={"size": 11, "color": "#000000", "family": "sans-serif"},
showlegend=False,
legendgroup="Rock type",
visible=visible,
)
def generate_formation_bar(dff, selected_data):
"""
Generate bar plot based on selected data.
:param dff: dataframe for generate plot.
:param selected_data: Processed dictionary for plot generation with defined selected points.
:return: ternary map figure object.
"""
layout = go.Layout(
showlegend=False,
hovermode="closest",
xaxis=dict(tickangle=-45, title="Formations"),
yaxis=dict(title="Well Counts"),
clickmode="event+select",
)
formations = dff["fm_name"].unique().tolist()
if selected_data:
data = []
for i in formations:
selected_points = []
select_indices = selected_data[i]
if select_indices is not None and len(select_indices) > 0:
selected_points = [0]
new_trace = go.Bar(
x=[i],
y=[len(dff[dff["fm_name"] == i])],
name=i,
hoverinfo="x+y",
marker={"color": colormap[i]},
selectedpoints=selected_points,
)
data.append(new_trace)
else:
data = []
for i in formations:
new_trace = go.Bar(
x=[i],
y=[len(dff[dff["fm_name"] == i])],
name=i,
marker={"color": colormap[i]},
selectedpoints=None,
)
data.append(new_trace)
return {"data": data, "layout": layout}
# Helper for extracting select index from mapbox and tern selectData
def get_selection(data, formation, selection_data, starting_index):
ind = []
current_curve = data["fm_name"].unique().tolist().index(formation)
for point in selection_data["points"]:
if point["curveNumber"] - starting_index == current_curve:
ind.append(point["pointNumber"])
return ind
# Helper for extracting select index from bar
def get_selection_by_bar(bar_selected_data):
    selected = {}
    if bar_selected_data is not None:
        for point in bar_selected_data["points"]:
            if point["x"] is not None:
                selected[point["x"]] = list(range(0, point["y"]))
    return selected
app.layout = html.Div(
children=[
html.Div(
id="top-row",
children=[
html.Div(
className="row",
id="top-row-header",
children=[
html.Div(
id="header-container",
children=[
build_banner(),
html.P(
id="instructions",
children="Select data points from the well map, ternary map or bar graph to "
"visualize cross-filtering to other plots. Selection could be done by "
"clicking on individual data points or using the lasso tool to capture "
"multiple data points or bars. With the box tool from modebar, multiple "
"regions can be selected by holding the SHIFT key while clicking and "
"dragging.",
),
build_graph_title("Select Operator"),
dcc.Dropdown(
id="operator-select",
options=[
{"label": i, "value": i}
for i in df["op"].unique().tolist()
],
multi=True,
value=[
df["op"].unique().tolist()[0],
df["op"].unique().tolist()[1],
],
),
],
)
],
),
html.Div(
className="row",
id="top-row-graphs",
children=[
# Well map
html.Div(
id="well-map-container",
children=[
build_graph_title("Well Map"),
dcc.RadioItems(
id="mapbox-view-selector",
options=[
{"label": "basic", "value": "basic"},
{"label": "satellite", "value": "satellite"},
{"label": "outdoors", "value": "outdoors"},
{
"label": "satellite-street",
"value": "mapbox://styles/mapbox/satellite-streets-v9",
},
],
value="basic",
),
dcc.Graph(
id="well-map",
figure={
"layout": {
"paper_bgcolor": "#192444",
"plot_bgcolor": "#192444",
}
},
config={"scrollZoom": True, "displayModeBar": True},
),
],
),
# Ternary map
html.Div(
id="ternary-map-container",
children=[
html.Div(
id="ternary-header",
children=[
build_graph_title(
"Shale Mineralogy Composition"
),
dcc.Checklist(
id="ternary-layer-select",
options=[
{
"label": "Well Data",
"value": "Well Data",
},
{
"label": "Rock Type",
"value": "Rock Type",
},
],
value=["Well Data", "Rock Type"],
),
],
),
dcc.Graph(
id="ternary-map",
figure={
"layout": {
"paper_bgcolor": "#192444",
"plot_bgcolor": "#192444",
}
},
config={
"scrollZoom": True,
"displayModeBar": False,
},
),
],
),
],
),
],
),
html.Div(
className="row",
id="bottom-row",
children=[
# Formation bar plots
html.Div(
id="form-bar-container",
className="six columns",
children=[
build_graph_title("Well count by formations"),
dcc.Graph(id="form-by-bar"),
],
),
html.Div(
# Selected well productions
id="well-production-container",
className="six columns",
children=[
build_graph_title("Individual well annual production"),
dcc.Graph(id="production-fig"),
],
),
],
),
]
)
# Update bar plot
@app.callback(
Output("form-by-bar", "figure"),
[
Input("well-map", "selectedData"),
Input("ternary-map", "selectedData"),
Input("operator-select", "value"),
],
)
def update_bar(map_selected_data, tern_selected_data, op_select):
dff = df[df["op"].isin(op_select)]
formations = dff["fm_name"].unique().tolist()
# Find which one has been triggered
ctx = dash.callback_context
prop_id = ""
prop_type = ""
if ctx.triggered:
splitted = ctx.triggered[0]["prop_id"].split(".")
prop_id = splitted[0]
prop_type = splitted[1]
processed_data = {}
if prop_id == "well-map" and prop_type == "selectedData":
for formation in formations:
if map_selected_data is None:
processed_data[formation] = [
0
] # [0] is the default value to select current bar
else:
processed_data[formation] = get_selection(
dff, formation, map_selected_data, 0
)
elif prop_id == "ternary-map" and prop_type == "selectedData":
for formation in formations:
if tern_selected_data is None:
processed_data[formation] = [0]
else:
processed_data[formation] = get_selection(
dff, formation, tern_selected_data, 32
)
else:
for formation in formations:
processed_data[formation] = [0]
return generate_formation_bar(dff, processed_data)
# Update ternary map
@app.callback(
Output("ternary-map", "figure"),
[
Input("well-map", "selectedData"),
Input("form-by-bar", "selectedData"),
Input("form-by-bar", "clickData"),
Input("operator-select", "value"),
Input("ternary-layer-select", "value"),
],
state=[State("ternary-map", "figure")],
)
def update_ternary_map(
map_selected_data,
bar_selected_data,
bar_click_data,
op_select,
layer_select,
curr_fig,
):
marker_visible = contour_visible = True
dff = df[df["op"].isin(op_select)]
formations = dff["fm_name"].unique().tolist()
# Find which one has been triggered
ctx = dash.callback_context
if ctx.triggered:
splitted = ctx.triggered[0]["prop_id"].split(".")
prop_id = splitted[0]
prop_type = splitted[1]
processed_data = {}
if prop_id != "ternary-layer-select":
if prop_id == "well-map" and prop_type == "selectedData":
for formation in formations:
if map_selected_data is None:
processed_data[formation] = None
else:
processed_data[formation] = get_selection(
dff, formation, map_selected_data, 0
)
elif prop_id == "form-by-bar" and prop_type == "selectedData":
processed_data = get_selection_by_bar(bar_selected_data)
for formation in formations:
if bar_selected_data is None:
processed_data[formation] = None
elif formation not in processed_data:
processed_data[formation] = []
elif prop_id == "form-by-bar" and prop_type == "clickData":
processed_data = get_selection_by_bar(bar_click_data)
for formation in formations:
if bar_click_data is None:
processed_data[formation] = None
elif formation not in processed_data:
processed_data[formation] = []
else:
for formation in formations:
processed_data[formation] = None
return generate_ternary_map(
dff, processed_data, contour_visible, marker_visible
)
if prop_id == "ternary-layer-select":
if curr_fig is not None:
if "Well Data" not in layer_select:
marker_visible = "legendonly"
if "Rock Type" not in layer_select:
contour_visible = "legendonly"
for contour_dict in curr_fig["data"][:32]:
contour_dict["visible"] = contour_visible
for marker_dict in curr_fig["data"][32:]:
marker_dict["visible"] = marker_visible
return curr_fig
else:
return curr_fig
# Update well map
@app.callback(
Output("well-map", "figure"),
[
Input("ternary-map", "selectedData"),
Input("form-by-bar", "selectedData"),
Input("form-by-bar", "clickData"),
Input("operator-select", "value"),
Input("mapbox-view-selector", "value"),
],
)
def update_well_map(
tern_selected_data, bar_selected_data, bar_click_data, op_select, mapbox_view
):
dff = df[df["op"].isin(op_select)]
formations = dff["fm_name"].unique().tolist()
# Find which one has been triggered
ctx = dash.callback_context
prop_id = ""
prop_type = ""
if ctx.triggered:
splitted = ctx.triggered[0]["prop_id"].split(".")
prop_id = splitted[0]
prop_type = splitted[1]
processed_data = {}
if prop_id == "ternary-map":
for formation in formations:
if tern_selected_data is None:
processed_data[formation] = None
else:
processed_data[formation] = get_selection(
dff, formation, tern_selected_data, 32
)
elif prop_id == "form-by-bar":
bar_data = ""
if prop_type == "selectedData":
bar_data = bar_selected_data
elif prop_type == "clickData":
bar_data = bar_click_data
processed_data = get_selection_by_bar(bar_data)
for formation in formations:
if bar_data is None:
processed_data[formation] = None
elif formation not in processed_data:
processed_data[formation] = []
else:
for formation in formations:
processed_data[formation] = None
return generate_well_map(dff, processed_data, mapbox_view)
# Update production plot
@app.callback(
Output("production-fig", "figure"),
[
Input("well-map", "selectedData"),
Input("ternary-map", "selectedData"),
Input("form-by-bar", "selectedData"),
Input("operator-select", "value"),
],
)
def update_production(map_select, tern_select, bar_select, op_select):
dff = df[df["op"].isin(op_select)]
# Find which one has been triggered
ctx = dash.callback_context
prop_id = ""
prop_type = ""
if ctx.triggered:
splitted = ctx.triggered[0]["prop_id"].split(".")
prop_id = splitted[0]
prop_type = splitted[1]
processed_data_init = {}
processed_data_init["well_id"] = dff["RecordNumber"].tolist()
processed_data_init["formation"] = dff["fm_name"].tolist()
if prop_id == "well-map" and prop_type == "selectedData":
if map_select is not None:
processed_data = {"well_id": [], "formation": []}
for point in map_select["points"]:
processed_data["well_id"].append(point["customdata"])
processed_data["formation"].append(
dff[dff["RecordNumber"] == point["customdata"]]["fm_name"].tolist()[
0
]
)
else:
processed_data = processed_data_init
elif prop_id == "ternary-map" and prop_type == "selectedData":
if tern_select is not None:
processed_data = {"well_id": [], "formation": []}
for point in tern_select["points"]:
if "customdata" in point:
processed_data["well_id"].append(point["customdata"])
processed_data["formation"].append(
dff[dff["RecordNumber"] == point["customdata"]][
"fm_name"
].tolist()[0]
)
else:
processed_data = processed_data_init
elif prop_id == "form-by-bar" and prop_type == "selectedData":
if bar_select is not None:
processed_data = {"well_id": [], "formation": []}
# Find all wells according to selected formation category
for point in bar_select["points"]:
selected_form = point["x"]
selected_well = dff[dff["fm_name"] == point["x"]][
"RecordNumber"
].tolist()
for well in selected_well:
processed_data["well_id"].append(int(well))
processed_data["formation"].append(selected_form)
else:
processed_data = processed_data_init
else:
processed_data = processed_data_init
return generate_production_plot(processed_data)
# Running the server
if __name__ == "__main__":
app.run_server(debug=True)
|
the-stack_106_16390
|
import base64
import datetime
import json
import os
import traceback
import frappe
import jwt
import requests
from requests.auth import HTTPBasicAuth
def validate():
"""
Additional validation to execute along with frappe request
"""
authorization_header = frappe.get_request_header("Authorization", str()).split(" ")
if len(authorization_header) == 2:
token = authorization_header[1]
if frappe.get_conf().get("castlecraft_auth_introspect_bearer_enabled"):
validate_bearer_with_introspection(token)
def validate_bearer_with_introspection(token):
"""
Validates access_token by using introspection endpoint
    Caches the token up to expiry for reuse
"""
is_valid = False
email = None
cached_token = frappe.cache().get_value(f"cc_bearer|{token}")
now = datetime.datetime.now()
form_dict = frappe.local.form_dict
token_response = {}
try:
if cached_token:
token_json = json.loads(cached_token)
exp = token_json.get("exp")
email = frappe.get_value("User", token_json.get("email"), "email")
if exp:
exp = datetime.datetime.fromtimestamp(int(token_json.get("exp")))
else:
exp = now
if now < exp and email:
token_response = token_json
is_valid = True
else:
frappe.cache().delete_key(f"cc_bearer|{token}")
else:
client_id = frappe.get_conf().get("castlecraft_client_id")
client_secret = frappe.get_conf().get("castlecraft_client_secret")
introspect_url = frappe.get_conf().get("castlecraft_introspect_url")
introspect_token_key = frappe.get_conf().get(
"castlecraft_introspect_token_key", "token"
)
auth_header_enabled = frappe.get_conf().get("castlecraft_auth_header_enabled")
auth = None
if not introspect_url:
return
if auth_header_enabled and client_id and client_secret:
auth = HTTPBasicAuth(client_id, client_secret)
data = {}
data[introspect_token_key] = token
r = requests.post(
introspect_url,
data=data,
auth=auth,
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
token_response = r.json()
exp = token_response.get("exp")
if exp:
exp = datetime.datetime.fromtimestamp(int(token_response.get("exp")))
else:
                exp = now + datetime.timedelta(0, int(token_response.get("expires_in") or 0))
if now < exp:
email = frappe.get_value("User", token_response.get("email"), "email")
if email and token_response.get("email"):
frappe.cache().set_value(
f"cc_bearer|{token}", json.dumps(token_response), expires_in_sec=exp - now,
)
is_valid = True
if frappe.get_conf().get(
"castlecraft_create_user_on_auth_enabled"
) and not frappe.db.exists("User", email):
user = create_and_save_user(token_response)
frappe.cache().set_value(
f"cc_bearer|{token}", json.dumps(token_response), expires_in_sec=exp - now,
)
is_valid = True
if is_valid:
frappe.set_user(email)
frappe.local.form_dict = form_dict
except:
frappe.log_error(traceback.format_exc(), "castlecraft_bearer_auth_failed")
def create_and_save_user(body):
"""
Create new User and save based on response
"""
user = frappe.new_doc("User")
user.email = body.get("email")
user.first_name = body.get("name")
user.full_name = body.get("name")
if body.get("phone_number_verified"):
user.phone = body.get("phone_number")
user.flags.ignore_permissions = 1
user.flags.no_welcome_mail = True
user.save()
frappe.db.commit()
return user
|
the-stack_106_16391
|
import marshal
import os
import secrets
import glob
from . import exceptions as _except
from . import polyfill
from .table import Table
from .document import Document
from .chunk import Chunk
from .autogenerateid import AutoGenerateId
from .console import Console
import atexit
name = "tasho"
class Database():
"""Database.new(String:database_file, **options) returns tasho.database.Database
Creates a new database object, a folder will be created
with the same name as the database name.
Options:
chunk_size=Int:8192
> Table chunk size.
auto_commit=Bool:False
> Commits upon storing data
(useful for large insert ops)
Database.open(String:database_file) returns tasho.database.Database
Opens an existing Database database.
Database(directory, **options) returns tasho.database.Database
    Internally used by Database; please use Database.new or Database.open instead.
    A short usage sketch is included at the bottom of this module."""
@classmethod
def new(Database, directory, **options):
open_instead = options.get('open_instead', False)
if os.path.exists(directory):
if open_instead:
return Database.open(directory, **options)
err = "Database '{}' already exists. Drop the database first.".format(directory)
raise _except.DatabaseInitException(err)
os.mkdir(directory)
properties = {
"chunk_size": options.get("chunk_size", 8192),
"table_index": options.get("table_index", "tables"),
"auto_commit": options.get("auto_commit", False)
}
with open(os.path.join(directory, "properties"), "wb") as f:
marshal.dump(properties, f)
with open(os.path.join(directory, properties['table_index']), "wb") as f:
marshal.dump({}, f)
return Database(directory, **properties)
@classmethod
def open(Database, directory, append=True, **options):
if not os.path.exists(directory):
if append:
return Database.new(directory, **options)
else:
err = "Database '{}' does not exist.".format(directory)
raise _except.DatabaseInitException(err)
with open(os.path.join(directory, 'properties'), "rb") as f:
properties = marshal.load(f)
return Database(directory, **properties)
def _load_internal(self, filename):
with open(os.path.join(self._directory, filename), "rb") as f:
return marshal.load(f)
def _write_internal(self, filename, data):
with open(os.path.join(self._directory, filename), "wb") as f:
marshal.dump(data, f)
def __init__(self, directory, **options):
self._options = options
self._directory = directory
self._table_index = self._load_internal(options['table_index'])
self._database = {}
self._tables = {}
self.commit_on_exit = True
for table_i, chunks in self._table_index.items():
self._tables[table_i] = Table(table_i,
directory,
chunks,
self._options.get('auto_commit'),
self._options.get('chunk_size'),
self)
atexit.register(self._atexit_cleanup)
def __repr__(self):
return "<tasho.database: {}>".format(self._directory)
def _atexit_cleanup(self):
if self.commit_on_exit:
dirties = []
for table in self._tables.values():
dirties.extend(table.dirty)
for chunk in dirties:
print(f"Commiting {chunk}")
chunk.commit()
Console.log('Waiting for commits to finish.')
for chunk in dirties:
chunk.commitQueue.join()
@property
def table(self):
return TableSelector(self)
@property
def tables(self):
return self._tables
def get_table(self, table_name):
"""
Database.get_table(String:table_name) returns tasho.database.Table
Returns a table object. Creates a new table if it doesn't exist.
You can also call the table though `Database.table.table_name`
"""
if table_name in self._tables:
return self._tables[table_name]
else:
return self.new_table(table_name)
def new_table(self, table_name):
if table_name in self._table_index:
raise _except.DatabaseInitException(
"Table '{}' already exists. Drop the table first.".format(table_name))
table = Table(table_name,
self._directory,
[],
self._options.get('auto_commit'),
self._options.get('chunk_size'),
self)
table._new_chunk()
self._tables[table.name] = table
self.commit_table_index()
return table
def drop_table(self, table_name, drop_key):
"""
Database.drop_table(String:table_name, String:drop_key)
Deletes a table. You must supply the table's drop key
which can be found through `Table.drop_key`.
"""
if table_name in self._table_index:
if self._tables[table_name].drop_key == drop_key:
chunks = self._table_index.pop(table_name)
table = self._tables.pop(table_name)
table.__is_dropped = True
for chunk in chunks:
os.remove(os.path.join(self._directory, chunk))
else:
raise _except.DatabaseOperationException("Wrong drop key.")
def commit_table_index(self):
self._table_index = {table.name: table.chunk_ids for table in self._tables.values()}
self._write_internal(self._options['table_index'], self._table_index)
class TableSelector():
def __init__(self, database):
self.db = database
def __getattr__(self, table_name):
if table_name in self.db._tables:
return self.db._tables[table_name]
else:
return self.db.new_table(table_name)
def __getitem__(self, table_name):
if table_name in self.db._tables:
return self.db._tables[table_name]
else:
return self.db.new_table(table_name)
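# Usage sketch (illustrative only; the database and table names are hypothetical, and the
# import path may need adjusting to wherever this module lives in the package):
#
#   from tasho import Database
#
#   db = Database.open("mydb")                 # opens "mydb", creating it if it is missing
#   users = db.table.users                     # same as db.get_table("users")
#   ...                                        # store/read documents through the Table API
#   db.commit_table_index()
#   db.drop_table("users", users.drop_key)     # dropping requires the table's drop key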
|
the-stack_106_16392
|
from maza.core.exploit import *
from maza.core.http.http_client import HTTPClient
class Exploit(HTTPClient):
__info__ = {
"name": "SIEMENS IP-Camera CCMS2025 Password Disclosure",
"description": "Module exploits SIEMENS IP-Camera CCMS2025 Password Dislosure vulnerability. If target is vulnerable "
"it is possible to read administrative credentials",
"authors": (
"Yakir Wizman", # vulnerability discovery
"VegetableCat <yes-reply[at]linux.com>", # routersploit module
),
"references": (
"https://www.exploit-db.com/exploits/40254/",
),
"devices": (
"SIEMENS IP-Camera CVMS2025-IR",
"SIEMENS IP-Camera CCMS2025",
),
}
target = OptIP("", "Target IPv4 or IPv6 address")
port = OptPort(80, "Target HTTP port")
def __init__(self):
self.content = None
def run(self):
if self.check():
print_success("Target seems to be vulnerable")
print_info(self.content)
print_info("Please login at: {}".format(self.get_target_url(path="/cgi-bin/chklogin.cgi")))
else:
print_error("Exploit failed - target seems to be not vulnerable")
@mute
def check(self):
response = self.http_request(
method="GET",
path="/cgi-bin/readfile.cgi?query=ADMINID"
)
if response and "Adm_ID" in response.text:
self.content = response.text
return True # target is vulnerable
return False # target is not vulnerable
|
the-stack_106_16393
|
import hashlib
import json
import os
# import shutil
import tempfile
import zipfile
from wsgiref.util import FileWrapper
from django.conf import settings
from django.db import transaction
from django.db.models import Q
from django.http import StreamingHttpResponse, FileResponse
from account.decorators import problem_permission_required, ensure_created_by
from contest.models import Contest, ContestStatus
from fps.parser import FPSHelper, FPSParser
from judge.dispatcher import SPJCompiler
from options.options import SysOptions
from submission.models import Submission, JudgeStatus
from utils.api import APIView, CSRFExemptAPIView, validate_serializer, APIError
from utils.constants import Difficulty
from utils.shortcuts import rand_str, natural_sort_key
from utils.tasks import delete_files
from ..models import Problem, ProblemRuleType, ProblemTag
from ..serializers import (CreateContestProblemSerializer, CompileSPJSerializer,
CreateProblemSerializer, EditProblemSerializer, EditContestProblemSerializer,
ProblemAdminSerializer, TestCaseUploadForm, ContestProblemMakePublicSerializer,
AddContestProblemSerializer, ExportProblemSerializer,
ExportProblemRequestSerialzier, UploadProblemForm, ImportProblemSerializer,
FPSProblemSerializer)
from ..utils import TEMPLATE_BASE, build_problem_template
class TestCaseZipProcessor(object):
def process_zip(self, uploaded_zip_file, spj, dir=""):
try:
zip_file = zipfile.ZipFile(uploaded_zip_file, "r")
except zipfile.BadZipFile:
raise APIError("Bad zip file")
name_list = zip_file.namelist()
test_case_list = self.filter_name_list(name_list, spj=spj, dir=dir)
if not test_case_list:
raise APIError("Empty file")
test_case_id = rand_str()
test_case_dir = os.path.join(settings.TEST_CASE_DIR, test_case_id)
os.mkdir(test_case_dir)
os.chmod(test_case_dir, 0o710)
size_cache = {}
md5_cache = {}
for item in test_case_list:
with open(os.path.join(test_case_dir, item), "wb") as f:
content = zip_file.read(f"{dir}{item}").replace(b"\r\n", b"\n")
size_cache[item] = len(content)
if item.endswith(".out"):
md5_cache[item] = hashlib.md5(content.rstrip()).hexdigest()
f.write(content)
test_case_info = {"spj": spj, "test_cases": {}}
info = []
if spj:
for index, item in enumerate(test_case_list):
data = {"input_name": item, "input_size": size_cache[item]}
info.append(data)
test_case_info["test_cases"][str(index + 1)] = data
else:
# ["1.in", "1.out", "2.in", "2.out"] => [("1.in", "1.out"), ("2.in", "2.out")]
test_case_list = zip(*[test_case_list[i::2] for i in range(2)])
for index, item in enumerate(test_case_list):
data = {"stripped_output_md5": md5_cache[item[1]],
"input_size": size_cache[item[0]],
"output_size": size_cache[item[1]],
"input_name": item[0],
"output_name": item[1]}
info.append(data)
test_case_info["test_cases"][str(index + 1)] = data
with open(os.path.join(test_case_dir, "info"), "w", encoding="utf-8") as f:
f.write(json.dumps(test_case_info, indent=4))
for item in os.listdir(test_case_dir):
os.chmod(os.path.join(test_case_dir, item), 0o640)
return info, test_case_id
def filter_name_list(self, name_list, spj, dir=""):
ret = []
prefix = 1
if spj:
while True:
in_name = f"{prefix}.in"
if f"{dir}{in_name}" in name_list:
ret.append(in_name)
prefix += 1
continue
else:
return sorted(ret, key=natural_sort_key)
else:
while True:
in_name = f"{prefix}.in"
out_name = f"{prefix}.out"
if f"{dir}{in_name}" in name_list and f"{dir}{out_name}" in name_list:
ret.append(in_name)
ret.append(out_name)
prefix += 1
continue
else:
return sorted(ret, key=natural_sort_key)
class TestCaseAPI(CSRFExemptAPIView, TestCaseZipProcessor):
request_parsers = ()
def get(self, request):
problem_id = request.GET.get("problem_id")
if not problem_id:
return self.error("Parameter error, problem_id is required")
try:
problem = Problem.objects.get(id=problem_id)
except Problem.DoesNotExist:
return self.error("Problem does not exists")
if problem.contest:
ensure_created_by(problem.contest, request.user)
else:
ensure_created_by(problem, request.user)
test_case_dir = os.path.join(settings.TEST_CASE_DIR, problem.test_case_id)
if not os.path.isdir(test_case_dir):
return self.error("Test case does not exists")
name_list = self.filter_name_list(os.listdir(test_case_dir), problem.spj)
name_list.append("info")
file_name = os.path.join(test_case_dir, problem.test_case_id + ".zip")
with zipfile.ZipFile(file_name, "w") as file:
for test_case in name_list:
file.write(f"{test_case_dir}/{test_case}", test_case)
response = StreamingHttpResponse(FileWrapper(open(file_name, "rb")),
content_type="application/octet-stream")
response["Content-Disposition"] = f"attachment; filename=problem_{problem.id}_test_cases.zip"
response["Content-Length"] = os.path.getsize(file_name)
return response
def post(self, request):
form = TestCaseUploadForm(request.POST, request.FILES)
if form.is_valid():
spj = form.cleaned_data["spj"] == "true"
file = form.cleaned_data["file"]
else:
return self.error("Upload failed")
zip_file = f"/tmp/{rand_str()}.zip"
with open(zip_file, "wb") as f:
for chunk in file:
f.write(chunk)
info, test_case_id = self.process_zip(zip_file, spj=spj)
os.remove(zip_file)
return self.success({"id": test_case_id, "info": info, "spj": spj})
class CompileSPJAPI(APIView):
@validate_serializer(CompileSPJSerializer)
def post(self, request):
data = request.data
spj_version = rand_str(8)
error = SPJCompiler(data["spj_code"], spj_version, data["spj_language"]).compile_spj()
if error:
return self.error(error)
else:
return self.success()
class ProblemBase(APIView):
def common_checks(self, request):
data = request.data
if data["spj"]:
if not data["spj_language"] or not data["spj_code"]:
return "Invalid spj"
if not data["spj_compile_ok"]:
return "SPJ code must be compiled successfully"
data["spj_version"] = hashlib.md5(
(data["spj_language"] + ":" + data["spj_code"]).encode("utf-8")).hexdigest()
else:
data["spj_language"] = None
data["spj_code"] = None
if data["rule_type"] == ProblemRuleType.OI:
total_score = 0
for item in data["test_case_score"]:
if item["score"] <= 0:
return "Invalid score"
else:
total_score += item["score"]
data["total_score"] = total_score
data["languages"] = list(data["languages"])
class ProblemAPI(ProblemBase):
@problem_permission_required
@validate_serializer(CreateProblemSerializer)
def post(self, request):
data = request.data
_id = data["_id"]
if not _id:
return self.error("Display ID is required")
if Problem.objects.filter(_id=_id, contest_id__isnull=True).exists():
return self.error("Display ID already exists")
error_info = self.common_checks(request)
if error_info:
return self.error(error_info)
# todo check filename and score info
tags = data.pop("tags")
data["created_by"] = request.user
problem = Problem.objects.create(**data)
for item in tags:
try:
tag = ProblemTag.objects.get(name=item)
except ProblemTag.DoesNotExist:
tag = ProblemTag.objects.create(name=item)
problem.tags.add(tag)
return self.success(ProblemAdminSerializer(problem).data)
@problem_permission_required
def get(self, request):
problem_id = request.GET.get("id")
rule_type = request.GET.get("rule_type")
user = request.user
if problem_id:
try:
problem = Problem.objects.get(id=problem_id)
ensure_created_by(problem, request.user)
return self.success(ProblemAdminSerializer(problem).data)
except Problem.DoesNotExist:
return self.error("Problem does not exist")
problems = Problem.objects.filter(contest_id__isnull=True).order_by("-create_time")
if rule_type:
if rule_type not in ProblemRuleType.choices():
return self.error("Invalid rule_type")
else:
problems = problems.filter(rule_type=rule_type)
keyword = request.GET.get("keyword", "").strip()
if keyword:
problems = problems.filter(Q(title__icontains=keyword) | Q(_id__icontains=keyword))
if not user.can_mgmt_all_problem():
problems = problems.filter(created_by=user)
return self.success(self.paginate_data(request, problems, ProblemAdminSerializer))
@problem_permission_required
@validate_serializer(EditProblemSerializer)
def put(self, request):
data = request.data
problem_id = data.pop("id")
try:
problem = Problem.objects.get(id=problem_id)
ensure_created_by(problem, request.user)
except Problem.DoesNotExist:
return self.error("Problem does not exist")
_id = data["_id"]
if not _id:
return self.error("Display ID is required")
if Problem.objects.exclude(id=problem_id).filter(_id=_id, contest_id__isnull=True).exists():
return self.error("Display ID already exists")
error_info = self.common_checks(request)
if error_info:
return self.error(error_info)
# todo check filename and score info
tags = data.pop("tags")
data["languages"] = list(data["languages"])
for k, v in data.items():
setattr(problem, k, v)
problem.save()
problem.tags.remove(*problem.tags.all())
for tag in tags:
try:
tag = ProblemTag.objects.get(name=tag)
except ProblemTag.DoesNotExist:
tag = ProblemTag.objects.create(name=tag)
problem.tags.add(tag)
return self.success()
@problem_permission_required
def delete(self, request):
id = request.GET.get("id")
if not id:
return self.error("Invalid parameter, id is required")
try:
problem = Problem.objects.get(id=id, contest_id__isnull=True)
except Problem.DoesNotExist:
return self.error("Problem does not exists")
ensure_created_by(problem, request.user)
# d = os.path.join(settings.TEST_CASE_DIR, problem.test_case_id)
# if os.path.isdir(d):
# shutil.rmtree(d, ignore_errors=True)
problem.delete()
return self.success()
class ContestProblemAPI(ProblemBase):
@validate_serializer(CreateContestProblemSerializer)
def post(self, request):
data = request.data
try:
contest = Contest.objects.get(id=data.pop("contest_id"))
ensure_created_by(contest, request.user)
except Contest.DoesNotExist:
return self.error("Contest does not exist")
if data["rule_type"] != contest.rule_type:
return self.error("Invalid rule type")
_id = data["_id"]
if not _id:
return self.error("Display ID is required")
if Problem.objects.filter(_id=_id, contest=contest).exists():
return self.error("Duplicate Display id")
error_info = self.common_checks(request)
if error_info:
return self.error(error_info)
# todo check filename and score info
data["contest"] = contest
tags = data.pop("tags")
data["created_by"] = request.user
problem = Problem.objects.create(**data)
for item in tags:
try:
tag = ProblemTag.objects.get(name=item)
except ProblemTag.DoesNotExist:
tag = ProblemTag.objects.create(name=item)
problem.tags.add(tag)
return self.success(ProblemAdminSerializer(problem).data)
def get(self, request):
problem_id = request.GET.get("id")
contest_id = request.GET.get("contest_id")
user = request.user
if problem_id:
try:
problem = Problem.objects.get(id=problem_id)
ensure_created_by(problem.contest, user)
except Problem.DoesNotExist:
return self.error("Problem does not exist")
return self.success(ProblemAdminSerializer(problem).data)
if not contest_id:
return self.error("Contest id is required")
try:
contest = Contest.objects.get(id=contest_id)
ensure_created_by(contest, user)
except Contest.DoesNotExist:
return self.error("Contest does not exist")
problems = Problem.objects.filter(contest=contest).order_by("-create_time")
if user.is_admin():
problems = problems.filter(contest__created_by=user)
keyword = request.GET.get("keyword")
if keyword:
problems = problems.filter(title__contains=keyword)
return self.success(self.paginate_data(request, problems, ProblemAdminSerializer))
@validate_serializer(EditContestProblemSerializer)
def put(self, request):
data = request.data
user = request.user
try:
contest = Contest.objects.get(id=data.pop("contest_id"))
ensure_created_by(contest, user)
except Contest.DoesNotExist:
return self.error("Contest does not exist")
if data["rule_type"] != contest.rule_type:
return self.error("Invalid rule type")
problem_id = data.pop("id")
try:
problem = Problem.objects.get(id=problem_id, contest=contest)
except Problem.DoesNotExist:
return self.error("Problem does not exist")
_id = data["_id"]
if not _id:
return self.error("Display ID is required")
if Problem.objects.exclude(id=problem_id).filter(_id=_id, contest=contest).exists():
return self.error("Display ID already exists")
error_info = self.common_checks(request)
if error_info:
return self.error(error_info)
# todo check filename and score info
tags = data.pop("tags")
data["languages"] = list(data["languages"])
for k, v in data.items():
setattr(problem, k, v)
problem.save()
problem.tags.remove(*problem.tags.all())
for tag in tags:
try:
tag = ProblemTag.objects.get(name=tag)
except ProblemTag.DoesNotExist:
tag = ProblemTag.objects.create(name=tag)
problem.tags.add(tag)
return self.success()
def delete(self, request):
id = request.GET.get("id")
if not id:
return self.error("Invalid parameter, id is required")
try:
problem = Problem.objects.get(id=id, contest_id__isnull=False)
except Problem.DoesNotExist:
return self.error("Problem does not exists")
ensure_created_by(problem.contest, request.user)
if Submission.objects.filter(problem=problem).exists():
return self.error("Can't delete the problem as it has submissions")
# d = os.path.join(settings.TEST_CASE_DIR, problem.test_case_id)
# if os.path.isdir(d):
# shutil.rmtree(d, ignore_errors=True)
problem.delete()
return self.success()
class MakeContestProblemPublicAPIView(APIView):
@validate_serializer(ContestProblemMakePublicSerializer)
@problem_permission_required
def post(self, request):
data = request.data
display_id = data.get("display_id")
if Problem.objects.filter(_id=display_id, contest_id__isnull=True).exists():
return self.error("Duplicate display ID")
try:
problem = Problem.objects.get(id=data["id"])
except Problem.DoesNotExist:
return self.error("Problem does not exist")
if not problem.contest or problem.is_public:
return self.error("Already be a public problem")
problem.is_public = True
problem.save()
# https://docs.djangoproject.com/en/1.11/topics/db/queries/#copying-model-instances
tags = problem.tags.all()
problem.pk = None
problem.contest = None
problem._id = display_id
problem.visible = False
problem.submission_number = problem.accepted_number = 0
problem.statistic_info = {}
problem.save()
problem.tags.set(tags)
return self.success()
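# Note on the cloning idiom used above (and reused in AddContestProblemAPI below):
# setting `problem.pk = None` before `save()` makes Django insert a brand-new row with the
# remaining field values copied, which is why the many-to-many tags must be re-attached
# with `problem.tags.set(tags)` afterwards (see the copying-model-instances link above).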
class AddContestProblemAPI(APIView):
@validate_serializer(AddContestProblemSerializer)
def post(self, request):
data = request.data
try:
contest = Contest.objects.get(id=data["contest_id"])
problem = Problem.objects.get(id=data["problem_id"])
except (Contest.DoesNotExist, Problem.DoesNotExist):
return self.error("Contest or Problem does not exist")
if contest.status == ContestStatus.CONTEST_ENDED:
return self.error("Contest has ended")
if Problem.objects.filter(contest=contest, _id=data["display_id"]).exists():
return self.error("Duplicate display id in this contest")
tags = problem.tags.all()
problem.pk = None
problem.contest = contest
problem.is_public = True
problem.visible = True
problem._id = request.data["display_id"]
problem.submission_number = problem.accepted_number = 0
problem.statistic_info = {}
problem.save()
problem.tags.set(tags)
return self.success()
class ExportProblemAPI(APIView):
def choose_answers(self, user, problem):
ret = []
for item in problem.languages:
submission = Submission.objects.filter(problem=problem,
user_id=user.id,
language=item,
result=JudgeStatus.ACCEPTED).order_by("-create_time").first()
if submission:
ret.append({"language": submission.language, "code": submission.code})
return ret
def process_one_problem(self, zip_file, user, problem, index):
info = ExportProblemSerializer(problem).data
info["answers"] = self.choose_answers(user, problem=problem)
compression = zipfile.ZIP_DEFLATED
zip_file.writestr(zinfo_or_arcname=f"{index}/problem.json",
data=json.dumps(info, indent=4),
compress_type=compression)
problem_test_case_dir = os.path.join(settings.TEST_CASE_DIR, problem.test_case_id)
with open(os.path.join(problem_test_case_dir, "info")) as f:
info = json.load(f)
for k, v in info["test_cases"].items():
zip_file.write(filename=os.path.join(problem_test_case_dir, v["input_name"]),
arcname=f"{index}/testcase/{v['input_name']}",
compress_type=compression)
if not info["spj"]:
zip_file.write(filename=os.path.join(problem_test_case_dir, v["output_name"]),
arcname=f"{index}/testcase/{v['output_name']}",
compress_type=compression)
@validate_serializer(ExportProblemRequestSerialzier)
def get(self, request):
problems = Problem.objects.filter(id__in=request.data["problem_id"])
for problem in problems:
if problem.contest:
ensure_created_by(problem.contest, request.user)
else:
ensure_created_by(problem, request.user)
path = f"/tmp/{rand_str()}.zip"
with zipfile.ZipFile(path, "w") as zip_file:
for index, problem in enumerate(problems):
self.process_one_problem(zip_file=zip_file, user=request.user, problem=problem, index=index + 1)
delete_files.send_with_options(args=(path,), delay=300_000)
resp = FileResponse(open(path, "rb"))
resp["Content-Type"] = "application/zip"
resp["Content-Disposition"] = f"attachment;filename=problem-export.zip"
return resp
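# Layout of the archive produced above (and consumed by ImportProblemAPI below):
#   1/problem.json            -- serialized problem plus the chosen accepted "answers"
#   1/testcase/<input files>  -- raw test case inputs
#   1/testcase/<output files> -- expected outputs (omitted for special-judge problems)
#   2/..., 3/...              -- one numbered folder per exported problem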
class ImportProblemAPI(CSRFExemptAPIView, TestCaseZipProcessor):
request_parsers = ()
def post(self, request):
form = UploadProblemForm(request.POST, request.FILES)
if form.is_valid():
file = form.cleaned_data["file"]
tmp_file = f"/tmp/{rand_str()}.zip"
with open(tmp_file, "wb") as f:
for chunk in file:
f.write(chunk)
else:
return self.error("Upload failed")
count = 0
with zipfile.ZipFile(tmp_file, "r") as zip_file:
name_list = zip_file.namelist()
for item in name_list:
if "/problem.json" in item:
count += 1
with transaction.atomic():
for i in range(1, count + 1):
with zip_file.open(f"{i}/problem.json") as f:
problem_info = json.load(f)
serializer = ImportProblemSerializer(data=problem_info)
if not serializer.is_valid():
return self.error(f"Invalid problem format, error is {serializer.errors}")
else:
problem_info = serializer.data
for item in problem_info["template"].keys():
if item not in SysOptions.language_names:
return self.error(f"Unsupported language {item}")
problem_info["display_id"] = problem_info["display_id"][:24]
for k, v in problem_info["template"].items():
problem_info["template"][k] = build_problem_template(v["prepend"], v["template"],
v["append"])
spj = problem_info["spj"] is not None
rule_type = problem_info["rule_type"]
test_case_score = problem_info["test_case_score"]
# process test case
_, test_case_id = self.process_zip(tmp_file, spj=spj, dir=f"{i}/testcase/")
problem_obj = Problem.objects.create(_id=problem_info["display_id"],
title=problem_info["title"],
description=problem_info["description"]["value"],
input_description=problem_info["input_description"][
"value"],
output_description=problem_info["output_description"][
"value"],
hint=problem_info["hint"]["value"],
test_case_score=test_case_score if test_case_score else [],
time_limit=problem_info["time_limit"],
memory_limit=problem_info["memory_limit"],
samples=problem_info["samples"],
template=problem_info["template"],
rule_type=problem_info["rule_type"],
source=problem_info["source"],
spj=spj,
spj_code=problem_info["spj"]["code"] if spj else None,
spj_language=problem_info["spj"][
"language"] if spj else None,
spj_version=rand_str(8) if spj else "",
languages=SysOptions.language_names,
created_by=request.user,
visible=False,
difficulty=Difficulty.HIDE,
total_score=sum(item["score"] for item in test_case_score)
if rule_type == ProblemRuleType.OI else 0,
test_case_id=test_case_id
)
for tag_name in problem_info["tags"]:
tag_obj, _ = ProblemTag.objects.get_or_create(name=tag_name)
problem_obj.tags.add(tag_obj)
return self.success({"import_count": count})
class FPSProblemImport(CSRFExemptAPIView):
request_parsers = ()
def _create_problem(self, problem_data, creator):
if problem_data["time_limit"]["unit"] == "ms":
time_limit = problem_data["time_limit"]["value"]
else:
time_limit = problem_data["time_limit"]["value"] * 1000
template = {}
prepend = {}
append = {}
for t in problem_data["prepend"]:
prepend[t["language"]] = t["code"]
for t in problem_data["append"]:
append[t["language"]] = t["code"]
for t in problem_data["template"]:
our_lang = lang = t["language"]
if lang == "Python":
our_lang = "Python3"
template[our_lang] = TEMPLATE_BASE.format(prepend.get(lang, ""), t["code"], append.get(lang, ""))
spj = problem_data["spj"] is not None
Problem.objects.create(_id=f"fps-{rand_str(4)}",
title=problem_data["title"],
description=problem_data["description"],
input_description=problem_data["input"],
output_description=problem_data["output"],
hint=problem_data["hint"],
test_case_score=problem_data["test_case_score"],
time_limit=time_limit,
memory_limit=problem_data["memory_limit"]["value"],
samples=problem_data["samples"],
template=template,
rule_type=ProblemRuleType.ACM,
source=problem_data.get("source", ""),
spj=spj,
spj_code=problem_data["spj"]["code"] if spj else None,
spj_language=problem_data["spj"]["language"] if spj else None,
spj_version=rand_str(8) if spj else "",
visible=False,
languages=SysOptions.language_names,
created_by=creator,
difficulty=Difficulty.MID,
test_case_id=problem_data["test_case_id"])
def post(self, request):
form = UploadProblemForm(request.POST, request.FILES)
if form.is_valid():
file = form.cleaned_data["file"]
with tempfile.NamedTemporaryFile("wb") as tf:
for chunk in file.chunks(4096):
tf.file.write(chunk)
tf.file.flush()
os.fsync(tf.file)
problems = FPSParser(tf.name).parse()
else:
return self.error("Parse upload file error")
helper = FPSHelper()
with transaction.atomic():
for _problem in problems:
test_case_id = rand_str()
test_case_dir = os.path.join(settings.TEST_CASE_DIR, test_case_id)
os.mkdir(test_case_dir)
score = []
for item in helper.save_test_case(_problem, test_case_dir)["test_cases"].values():
score.append({"score": 0, "input_name": item["input_name"],
"output_name": item.get("output_name")})
problem_data = helper.save_image(_problem, settings.UPLOAD_DIR, settings.UPLOAD_PREFIX)
s = FPSProblemSerializer(data=problem_data)
if not s.is_valid():
return self.error(f"Parse FPS file error: {s.errors}")
problem_data = s.data
problem_data["test_case_id"] = test_case_id
problem_data["test_case_score"] = score
self._create_problem(problem_data, request.user)
return self.success({"import_count": len(problems)})
|
the-stack_106_16395
|
import numpy as np
import matplotlib.pyplot as plt
# set random number generator
np.random.seed(2020)
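# NOTE (assumption): the model constants used below are expected to come from an earlier
# cell of this exercise; typical values for a leaky integrate-and-fire setup would look
# like the following (illustrative only, kept commented out on purpose):
# t_max = 150e-3   # simulation duration (s)
# dt = 1e-3        # integration time step (s)
# tau = 20e-3      # membrane time constant (s)
# el = -60e-3      # leak / resting potential (V)
# r = 100e6        # membrane resistance (Ohm)
# i_mean = 25e-11  # mean input current (A)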
# initialize step_end, n, t_range, v and syn
step_end = int(t_max / dt)
n = 50
t_range = np.linspace(0, t_max, num=step_end)
v_n = el * np.ones([n, step_end])
syn = i_mean * (1 + 0.1 * (t_max / dt)**(0.5) * (2 * np.random.random([n, step_end]) - 1))
# loop for step_end - 1 steps
for step in range(1, step_end):
v_n[:, step] = v_n[:, step - 1] + (dt / tau) * (el - v_n[:, step - 1] + r * syn[:, step])
with plt.xkcd():
# initialize the figure
plt.figure()
plt.title('Multiple realizations of $V_m$')
plt.xlabel('time (s)')
plt.ylabel('$V_m$ (V)')
plt.plot(t_range, v_n.T, 'k', alpha=0.3)
plt.show()
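# Additional sketch: overlay the ensemble mean of the n realizations on the single
# traces; the mean should fluctuate noticeably less than any individual trajectory.
with plt.xkcd():
    plt.figure()
    plt.title('Mean $V_m$ across %d realizations' % n)
    plt.xlabel('time (s)')
    plt.ylabel('$V_m$ (V)')
    plt.plot(t_range, v_n.T, 'k', alpha=0.1)
    plt.plot(t_range, v_n.mean(axis=0), 'C0', label='ensemble mean')
    plt.legend()
    plt.show()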
|
the-stack_106_16396
|
#!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
import copy
import os
from frameworks.framework_base import FrameworkBase
from utils.custom_logger import getLogger
class Caffe2Framework(FrameworkBase):
IDENTIFIER = 'Caffe2Observer '
NET = 'NET'
def __init__(self, tempdir):
super(Caffe2Framework, self).__init__()
self.tempdir = os.path.join(tempdir, self.getName())
os.makedirs(self.tempdir, 0o777)
# cannot have any variable pass among methods
def getName(self):
return "caffe2"
def runBenchmark(self, info, benchmark, platform):
output, output_files = \
super(Caffe2Framework, self).runBenchmark(info, benchmark,
platform)
return output, output_files
def verifyBenchmarkFile(self, benchmark, filename, is_post):
# model is now optional
if "model" in benchmark:
model = benchmark["model"]
assert "files" in model, \
"Files field is missing in benchmark {}".format(filename)
assert "name" in model, \
"Name field is missing in benchmark {}".format(filename)
assert "format" in model, \
"Format field is missing in benchmark {}".format(filename)
for f in model["files"]:
field = model["files"][f]
assert "filename" in field, \
"Filename is missing in file" + \
" {} of benchmark {}".format(f, filename)
assert "location" in field, \
"Location is missing in file" + \
" {} of benchmark {}".format(f, filename)
assert "md5" in field, \
"MD5 is missing in file" + \
" {} of benchmark {}".format(f, filename)
# tests is mandatory
assert "tests" in benchmark, \
"Tests field is missing in benchmark {}".format(filename)
tests = benchmark["tests"]
if is_post:
assert len(tests) == 1, "After rewrite, only one test in " + \
"one benchmark."
else:
assert len(tests) > 0, "Tests cannot be empty"
is_generic_test = tests[0]["metric"] == "generic"
for test in tests:
assert "metric" in test, "Metric field is missing in " + \
"benchmark {}".format(filename)
# no check is needed if the metric is generic
if is_generic_test:
assert test["metric"] == "generic", "All tests must be generic"
continue
assert "iter" in test, "Iter field is missing in benchmark " + \
"{}".format(filename)
assert "warmup" in test, "Warmup field is missing in " + \
"benchmark {}".format(filename)
assert "identifier" in test, "Identifier field is missing in " + \
"benchmark {}".format(filename)
if "commands" in test or "command" in test or "arguments" in test:
continue
# for backward compatibility purpose
assert "inputs" in test, "Inputs field is missing in " + \
"benchmark {}".format(filename)
num = -1
for ip_name in test["inputs"]:
ip = test["inputs"][ip_name]
assert "shapes" in ip, "Shapes field is missing in" + \
" input {}".format(ip_name) + \
" of benchmark {}".format(filename)
assert "type" in ip, \
"Type field is missing in input {}".format(ip_name) + \
" of benchmark {}".format(filename)
assert isinstance(ip["shapes"], list), \
"Shape field should be a list. However, input " + \
"{} of benchmark is not.".format(ip_name, filename)
dims = -1
for item in ip["shapes"]:
assert isinstance(item, list), \
"Shapes must be a list of list."
if dims < 0:
dims = len(item)
else:
assert dims == len(item), \
"All shapes of one data must have " + \
"the same dimension"
if num < 0:
num = len(ip["shapes"])
else:
assert len(ip["shapes"]) == num, "The shapes of " + \
"input {} ".format(ip_name) + \
"are not of the same dimension in " + \
"benchmark {}".format(filename)
def rewriteBenchmarkTests(self, benchmark, filename):
tests = benchmark.pop("tests")
new_tests = self._replicateTestsOnDims(tests, filename)
benchmark["tests"] = new_tests
def _replicateTestsOnDims(self, tests, source):
new_tests = []
for test in tests:
if "inputs" not in test:
new_tests.append(copy.deepcopy(test))
continue
num = -1
for ip_name in test["inputs"]:
ip = test["inputs"][ip_name]
if num < 0:
num = len(ip["shapes"])
break
if num == 1:
new_tests.append(copy.deepcopy(test))
else:
for i in range(num):
t = copy.deepcopy(test)
for ip_name in t["inputs"]:
t["inputs"][ip_name]["shapes"] = \
[test["inputs"][ip_name]["shapes"][i]]
new_tests.append(t)
return new_tests
def _checkNumFiles(self, files, source, num, is_input):
new_num = num
ftype = "input" if is_input else "output"
for name in files:
fs = files[name]
if isinstance(fs, list):
if new_num < 0:
new_num = len(fs)
else:
assert len(fs) == new_num, \
"The number of specified {} files ".format(ftype) + \
"in blob {} do not ".format(name) + \
"match in all input blobs in benchmark " + \
"{}.".format(source)
else:
new_num = 1
return new_num
def composeRunCommand(self, commands, platform, programs,
model, test, model_files,
input_files, output_files, shared_libs,
preprocess_files=None):
cmds = super(Caffe2Framework, self).composeRunCommand(commands,
platform,
programs,
model,
test,
model_files,
input_files,
output_files,
shared_libs,
preprocess_files)
if cmds:
return cmds
# old format, will deprecate
cmd = ["--net", model_files["predict"],
"--warmup", test["warmup"],
"--iter", test["iter"]
]
if "program" in programs:
cmd = [programs["program"]] + cmd
if "init" in model_files:
cmd.append("--init_net")
cmd.append(model_files["init"])
if input_files:
inputs = ",".join(list(input_files.keys()))
cmd.extend(["--input_file",
",".join(list(input_files.values()))])
else:
inputs = ",".join(list(test["inputs"].keys()))
input_dims = [
",".join([str(a) for a in test["inputs"][x]["shapes"][0]])
for x in test["inputs"]]
input_dims = ";".join(input_dims)
cmd.extend(["--input_dims", input_dims])
cmd.extend(["--input", inputs])
cmd.extend(["--input_type",
list(test["inputs"].values())[0]["type"]])
if "output_files" in test:
outputs = ",".join(list(test["output_files"].keys()))
cmd.extend(["--output", outputs])
cmd.extend(["--text_output", "true"])
cmd.extend(["--output_folder", platform.getOutputDir()])
if "commands" in test:
if "caffe2" in test["commands"]:
for key in test["commands"]["caffe2"]:
val = test["commands"]["caffe2"][key]
cmd.extend(["--" + key, val])
if shared_libs:
cmd = ["export", "LD_LIBRARY_PATH=$\{LD_LIBRARY_PATH\}:" +
os.path.dirname(shared_libs[0]), "&&"] + cmd
cmd = ' '.join(str(s) for s in cmd)
return [cmd]
def runOnPlatform(self, total_num, cmd, platform, platform_args,
converter_class):
if converter_class is None:
converter_class = self.converters["json_with_identifier_converter"]
converter = converter_class()
results = []
num = 0
# emulate do...while... loop
while True:
output = platform.runBenchmark(cmd, platform_args=platform_args)
one_result, valid_run_idxs = \
converter.collect(output, identifier=self.IDENTIFIER)
valid_run_idxs = [num + idx for idx in valid_run_idxs]
num += len(valid_run_idxs)
results.extend(one_result)
if num < total_num:
num_items = len(valid_run_idxs)
if num_items > 0:
getLogger().info("%d items collected, Still missing %d "
"runs. Collect again." %
(num_items, total_num - num))
continue
else:
getLogger().info("No new items collected, "
"finish collecting...")
elif total_num >= 0 and num > total_num:
# if collect more than the needed number, get the
# latest entries. This may happen when the data in
# the previous runs are not cleared. e.g. on some
# android 5 devices. Or, it may happen when multiple
# runs are needed to collect the desired number of
# iterations
results = results[valid_run_idxs[num - total_num]:]
break
metric = converter.convert(results)
return metric
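# Illustrative sketch (not part of the framework): a minimal benchmark spec of the shape
# accepted by verifyBenchmarkFile above -- a single non-generic test with one input blob
# whose "shapes" entry is a list of shape lists. All field values here are made up.
_EXAMPLE_BENCHMARK = {
    "tests": [
        {
            "metric": "delay",
            "identifier": "example_test",
            "iter": 50,
            "warmup": 5,
            "inputs": {
                "data": {"shapes": [[1, 3, 224, 224]], "type": "float"},
            },
        }
    ]
}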
|
the-stack_106_16397
|
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is modified from this git repo: https://github.com/xunzheng/notears
@inproceedings{zheng2020learning,
author = {Zheng, Xun and Dan, Chen and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.},
booktitle = {International Conference on Artificial Intelligence and Statistics},
title = {{Learning sparse nonparametric DAGs}},
year = {2020}
}
"""
import logging
from typing import Iterable, List, Tuple, Union
import numpy as np
import scipy.optimize as sopt
import torch
import torch.nn as nn
from sklearn.base import BaseEstimator
from causalnex.structure.pytorch.dist_type._base import DistTypeBase
from causalnex.structure.pytorch.nonlinear import LocallyConnected
# Problem in pytorch 1.6 (_forward_unimplemented), fixed in next release:
# pylint: disable=abstract-method
class NotearsMLP(nn.Module, BaseEstimator):
"""
Class for NOTEARS MLP (Multi-layer Perceptron) model.
    The model weights consist of the dag_layer weights and the loc_lin_layer weights.
dag_layer weight is the weight of the first fully connected layer which determines the causal structure.
loc_lin_layer weights are the weight of hidden layers after the first fully connected layer
"""
# pylint: disable=too-many-arguments
def __init__(
self,
n_features: int,
dist_types: List[DistTypeBase],
use_bias: bool = False,
hidden_layer_units: Iterable[int] = (0,),
bounds: List[Tuple[int, int]] = None,
lasso_beta: float = 0.0,
ridge_beta: float = 0.0,
nonlinear_clamp: float = 1e-2,
):
"""
Constructor for NOTEARS MLP class.
Args:
n_features: number of input features.
dist_types: list of data type objects used to fit the NOTEARS algorithm.
use_bias: True to add the intercept to the model
hidden_layer_units: An iterable where its length determine the number of layers used,
and the numbers determine the number of nodes used for the layer in order.
bounds: bound constraint for each parameter.
lasso_beta: Constant that multiplies the lasso term (l1 regularisation).
It only applies to dag_layer weight.
ridge_beta: Constant that multiplies the ridge term (l2 regularisation).
It applies to both dag_layer and loc_lin_layer weights.
nonlinear_clamp: Value used to soft clamp the nonlinear layer normalisation.
Prevents the weights from being scaled above 1/nonlinear_clamp.
"""
super().__init__()
self.device = torch.device("cpu")
self.lasso_beta = lasso_beta
self.ridge_beta = ridge_beta
self.nonlinear_clamp = nonlinear_clamp
# cast to list for later concat.
self.dims = (
[n_features] + list(hidden_layer_units) + [1]
if hidden_layer_units[0]
else [n_features, 1]
)
# dag_layer: initial linear layer
self.dag_layer = nn.Linear(
self.dims[0], self.dims[0] * self.dims[1], bias=use_bias
).float()
nn.init.zeros_(self.dag_layer.weight)
if use_bias:
nn.init.zeros_(self.dag_layer.bias)
# loc_lin_layer: local linear layers
layers = [
LocallyConnected(
self.dims[0], input_features, output_features, bias=use_bias
).float()
for input_features, output_features in zip(self.dims[1:-1], self.dims[2:])
]
self._loc_lin_layer_weights = nn.ModuleList(layers)
for layer in layers:
layer.reset_parameters()
# set the bounds as an attribute on the weights object
self.dag_layer.weight.bounds = bounds
# set the dist types
self.dist_types = dist_types
# type the adjacency matrix
self.adj = None
self.adj_mean_effect = None
@property
def _logger(self):
return logging.getLogger(self.__class__.__name__)
@property
def dag_layer_bias(self) -> Union[torch.Tensor, None]:
"""
dag_layer bias is the bias of the first fully connected layer which determines the causal structure.
Returns:
dag_layer bias if use_bias is True, otherwise None
"""
return self.dag_layer.bias
@property
def dag_layer_weight(self) -> torch.Tensor:
"""
dag_layer weight is the weight of the first fully connected layer which determines the causal structure.
Returns:
dag_layer weight
"""
return self.dag_layer.weight
@property
def loc_lin_layer_weights(self) -> torch.Tensor:
"""
loc_lin_layer weights are the weight of hidden layers after the first fully connected layer.
Returns:
loc_lin_layer weights
"""
return self._loc_lin_layer_weights
def forward(self, x: torch.Tensor) -> torch.Tensor: # [n, d] -> [n, d]
"""
Feed forward calculation for the model.
Args:
x: input torch tensor
Returns:
output tensor from the model
"""
x = self.dag_layer(x) # [n, d * m1]
x = x.view(-1, self.dims[0], self.dims[1]) # [n, d, m1]
for layer in self.loc_lin_layer_weights:
x = torch.sigmoid(x) # [n, d, m1]
# soft clamp the denominator to prevent divide by zero and prevent very large weight increases
x = (x - x.mean(dim=0).detach()) / torch.sqrt(
(self.nonlinear_clamp + x.var(dim=0).detach())
)
x = layer(x) # [n, d, m2]
x = x.squeeze(dim=2) # [n, d]
return x
def reconstruct_data(self, X: np.ndarray) -> np.ndarray:
"""
Performs X_hat reconstruction,
then converts latent space to original data space via link function.
Args:
X: input data used to reconstruct
Returns:
reconstructed data
"""
# perform preprocessing and column expansions, do NOT refit
for dist_type in self.dist_types:
X = dist_type.preprocess_X(X, fit_transform=False)
with torch.no_grad():
# convert the predict data to pytorch tensor
X = torch.from_numpy(X).float().to(self.device)
# perform forward reconstruction
X_hat = self(X)
# recover each one of the latent space projections
for dist_type in self.dist_types:
X_hat = dist_type.inverse_link_function(X_hat)
return np.asarray(X_hat.cpu().detach().numpy().astype(np.float64))
@property
def bias(self) -> Union[np.ndarray, None]:
"""
Get the vector of feature biases
Returns:
bias vector if use_bias is True, otherwise None
"""
bias = self.dag_layer_bias
return bias if bias is None else bias.cpu().detach().numpy()
def fit(
self,
x: np.ndarray,
max_iter: int = 100,
h_tol: float = 1e-8,
rho_max: float = 1e16,
):
"""
Fit NOTEARS MLP model using the input data x
Args:
x: 2d numpy array input data, axis=0 is data rows, axis=1 is data columns. Data must be row oriented.
max_iter: max number of dual ascent steps during optimisation.
h_tol: exit if h(w) < h_tol (as opposed to strict definition of 0).
            rho_max: maximum penalty parameter of the augmented Lagrangian; fitting stops once rho >= rho_max.
"""
rho, alpha, h = 1.0, 0.0, np.inf
X_torch = torch.from_numpy(x).float().to(self.device)
for n_iter in range(max_iter):
rho, alpha, h = self._dual_ascent_step(X_torch, rho, alpha, h, rho_max)
if h <= h_tol or rho >= rho_max:
break
if n_iter == max_iter - 1 and h > h_tol:
self._logger.warning(
"Failed to converge. Consider increasing max_iter."
)
# calculate the adjacency matrix after the fitting is finished
self.adj = (
self._calculate_adj(X_torch, mean_effect=False).cpu().detach().numpy()
)
self.adj_mean_effect = (
self._calculate_adj(X_torch, mean_effect=True).cpu().detach().numpy()
)
def _dual_ascent_step(
self, X: torch.Tensor, rho: float, alpha: float, h: float, rho_max: float
) -> Tuple[float, float, float]:
"""
Perform one step of dual ascent in augmented Lagrangian.
Args:
X: input tensor data.
            rho: current penalty parameter of the augmented Lagrangian.
            alpha: current estimate of the Lagrange multiplier.
            h: DAGness of the adjacency matrix from the previous iteration.
            rho_max: maximum penalty parameter; rho is no longer increased once it reaches rho_max.
Returns:
rho, alpha and h
"""
def _get_flat_grad(params: List[torch.Tensor]) -> np.ndarray:
"""
Get flatten gradient vector from the parameters of the model
Args:
params: parameters of the model
Returns:
flatten gradient vector in numpy form
"""
views = [
p.data.new(p.data.numel()).zero_()
if p.grad is None
else p.grad.data.to_dense().view(-1)
if p.grad.data.is_sparse
else p.grad.data.view(-1)
for p in params
]
return torch.cat(views, 0).cpu().detach().numpy()
def _get_flat_bounds(
params: List[torch.Tensor],
) -> List[Tuple[Union[None, float]]]:
"""
Get bound constraint for each parameter in flatten vector form from the parameters of the model
Args:
params: parameters of the model
Returns:
flatten vector of bound constraints for each parameter in numpy form
"""
bounds = []
for p in params:
try:
b = p.bounds
except AttributeError:
b = [(None, None)] * p.numel()
bounds += b
return bounds
def _get_flat_params(params: List[torch.Tensor]) -> np.ndarray:
"""
Get parameters in flatten vector from the parameters of the model
Args:
params: parameters of the model
Returns:
flatten parameters vector in numpy form
"""
views = [
p.data.to_dense().view(-1) if p.data.is_sparse else p.data.view(-1)
for p in params
]
return torch.cat(views, 0).cpu().detach().numpy()
def _update_params_from_flat(
params: List[torch.Tensor], flat_params: np.ndarray
):
"""
Update parameters of the model from the parameters in the form of flatten vector
Args:
params: parameters of the model
flat_params: parameters in the form of flatten vector
"""
offset = 0
flat_params_torch = torch.from_numpy(flat_params).to(
torch.get_default_dtype()
)
for p in params:
n_params = p.numel()
# view_as to avoid deprecated pointwise semantics
p.data = flat_params_torch[offset : offset + n_params].view_as(p.data)
offset += n_params
def _func(flat_params: np.ndarray) -> Tuple[float, np.ndarray]:
"""
Objective function that the NOTEARS algorithm tries to minimise.
Args:
flat_params: parameters to be optimised to minimise the objective function
Returns:
Loss and gradient
"""
_update_params_from_flat(params, flat_params)
optimizer.zero_grad()
n_features = X.shape[1]
X_hat = self(X)
h_val = self._h_func()
loss = 0.0
# sum the losses across all dist types
for dist_type in self.dist_types:
loss = loss + dist_type.loss(X, X_hat)
lagrange_penalty = 0.5 * rho * h_val * h_val + alpha * h_val
# NOTE: both the l2 and l1 regularization are NOT applied to the bias parameters
l2_reg = 0.5 * self.ridge_beta * self._l2_reg(n_features)
l1_reg = self.lasso_beta * self._l1_reg(n_features)
primal_obj = loss + lagrange_penalty + l2_reg + l1_reg
primal_obj.backward()
loss = primal_obj.item()
flat_grad = _get_flat_grad(params)
return loss, flat_grad.astype("float64")
optimizer = torch.optim.Optimizer(self.parameters(), dict())
params = optimizer.param_groups[0]["params"]
flat_params = _get_flat_params(params)
bounds = _get_flat_bounds(params)
h_new = np.inf
while (rho < rho_max) and (h_new > 0.25 * h or h_new == np.inf):
# Magic
sol = sopt.minimize(
_func,
flat_params,
method="L-BFGS-B",
jac=True,
bounds=bounds,
)
_update_params_from_flat(params, sol.x)
h_new = self._h_func().item()
if h_new > 0.25 * h:
rho *= 10
alpha += rho * h_new
return rho, alpha, h_new
def _h_func(self) -> torch.Tensor:
"""
Constraint function of the NOTEARS algorithm.
Constrain 2-norm-squared of dag_layer weights of the model along m1 dim to be a DAG
Returns:
DAGness of the adjacency matrix
"""
d = self.dims[0]
d_torch = torch.tensor(d).to(self.device) # pylint: disable=not-callable
# only consider the dag_layer for h(W) for compute efficiency
dag_layer_weight = self.dag_layer_weight.view(d, -1, d) # [j, m1, i]
square_weight_mat = torch.sum(
dag_layer_weight * dag_layer_weight, dim=1
).t() # [i, j]
# modify the h(W) matrix to deal with expanded columns
original_idxs = []
for dist_type in self.dist_types:
# modify the weight matrix to prevent spurious cycles with expended columns
square_weight_mat = dist_type.modify_h(square_weight_mat)
# gather the original idxs
original_idxs.append(dist_type.idx)
# original size is largest original index
original_size = np.max(original_idxs) + 1
# subselect the top LH corner of matrix which corresponds to original data
square_weight_mat = square_weight_mat[:original_size, :original_size]
# update d and d_torch to match the new matrix size
d = square_weight_mat.shape[0]
d_torch = torch.tensor(d).to(self.device) # pylint: disable=not-callable
# h = trace_expm(a) - d # (Zheng et al. 2018)
characteristic_poly_mat = (
torch.eye(d).to(self.device) + square_weight_mat / d_torch
) # (Yu et al. 2019)
polynomial_mat = torch.matrix_power(characteristic_poly_mat, d - 1)
h = (polynomial_mat.t() * characteristic_poly_mat).sum() - d
return h
def _l1_reg(self, n_features: int) -> torch.Tensor:
"""
Take average l1 of all weight parameters of the model.
NOTE: regularisation needs to be scaled up by the number of features
because the loss scales with feature number.
Returns:
l1 regularisation term.
"""
return torch.mean(torch.abs(self.dag_layer_weight)) * n_features
def _l2_reg(self, n_features: int) -> torch.Tensor:
"""
Take average 2-norm-squared of all weight parameters of the model.
NOTE: regularisation needs to be scaled up by the number of features
because the loss scales with feature number.
Returns:
l2 regularisation term.
"""
reg = 0.0
reg += torch.sum(self.dag_layer_weight ** 2)
for layer in self.loc_lin_layer_weights:
reg += torch.sum(layer.weight ** 2)
# calculate the total number of elements used in the above sums
n_elements = self.dag_layer_weight.numel()
for layer in self.loc_lin_layer_weights:
n_elements = n_elements + layer.weight.numel()
return reg / n_elements * n_features
def _calculate_adj(self, X: torch.Tensor, mean_effect: bool) -> torch.Tensor:
"""
Calculate the adjacency matrix.
For the linear case, this is just dag_layer_weight.
For the nonlinear case, approximate the relationship using the gradient of X_hat wrt X.
"""
# for the linear case, save compute by just returning the dag_layer weights
if len(self.dims) <= 2:
adj = (
self.dag_layer_weight.T
if mean_effect
else torch.abs(self.dag_layer_weight.T)
)
return adj
_, n_features = X.shape
# get the data X and reconstruction X_hat
X = X.clone().requires_grad_()
X_hat = self(X).sum(dim=0) # shape = (n_features,)
adj = []
# iterate over sums of reconstructed features
for j in range(n_features):
# calculate the gradient of X_hat wrt X
ddx = torch.autograd.grad(X_hat[j], X, create_graph=True)[0]
if mean_effect:
# get the average effect
adj.append(ddx.mean(axis=0).unsqueeze(0))
else:
# otherwise, use the average L1 of the gradient as the W
adj.append(torch.abs(ddx).mean(dim=0).unsqueeze(0))
adj = torch.cat(adj, dim=0)
# transpose to get the adjacency matrix
return adj.T
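# Illustrative sketch (standalone helper, not used by the class above): the acyclicity
# measure applied in NotearsMLP._h_func, h(W) = tr[(I + W*W/d)^d] - d (Yu et al. 2019),
# computed here for a plain d x d numpy weight matrix. h is ~0 for a DAG and grows with
# the weight of any cycle.
def _demo_dagness(weight_matrix: np.ndarray) -> float:
    d = weight_matrix.shape[0]
    # element-wise square mirrors the 2-norm-squared aggregation used in _h_func
    m = np.eye(d) + weight_matrix * weight_matrix / d
    return float(np.trace(np.linalg.matrix_power(m, d)) - d)
# e.g. _demo_dagness(np.array([[0., 1.], [0., 0.]])) == 0.0 (a DAG), while adding the
# reverse edge, np.array([[0., 1.], [1., 0.]]), yields a strictly positive value.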
|
the-stack_106_16399
|
"""
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
# #############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
# #############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.scatter(X_train_r[:, 0], Y_train_r[:, 0], label="train",
marker="o", c="b", s=25)
plt.scatter(X_test_r[:, 0], Y_test_r[:, 0], label="test",
marker="o", c="r", s=25)
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.scatter(X_train_r[:, 1], Y_train_r[:, 1], label="train",
marker="o", c="b", s=25)
plt.scatter(X_test_r[:, 1], Y_test_r[:, 1], label="test",
marker="o", c="r", s=25)
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.scatter(X_train_r[:, 0], X_train_r[:, 1], label="train",
marker="*", c="b", s=50)
plt.scatter(X_test_r[:, 0], X_test_r[:, 1], label="test",
marker="*", c="r", s=50)
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.scatter(Y_train_r[:, 0], Y_train_r[:, 1], label="train",
marker="*", c="b", s=50)
plt.scatter(Y_test_r[:, 0], Y_test_r[:, 1], label="test",
marker="*", c="r", s=50)
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
# #############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
# #############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
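# Also report the per-component test correlations for CCA, mirroring the values shown in
# the PLSCanonical plot titles above (X_train/X_test still refer to the first dataset).
print("CCA comp. 1 test corr: %.2f"
      % np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
print("CCA comp. 2 test corr: %.2f"
      % np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])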
|
the-stack_106_16400
|
import re
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
model_urls = {
'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
class _DenseLayer(nn.Sequential):
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        super(_DenseLayer, self).__init__()
        self.add_module("norm1", nn.BatchNorm2d(num_input_features))
        self.add_module("relu1", nn.ReLU(inplace=True))  # inplace ReLU to reduce memory usage
        self.add_module("conv1", nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False))
        self.add_module("norm2", nn.BatchNorm2d(bn_size * growth_rate))
        self.add_module("relu2", nn.ReLU(inplace=True))  # inplace ReLU to reduce memory usage
        self.add_module("conv2", nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False))
        self.drop_rate = drop_rate
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
if self.drop_rate> 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return torch.cat([x, new_features], 1)
class _DenseBlock(nn.Sequential):
    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate)
self.add_module("denselayer%d" % (i+1), layer)
class _Transition(nn.Sequential):
def __init__(self, num_input_feature, num_output_feature):
super(_Transition, self).__init__()
self.add_module("norm", nn.BatchNorm2d(num_input_feature))
self.add_module("relu", nn.ReLU(inplace=True))
self.add_module("conv", nn.Conv2d(num_input_feature, num_output_feature, kernel_size=1, stride=1, bias=False))
self.add_module("pool", nn.AvgPool2d(2, stride=2))
class DenseNet(nn.Module):
""""
DenseNet-BC model
param growth_rate: (int) number of filters used in DenseLayer, `k` in the paper
:param block_config: (list of 4 ints) number of layers in each DenseBlock
:param num_init_features: (int) number of filters in the first Conv2d
:param bn_size: (int) the factor using in the bottleneck layer
:param compression_rate: (float) the compression rate used in Transition Layer
:param drop_rate: (float) the drop rate after each DenseLayer
:param num_classes: (int) number of classes for classification
"""
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64,
bn_size=4, compression_rate=0.5, drop_rate=0, num_classes=1000):
super(DenseNet, self).__init__()
self.features = nn.Sequential(OrderedDict([
("conv0", nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
("norm0", nn.BatchNorm2d(num_features=num_init_features)),
("relu0", nn.ReLU(inplace=True)),
("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
]))
# DenseBlock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(num_layers, num_features, bn_size, growth_rate, drop_rate)
self.features.add_module("denseblock%d" % (i+1), block)
num_features += num_layers * growth_rate
            if i != len(block_config) - 1:
transition = _Transition(num_features, int(num_features * compression_rate))
self.features.add_module("transition%d" % (i+1), transition)
num_features = int(num_features*compression_rate)
# final bn+RelU
self.features.add_module("norm5", nn.BatchNorm2d(num_features))
self.features.add_module(("relu5", nn.ReLU(inplace=True)))
# classfication layer
        self.classifier = nn.Linear(num_features, num_classes)
# parms initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
def forward(self, x):
features = self.features(x)
out = F.avg_pool2d(features, 7, stride=1).view(features.size(0), -1)
        out = self.classifier(out)
return out
def densenet121(pretrained=False, **kwargs):
"""DenseNet121"""
model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
**kwargs)
if pretrained:
# '.'s are no longer allowed in module names, but pervious _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict = model_zoo.load_url(model_urls['densenet121'])
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
model.load_state_dict(state_dict)
return model
if __name__ == "__main__":
densenet = densenet121(pretrained=True)
densenet.eval()
img = Image.open("./images/cat.jpg")
trans_ops = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
images = trans_ops(img).view(-1, 3, 224, 224)
print(images)
outputs = densenet(images)
_, predictions = outputs.topk(5, dim=1)
labels = list(map(lambda s: s.strip(), open("./data/imagenet/synset_words.txt").readlines()))
for idx in predictions.numpy()[0]:
print("Predicted labels:", labels[idx])
|
the-stack_106_16401
|
# Copyright 2020 ponai Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
from ponai.networks.layers.factories import Norm, Act, split_args
from ponai.networks.nets.regressor import Regressor
class Classifier(Regressor):
"""
Defines a classification network from Regressor by specifying the output shape as a single dimensional tensor
with size equal to the number of classes to predict. The final activation function can also be specified, eg.
softmax or sigmoid.
"""
def __init__(
self,
in_shape,
classes,
channels,
strides,
kernel_size=3,
num_res_units=2,
act=Act.PRELU,
norm=Norm.INSTANCE,
dropout=None,
bias=True,
last_act=None,
):
super().__init__(in_shape, (classes,), channels, strides, kernel_size, num_res_units, act, norm, dropout, bias)
if last_act is not None:
last_act_name, last_act_args = split_args(last_act)
last_act_type = Act[last_act_name]
self.final.add_module("lastact", last_act_type(**last_act_args))
class Discriminator(Classifier):
"""
Defines a discriminator network from Classifier with a single output value and sigmoid activation by default. This
is meant for use with GANs or other applications requiring a generic discriminator network.
"""
def __init__(
self,
in_shape,
channels,
strides,
kernel_size=3,
num_res_units=2,
act=Act.PRELU,
norm=Norm.INSTANCE,
dropout=0.25,
bias=True,
last_act=Act.SIGMOID,
):
super().__init__(in_shape, 1, channels, strides, kernel_size, num_res_units, act, norm, dropout, bias, last_act)
class Critic(Classifier):
"""
Defines a critic network from Classifier with a single output value and no final activation. The final layer is
`nn.Flatten` instead of `nn.Linear`, the final result is computed as the mean over the first dimension. This is
    meant to be used with Wasserstein GANs.
"""
def __init__(
self,
in_shape,
channels,
strides,
kernel_size=3,
num_res_units=2,
act=Act.PRELU,
norm=Norm.INSTANCE,
dropout=0.25,
bias=True,
):
super().__init__(in_shape, 1, channels, strides, kernel_size, num_res_units, act, norm, dropout, bias, None)
def _get_final_layer(self, in_shape):
return nn.Flatten()
def forward(self, x):
x = self.net(x)
x = self.final(x)
x = x.mean(1)
return x.view((x.shape[0], -1))
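# Minimal usage sketch (illustrative only; shapes, channels and strides are arbitrary, and
# in_shape is assumed to be the per-sample input shape handled by the parent Regressor):
# both networks should return one value per batch element.
if __name__ == "__main__":
    import torch
    x = torch.rand(8, 1, 64, 64)
    disc = Discriminator(in_shape=(1, 64, 64), channels=(2, 4, 8), strides=(2, 2, 2))
    critic = Critic(in_shape=(1, 64, 64), channels=(2, 4, 8), strides=(2, 2, 2))
    print(disc(x).shape, critic(x).shape)  # expected: torch.Size([8, 1]) twice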
|
the-stack_106_16404
|
"""Takes the data from the raw tables and processes it for use in the student timeseries data"""
from pandas import read_csv
from data.research.columns import REFColumns
from util.add_z_score import add_z_score
def extract_research_quality_metrics():
"""Takes the data from the raw tables and prepares it for the student timeseries data"""
    def _to_float(value):
        # str.isnumeric() rejects decimal strings such as "12.5", so parse defensively instead
        try:
            return float(value)
        except ValueError:
            return 0
    converters = {
        REFColumns.GRADE_4STAR_PERCENTAGE.value: _to_float,
        REFColumns.GRADE_3STAR_PERCENTAGE.value: _to_float,
        REFColumns.GRADE_2STAR_PERCENTAGE.value: _to_float,
        REFColumns.GRADE_1STAR_PERCENTAGE.value: _to_float,
        REFColumns.GRADE_UNCLASSIFIED_PERCENTAGE.value: _to_float,
    }
ref_2014_table = read_csv(
"data/research/raw/REF2014.csv", skiprows=7, converters=converters
)
add_research_quality_metrics(ref_2014_table)
ref_2014_table = filter_to_overall_score(ref_2014_table)
ref_2014_table_weighted_volume = calc_total_weighted_volume_per_uni(ref_2014_table)
ref_2014_table_quality_score = calc_avg_quality_score_per_uni(ref_2014_table)
ref_2014_table_output = ref_2014_table_weighted_volume.join(
ref_2014_table_quality_score
)
add_z_score(
ref_2014_table_output,
REFColumns.QUALITY_SCORE.value,
REFColumns.QUALITY_SCORE_Z.value,
)
add_z_score(
ref_2014_table_output,
REFColumns.QUALITY_WEIGHTED_VOLUME.value,
REFColumns.QUALITY_WEIGHTED_VOLUME_Z.value,
)
ref_2014_table_output[
[REFColumns.QUALITY_SCORE_Z.value, REFColumns.QUALITY_WEIGHTED_VOLUME_Z.value]
] = round(
ref_2014_table_output[
[
REFColumns.QUALITY_SCORE_Z.value,
REFColumns.QUALITY_WEIGHTED_VOLUME_Z.value,
]
],
2,
)
ref_2014_table_output = (
ref_2014_table_output.reset_index()
.melt(
id_vars=[
REFColumns.HE_PROVIDER_CODE.value,
REFColumns.HE_PROVIDER_NAME.value,
],
value_vars=[
REFColumns.QUALITY_SCORE_Z.value,
REFColumns.QUALITY_WEIGHTED_VOLUME_Z.value,
],
var_name="Metric",
value_name="Value",
)
.sort_values(by=REFColumns.HE_PROVIDER_CODE.value, ignore_index=True)
)
ref_2014_table_output.to_csv(
"data/research/research_quality_metrics.csv", index=False
)
def calc_avg_quality_score_per_uni(dataframe):
"""Calculate a dataframe with an average quality score for each uni"""
ref_2014_table_quality_score = dataframe[
[
REFColumns.HE_PROVIDER_CODE.value,
REFColumns.HE_PROVIDER_NAME.value,
REFColumns.FTE_STAFF.value,
REFColumns.QUALITY_SCORE.value,
]
]
ref_2014_table_quality_score = (
ref_2014_table_quality_score.groupby(
by=[
REFColumns.HE_PROVIDER_CODE.value,
REFColumns.HE_PROVIDER_NAME.value,
]
)
.sum()
.copy()
)
ref_2014_table_quality_score[REFColumns.QUALITY_SCORE.value] = (
ref_2014_table_quality_score[REFColumns.QUALITY_SCORE.value]
/ ref_2014_table_quality_score[REFColumns.FTE_STAFF.value]
)
return ref_2014_table_quality_score
def calc_total_weighted_volume_per_uni(dataframe):
"""Calculate the total weighted volume of research for each uni"""
ref_2014_table_weighted_volume = dataframe[
[
REFColumns.HE_PROVIDER_CODE.value,
REFColumns.HE_PROVIDER_NAME.value,
REFColumns.QUALITY_WEIGHTED_VOLUME.value,
]
]
ref_2014_table_weighted_volume = (
ref_2014_table_weighted_volume.groupby(
by=[
REFColumns.HE_PROVIDER_CODE.value,
REFColumns.HE_PROVIDER_NAME.value,
]
)
.sum()
.copy()
)
ref_2014_table_weighted_volume[REFColumns.QUALITY_WEIGHTED_VOLUME.value] = (
ref_2014_table_weighted_volume[REFColumns.QUALITY_WEIGHTED_VOLUME.value]
).copy()
return ref_2014_table_weighted_volume
def filter_to_overall_score(dataframe):
"""Filter out sub categories of ref scores"""
return dataframe[(dataframe[REFColumns.PROFILE.value] == "Overall")]
def add_research_quality_metrics(dataframe):
"""Add quality scores and quality weighted volume of research columns"""
dataframe[REFColumns.QUALITY_SCORE.value] = (
dataframe[REFColumns.FTE_STAFF.value]
* (
dataframe[REFColumns.GRADE_4STAR_PERCENTAGE.value]
+ dataframe[REFColumns.GRADE_3STAR_PERCENTAGE.value]
)
/ (
dataframe[REFColumns.GRADE_4STAR_PERCENTAGE.value]
+ dataframe[REFColumns.GRADE_3STAR_PERCENTAGE.value]
+ dataframe[REFColumns.GRADE_2STAR_PERCENTAGE.value]
)
)
dataframe[REFColumns.QUALITY_WEIGHTED_VOLUME.value] = (
dataframe[REFColumns.FTE_STAFF.value]
* (
dataframe[REFColumns.GRADE_4STAR_PERCENTAGE.value] * 4
+ dataframe[REFColumns.GRADE_3STAR_PERCENTAGE.value]
)
/ 500
)
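# Worked example of the two metrics computed above (numbers are illustrative only): a unit
# submitting 100 FTE staff rated 30% 4*, 40% 3*, 20% 2* and 10% 1*/unclassified gets
#   quality score   = 100 * (30 + 40) / (30 + 40 + 20) ~= 77.8
#   weighted volume = 100 * (30 * 4 + 40) / 500          = 32.0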
|
the-stack_106_16405
|
# -*- coding: utf-8 -*-
"""
@author: Junxiao Song
"""
from __future__ import print_function
import numpy as np
class Board(object):
"""board for the game"""
def __init__(self, **kwargs):
self.width = int(kwargs.get('width', 8))
self.height = int(kwargs.get('height', 8))
# board states stored as a dict,
# key: move as location on the board,
# value: player as pieces type
self.states = {}
# need how many pieces in a row to win
self.n_in_row = int(kwargs.get('n_in_row', 5))
self.players = [1, 2] # player1 and player2
def init_board(self, start_player=0):
if self.width < self.n_in_row or self.height < self.n_in_row:
raise Exception('board width and height can not be '
'less than {}'.format(self.n_in_row))
self.current_player = self.players[start_player] # start player
# keep available moves in a list
self.availables = list(range(self.width * self.height))
self.states = {}
self.last_move = -1
def move_to_location(self, move):
"""
3*3 board's moves like:
6 7 8
3 4 5
0 1 2
and move 5's location is (1,2)
"""
h = move // self.width
w = move % self.width
return [h, w]
def location_to_move(self, location):
if len(location) != 2:
return -1
h = location[0]
w = location[1]
move = h * self.width + w
if move not in range(self.width * self.height):
return -1
return move
def current_state(self):
"""return the board state from the perspective of the current player.
state shape: 4*width*height
"""
square_state = np.zeros((4, self.width, self.height))
if self.states:
moves, players = np.array(list(zip(*self.states.items())))
move_curr = moves[players == self.current_player]
move_oppo = moves[players != self.current_player]
square_state[0][move_curr // self.width,
move_curr % self.height] = 1.0
square_state[1][move_oppo // self.width,
move_oppo % self.height] = 1.0
# indicate the last move location
square_state[2][self.last_move // self.width,
self.last_move % self.height] = 1.0
if len(self.states) % 2 == 0:
square_state[3][:, :] = 1.0 # indicate the colour to play
return square_state[:, ::-1, :]
def do_move(self, move):
self.states[move] = self.current_player
self.availables.remove(move)
self.current_player = (
self.players[0] if self.current_player == self.players[1]
else self.players[1]
)
self.last_move = move
def has_a_winner(self):
width = self.width
height = self.height
states = self.states
n = self.n_in_row
moved = list(set(range(width * height)) - set(self.availables))
if len(moved) < self.n_in_row + 2:
return False, -1
for m in moved:
h = m // width
w = m % width
player = states[m]
if (w in range(width - n + 1) and
len(set(states.get(i, -1) for i in range(m, m + n))) == 1):
return True, player
if (h in range(height - n + 1) and
len(set(states.get(i, -1) for i in range(m, m + n * width, width))) == 1):
return True, player
if (w in range(width - n + 1) and h in range(height - n + 1) and
len(set(states.get(i, -1) for i in range(m, m + n * (width + 1), width + 1))) == 1):
return True, player
if (w in range(n - 1, width) and h in range(height - n + 1) and
len(set(states.get(i, -1) for i in range(m, m + n * (width - 1), width - 1))) == 1):
return True, player
return False, -1
def game_end(self):
"""Check whether the game is ended or not"""
win, winner = self.has_a_winner()
if win:
return True, winner
elif not len(self.availables):
return True, -1
return False, -1
def get_current_player(self):
return self.current_player
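# Illustrative usage sketch (not part of the original module):
#
#     board = Board(width=8, height=8, n_in_row=5)
#     board.init_board(start_player=0)
#     move = board.location_to_move([3, 4])   # row 3, column 4 -> move 28
#     board.do_move(move)
#     state = board.current_state()           # numpy array of shape (4, 8, 8)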
class Game(object):
"""game server"""
def __init__(self, board, **kwargs):
self.board = board
def graphic(self, board, player1, player2):
"""Draw the board and show game info"""
width = board.width
height = board.height
print("Player", player1, "with X".rjust(3))
print("Player", player2, "with O".rjust(3))
print()
for x in range(width):
print("{0:8}".format(x), end='')
print('\r\n')
for i in range(height - 1, -1, -1):
print("{0:4d}".format(i), end='')
for j in range(width):
loc = i * width + j
p = board.states.get(loc, -1)
if p == player1:
print('X'.center(8), end='')
elif p == player2:
print('O'.center(8), end='')
else:
print('_'.center(8), end='')
print('\r\n\r\n')
def start_play(self, player1, player2, start_player=0, is_shown=1):
"""start a game between two players"""
if start_player not in (0, 1):
raise Exception('start_player should be either 0 (player1 first) '
'or 1 (player2 first)')
self.board.init_board(start_player)
p1, p2 = self.board.players
player1.set_player_ind(p1)
player2.set_player_ind(p2)
players = {p1: player1, p2: player2}
if is_shown:
self.graphic(self.board, player1.player, player2.player)
while True:
current_player = self.board.get_current_player()
player_in_turn = players[current_player]
move = player_in_turn.get_action(self.board)
self.board.do_move(move)
if is_shown:
self.graphic(self.board, player1.player, player2.player)
end, winner = self.board.game_end()
if end:
if is_shown:
if winner != -1:
print("Game end. Winner is", players[winner])
else:
print("Game end. Tie")
return winner
def start_self_play(self, player, is_shown=0, temp=1e-3):
""" start a self-play game using a MCTS player, reuse the search tree,
and store the self-play data: (state, mcts_probs, z) for training
"""
self.board.init_board()
p1, p2 = self.board.players
states, mcts_probs, current_players = [], [], []
while True:
move, move_probs = player.get_action(self.board,
temp=temp,
return_prob=1)
# store the data
states.append(self.board.current_state())
mcts_probs.append(move_probs)
current_players.append(self.board.current_player)
# perform a move
self.board.do_move(move)
if is_shown:
self.graphic(self.board, p1, p2)
end, winner = self.board.game_end()
if end:
# winner from the perspective of the current player of each state
winners_z = np.zeros(len(current_players))
if winner != -1:
winners_z[np.array(current_players) == winner] = 1.0
winners_z[np.array(current_players) != winner] = -1.0
# reset MCTS root node
player.reset_player()
if is_shown:
if winner != -1:
print("Game end. Winner is player:", winner)
else:
print("Game end. Tie")
return winner, zip(states, mcts_probs, winners_z)
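# Illustrative usage sketch (not part of the original module); `player1` and
# `player2` stand in for any objects exposing set_player_ind() and
# get_action(board), e.g. a human wrapper and an MCTS player:
#
#     board = Board(width=8, height=8, n_in_row=5)
#     game = Game(board)
#     winner = game.start_play(player1, player2, start_player=0, is_shown=1)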
|
the-stack_106_16407
|
import sys
import os
import httplib2
import urllib.request as urllib2
from urllib.error import URLError
import logging
class Downloader(object):
def __init__(self):
self.filepath = None
def get_filepath(self):
return self.filepath
def download(self, url, path, fallback_filename):
logging.debug("Downloading URL {}".format(url))
try:
remotefile = urllib2.urlopen(url)
except URLError:
logging.error("URL could not be opened. Aborting.")
return None
filename = remotefile.info()['Content-Disposition']
if filename is None:
filename = fallback_filename
logging.debug("Filename is {}".format(filename))
self.filepath = os.path.join(path, filename)
CHUNK = 16 * 1024
with open(self.filepath, "wb") as fp:
while True:
chunk = remotefile.read(CHUNK)
if not chunk: break
fp.write(chunk)
return self.filepath
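# Illustrative usage sketch (not part of the original module); the URL and
# paths are hypothetical:
#
#     downloader = Downloader()
#     saved_path = downloader.download("https://example.com/archive.zip",
#                                      "/tmp", "archive.zip")
#     if saved_path is not None:
#         print("Saved to", downloader.get_filepath())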
|
the-stack_106_16408
|
"""
Defines:
- ResultSet(allowed_results)
Attributes
----------
- allowed
- found
- saved
Methods
-------
- is_saved(result)
- is_not_saved(result)
- clear()
- add(result)
- remove(results)
- _found_result(result)
- update(self, results)
"""
import re
from copy import deepcopy
class ResultSet:
"""
This class is private storage interface class.
It's an interface tool between the code and the results the user requests.
"""
def __init__(self, allowed_results, results_map, unused_log):
#self.log = log
#allowed_results.sort()
#for a in allowed_results:
#print(a)
# the full set of allowable results
self.allowed = set(allowed_results)
#assert 'responses.convergence_data' in allowed_results
# the set of results that have been found
self.found = set()
# the set of results to be saved
self.saved = deepcopy(self.allowed)
self.results_map = results_map
def is_saved(self, result):
"""checks to see if a result is saved"""
if result not in self.allowed:
#allowed2 = list(self.allowed)
#allowed2.sort()
msg = "result=%r is invalid; the name changed or it's a typo.\n" % result
if '.' in result:
base, end = result.split('.', 1)
#print(base, end)
#print(self.allowed)
#print(f'base={base} end={end}')
#print(self.results_map)
if base in self.results_map:
results_obj = self.results_map[base]
msg += 'Potential results include:\n - ' + '\n - '.join(results_obj.get_table_types())
assert result in results_obj.get_table_types()
#print(results_obj.get_table_types())
raise RuntimeError(msg.rstrip())
if result in self.saved:
#self.log.debug(' %s is being read' % result)
return True
#self.log.debug(' %s was skipped' % result)
return False
def is_not_saved(self, result):
"""checks to see if a result is saved"""
return not self.is_saved(result)
def clear(self):
"""clears all the results"""
self.saved.clear()
def add(self, results):
"""addds a list/str of results"""
all_matched_results = self._get_matched_results(results)
for result in all_matched_results:
if result not in self.saved:
self.saved.add(result)
def remove(self, results):
"""removes a list/str of results"""
all_matched_results = self._get_matched_results(results)
for result in all_matched_results:
if result in self.saved:
self.saved.remove(result)
#disable_set = set(results)
#self.saved.difference(disable_set)
def _get_matched_results(self, results):
"""handles expansion of regexs"""
if isinstance(results, str):
results = [results]
all_matched_results = []
for result in results:
# tack on a word boundary if we have a * at the beginning of the regex
resulti = r'\w' + result if result.startswith('*') else result
regex = re.compile(resulti)
matched_results = list(filter(regex.match, self.allowed))
if len(matched_results) == 0:
#allowed = list(self.allowed)
#allowed.sort()
#raise RuntimeError('%r is not a valid result to remove\nallowed=[%s]' % (
#result, ', '.join(allowed)))
raise RuntimeError(f'{result!r} is not a valid result to remove\n{self}\n'
f'{result!r} is not a valid result to remove')
all_matched_results.extend(matched_results)
return all_matched_results
def _found_result(self, result):
if result not in self.allowed:
msg = "result=%r is invalid; the name changed or it's a typo" % result
raise RuntimeError(msg) # check line ~640 in op2_f06_common.py if this is a new result
self.found.add(result)
def update(self, results):
for result in results:
self.add(result)
#def add_found_result(self, result):
#pass
def __repr__(self):
"""defines the repr"""
msg = 'ResultSet:\n'
msg += ' results:\n'
for result in sorted(self.allowed):
if result in self.saved:
msg += ' %s\n' % result
else:
msg += ' %s (disabled)\n' % result
return msg
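# Illustrative usage sketch (not part of the original module); the result
# names and the empty results_map are hypothetical:
#
#     rset = ResultSet(['displacements', 'stress.cquad4_stress'], {}, None)
#     rset.clear()                       # deselect everything
#     rset.add('displacements')          # re-enable a single result
#     rset.remove('stress.*')            # regex patterns are supported
#     assert rset.is_saved('displacements')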
|
the-stack_106_16410
|
"""distutils.command.build_py
Implements the Distutils 'build_py' command."""
__revision__ = "$Id$"
import os
import sys
from glob import glob
from distutils.core import Command
from distutils.errors import DistutilsOptionError, DistutilsFileError
from distutils.util import convert_path
from distutils import log
class build_py(Command):
description = "\"build\" pure Python modules (copy to build directory)"
user_options = [
('build-lib=', 'd', "directory to \"build\" (copy) to"),
('compile', 'c', "compile .py to .pyc"),
('no-compile', None, "don't compile .py files [default]"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
]
boolean_options = ['compile', 'force']
negative_opt = {'no-compile' : 'compile'}
def initialize_options(self):
self.build_lib = None
self.py_modules = None
self.package = None
self.package_data = None
self.package_dir = None
self.compile = 0
self.optimize = 0
self.force = None
def finalize_options(self):
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('force', 'force'))
# Get the distribution options that are aliases for build_py
# options -- list of packages and list of modules.
self.packages = self.distribution.packages
self.py_modules = self.distribution.py_modules
self.package_data = self.distribution.package_data
self.package_dir = {}
if self.distribution.package_dir:
for name, path in self.distribution.package_dir.items():
self.package_dir[name] = convert_path(path)
self.data_files = self.get_data_files()
# Ick, copied straight from install_lib.py (fancy_getopt needs a
# type system! Hell, *everything* needs a type system!!!)
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
assert 0 <= self.optimize <= 2
except (ValueError, AssertionError):
raise DistutilsOptionError("optimize must be 0, 1, or 2")
def run(self):
# XXX copy_file by default preserves atime and mtime. IMHO this is
# the right thing to do, but perhaps it should be an option -- in
# particular, a site administrator might want installed files to
# reflect the time of installation rather than the last
# modification time before the installed release.
# XXX copy_file by default preserves mode, which appears to be the
# wrong thing to do: if a file is read-only in the working
# directory, we want it to be installed read/write so that the next
# installation of the same module distribution can overwrite it
# without problems. (This might be a Unix-specific issue.) Thus
# we turn off 'preserve_mode' when copying to the build directory,
# since the build directory is supposed to be exactly what the
# installation will look like (ie. we preserve mode when
# installing).
# Two options control which modules will be installed: 'packages'
# and 'py_modules'. The former lets us work with whole packages, not
# specifying individual modules at all; the latter is for
# specifying modules one-at-a-time.
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.byte_compile(self.get_outputs(include_bytecode=0))
def get_data_files(self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
data = []
if not self.packages:
return data
for package in self.packages:
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Length of path to strip from found files
plen = 0
if src_dir:
plen = len(src_dir)+1
# Strip directory from globbed filenames
filenames = [
file[plen:] for file in self.find_data_files(package, src_dir)
]
data.append((package, src_dir, build_dir, filenames))
return data
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
globs = (self.package_data.get('', [])
+ self.package_data.get(package, []))
files = []
for pattern in globs:
# Each pattern has to be converted to a platform-specific path
filelist = glob(os.path.join(src_dir, convert_path(pattern)))
# Files that match more than one pattern are only added once
files.extend([fn for fn in filelist if fn not in files])
return files
def build_package_data(self):
"""Copy data files into build directory"""
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
self.copy_file(os.path.join(src_dir, filename), target,
preserve_mode=False)
def get_package_dir(self, package):
"""Return the directory, relative to the top of the source
distribution, where package 'package' should be found
(at least according to the 'package_dir' option, if any)."""
path = package.split('.')
if not self.package_dir:
if path:
return os.path.join(*path)
else:
return ''
else:
tail = []
while path:
try:
pdir = self.package_dir['.'.join(path)]
except KeyError:
tail.insert(0, path[-1])
del path[-1]
else:
tail.insert(0, pdir)
return os.path.join(*tail)
else:
# Oops, got all the way through 'path' without finding a
# match in package_dir. If package_dir defines a directory
# for the root (nameless) package, then fallback on it;
# otherwise, we might as well have not consulted
# package_dir at all, as we just use the directory implied
# by 'tail' (which should be the same as the original value
# of 'path' at this point).
pdir = self.package_dir.get('')
if pdir is not None:
tail.insert(0, pdir)
if tail:
return os.path.join(*tail)
else:
return ''
def check_package(self, package, package_dir):
# Empty dir name means current directory, which we can probably
# assume exists. Also, os.path.exists and isdir don't know about
# my "empty string means current dir" convention, so we have to
# circumvent them.
if package_dir != "":
if not os.path.exists(package_dir):
raise DistutilsFileError(
"package directory '%s' does not exist" % package_dir)
if not os.path.isdir(package_dir):
raise DistutilsFileError(
"supposed package directory '%s' exists, "
"but is not a directory" % package_dir)
# Require __init__.py for all but the "root package"
if package:
init_py = os.path.join(package_dir, "__init__.py")
if os.path.isfile(init_py):
return init_py
else:
log.warn(("package init file '%s' not found " +
"(or not a regular file)"), init_py)
# Either not in a package at all (__init__.py not expected), or
# __init__.py doesn't exist -- so don't return the filename.
return None
def check_module(self, module, module_file):
if not os.path.isfile(module_file):
log.warn("file %s (for module %s) not found", module_file, module)
return False
else:
return True
def find_package_modules(self, package, package_dir):
self.check_package(package, package_dir)
module_files = glob(os.path.join(package_dir, "*.py"))
modules = []
setup_script = os.path.abspath(self.distribution.script_name)
for f in module_files:
abs_f = os.path.abspath(f)
if abs_f != setup_script:
module = os.path.splitext(os.path.basename(f))[0]
modules.append((package, module, f))
else:
self.debug_print("excluding %s" % setup_script)
return modules
def find_modules(self):
"""Finds individually-specified Python modules, ie. those listed by
module name in 'self.py_modules'. Returns a list of tuples (package,
module_base, filename): 'package' is a tuple of the path through
package-space to the module; 'module_base' is the bare (no
packages, no dots) module name, and 'filename' is the path to the
".py" file (relative to the distribution root) that implements the
module.
"""
# Map package names to tuples of useful info about the package:
# (package_dir, checked)
# package_dir - the directory where we'll find source files for
# this package
# checked - true if we have checked that the package directory
# is valid (exists, contains __init__.py, ... ?)
packages = {}
# List of (package, module, filename) tuples to return
modules = []
# We treat modules-in-packages almost the same as toplevel modules,
# just the "package" for a toplevel is empty (either an empty
# string or empty list, depending on context). Differences:
# - don't check for __init__.py in directory for empty package
for module in self.py_modules:
path = module.split('.')
package = '.'.join(path[0:-1])
module_base = path[-1]
try:
(package_dir, checked) = packages[package]
except KeyError:
package_dir = self.get_package_dir(package)
checked = 0
if not checked:
init_py = self.check_package(package, package_dir)
packages[package] = (package_dir, 1)
if init_py:
modules.append((package, "__init__", init_py))
# XXX perhaps we should also check for just .pyc files
# (so greedy closed-source bastards can distribute Python
# modules too)
module_file = os.path.join(package_dir, module_base + ".py")
if not self.check_module(module, module_file):
continue
modules.append((package, module_base, module_file))
return modules
def find_all_modules(self):
"""Compute the list of all modules that will be built, whether
they are specified one-module-at-a-time ('self.py_modules') or
by whole packages ('self.packages'). Return a list of tuples
(package, module, module_file), just like 'find_modules()' and
'find_package_modules()' do."""
modules = []
if self.py_modules:
modules.extend(self.find_modules())
if self.packages:
for package in self.packages:
package_dir = self.get_package_dir(package)
m = self.find_package_modules(package, package_dir)
modules.extend(m)
return modules
def get_source_files(self):
return [module[-1] for module in self.find_all_modules()]
def get_module_outfile(self, build_dir, package, module):
outfile_path = [build_dir] + list(package) + [module + ".py"]
return os.path.join(*outfile_path)
def get_outputs(self, include_bytecode=1):
modules = self.find_all_modules()
outputs = []
for (package, module, module_file) in modules:
package = package.split('.')
filename = self.get_module_outfile(self.build_lib, package, module)
outputs.append(filename)
if include_bytecode:
if self.compile:
outputs.append(filename + "c")
if self.optimize > 0:
outputs.append(filename + "o")
outputs += [
os.path.join(build_dir, filename)
for package, src_dir, build_dir, filenames in self.data_files
for filename in filenames
]
return outputs
def build_module(self, module, module_file, package):
if isinstance(package, str):
package = package.split('.')
elif not isinstance(package, (list, tuple)):
raise TypeError(
"'package' must be a string (dot-separated), list, or tuple")
# Now put the module source file into the "build" area -- this is
# easy, we just copy it somewhere under self.build_lib (the build
# directory for Python source).
outfile = self.get_module_outfile(self.build_lib, package, module)
dir = os.path.dirname(outfile)
self.mkpath(dir)
return self.copy_file(module_file, outfile, preserve_mode=0)
def build_modules(self):
modules = self.find_modules()
for (package, module, module_file) in modules:
# Now "build" the module -- ie. copy the source file to
# self.build_lib (the build directory for Python source).
# (Actually, it gets copied to the directory for this package
# under self.build_lib.)
self.build_module(module, module_file, package)
def build_packages(self):
for package in self.packages:
# Get list of (package, module, module_file) tuples based on
# scanning the package directory. 'package' is only included
# in the tuple so that 'find_modules()' and
# 'find_package_tuples()' have a consistent interface; it's
# ignored here (apart from a sanity check). Also, 'module' is
# the *unqualified* module name (ie. no dots, no package -- we
# already know its package!), and 'module_file' is the path to
# the .py file, relative to the current directory
# (ie. including 'package_dir').
package_dir = self.get_package_dir(package)
modules = self.find_package_modules(package, package_dir)
# Now loop over the modules we found, "building" each one (just
# copy it to self.build_lib).
for (package_, module, module_file) in modules:
assert package == package_
self.build_module(module, module_file, package)
def byte_compile(self, files):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
prefix = self.build_lib
if prefix[-1] != os.sep:
prefix = prefix + os.sep
# XXX this code is essentially the same as the 'byte_compile()
# method of the "install_lib" command, except for the determination
# of the 'prefix' string. Hmmm.
if self.compile:
byte_compile(files, optimize=0,
force=self.force, prefix=prefix, dry_run=self.dry_run)
if self.optimize > 0:
byte_compile(files, optimize=self.optimize,
force=self.force, prefix=prefix, dry_run=self.dry_run)
|
the-stack_106_16411
|
# coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; Ruixiong Zhang; Lan Yu;
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode
# pylint: disable=no-member, invalid-name, relative-beyond-top-level
# pylint: disable=too-many-locals, too-many-statements, too-many-arguments, too-many-instance-attributes
""" Implementation of GriffinLim vocoder """
import os
import numpy as np
import librosa
from scipy.io.wavfile import write as write_wav
class GriffinLim:
def __init__(self, data_descriptions):
""" Reference to paper "Multiband Excitation Vocoder"
"""
assert data_descriptions.audio_featurizer is not None
assert data_descriptions.audio_featurizer.feat is not None
assert data_descriptions.hparams.audio_config is not None
params_func = data_descriptions.audio_featurizer.feat.params
params = params_func(data_descriptions.hparams.audio_config)
self.channels = params.filterbank_channel_count
self.sample_rate = params.sample_rate
self.window_length = int(params.window_length * self.sample_rate)
self.hop_length = int(params.frame_length * self.sample_rate)
self.n_fft = self._get_nfft(self.window_length)
self.lower_frequency_limit = params.lower_frequency_limit
self.upper_frequency_limit = params.upper_frequency_limit
self.window_type = params.window_type
self.EPS = 1e-10
def _get_nfft(self, window_length):
""" n_fft is an exponential power of 2 closest to and larger than win_length
"""
nfft = 2
while nfft < window_length:
nfft *= 2
return nfft
def __call__(self, feats, hparams, name=None):
linear_feats = self._logmel_to_linear(feats)
samples = self._griffin_lim(linear_feats, hparams.gl_iters)
#samples = samples / 32768
if not os.path.exists(hparams.output_directory):
os.makedirs(hparams.output_directory)
output_path = os.path.join(hparams.output_directory, '%s.wav' % str(name))
write_wav(output_path,
self.sample_rate,
(samples * np.iinfo(np.int16).max).astype(np.int16))
def _logmel_to_linear(self, feats):
"""Convert FBANK to linear spectrogram.
Args:
feats: FBANK feats, shape: [length, channels]
Returns:
linear_feats: Linear spectrogram
"""
assert feats.shape[1] == self.channels
linear_feats = np.power(10.0, feats)
linear_basis = librosa.filters.mel(self.sample_rate,
self.n_fft,
self.channels,
self.lower_frequency_limit,
self.upper_frequency_limit)
linear_basis = np.linalg.pinv(linear_basis)
linear_feats = np.maximum(self.EPS, np.dot(linear_basis, linear_feats.T).T)
return linear_feats
def _griffin_lim(self, linear_feats, gl_iters):
"""Convert linear spectrogram into waveform
Args:
linear_feats: linear spectrogram
            gl_iters: number of Griffin-Lim iterations
Returns:
waveform: Reconstructed waveform (N,).
"""
assert linear_feats.shape[1] == self.n_fft // 2 + 1
linear_feats = np.abs(linear_feats.T)
samples = librosa.griffinlim(S=linear_feats,
n_iter=gl_iters,
hop_length=self.hop_length,
win_length=self.window_length,
window=self.window_type)
return samples
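# Illustrative usage sketch (not part of the original module); data_descriptions
# and hparams stand in for the Athena objects normally passed in:
#
#     vocoder = GriffinLim(data_descriptions)
#     vocoder(feats, hparams, name="utt_0001")   # writes utt_0001.wav
#
# For a 16 kHz config with a 50 ms window (800 samples), _get_nfft() rounds
# the window up to the next power of two, i.e. n_fft = 1024.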
|
the-stack_106_16413
|
import base64
import requests
import re
import os
from github import Github
START_COMMENT = '<!--START_SECTION:waka-->'
END_COMMENT = '<!--END_SECTION:waka-->'
listReg = f'{START_COMMENT}[\\s\\S]+{END_COMMENT}'
user = os.getenv("INPUT_USERNAME")
waka_key = os.getenv("INPUT_WAKATIME_API_KEY")
ghtoken = os.getenv("INPUT_GH_TOKEN")
def makeGraph(percent: float):
done_block = "█"
empty_block = "░"
pc_rnd = round(percent)
return (f'{done_block*int(pc_rnd/4)}{empty_block*int( 25-int(pc_rnd/4))}')
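# Illustrative example (not part of the original script): makeGraph(50.0)
# returns 12 filled blocks followed by 13 empty ones out of a 25-block bar.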
def getStats():
data = requests.get(
f"https://wakatime.com/api/v1/users/current/stats/last_7_days?api_key={waka_key}").json()
lang_data = data['data']['languages']
data_list = []
for l in lang_data[:5]:
ln = len(l['name'])
ln_text = len(l['text'])
op = f"{l['name']}{' '*(12-ln)}{l['text']}{' '*(20-ln_text)}{makeGraph(l['percent'])} {l['percent']}"
data_list.append(op)
data = " \n".join(data_list)
return ("```text\n"+data+"\n```")
def decodeReadme(data: str):
decodedBytes = base64.b64decode(data)
return str(decodedBytes, "utf-8")
def generatenewReadme(stats: str, readme: str):
statsinReadme = f"{START_COMMENT}\n{stats}\n{END_COMMENT}"
return re.sub(listReg, statsinReadme, readme)
if __name__ == '__main__':
g = Github(ghtoken)
repo = g.get_repo(f"{user}/{user}")
contents = repo.get_readme()
stats = getStats()
rdmd = decodeReadme(contents.content)
newreadme = generatenewReadme(stats=stats, readme=rdmd)
if newreadme != rdmd:
repo.update_file(path=contents.path, message="Updated with Dev Metrics",
content=newreadme, sha=contents.sha, branch="master")
|
the-stack_106_16415
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The VMware API VM utility module to build SOAP object specs.
"""
import collections
import copy
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova.virt.vmwareapi import vim_util
LOG = logging.getLogger(__name__)
def build_datastore_path(datastore_name, path):
"""Build the datastore compliant path."""
return "[%s] %s" % (datastore_name, path)
def split_datastore_path(datastore_path):
"""
Split the VMware style datastore path to get the Datastore
name and the entity path.
"""
spl = datastore_path.split('[', 1)[1].split(']', 1)
path = ""
if len(spl) == 1:
datastore_url = spl[0]
else:
datastore_url, path = spl
return datastore_url, path.strip()
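# Illustrative example (not part of the original module); the datastore name
# and path are hypothetical:
#
#     build_datastore_path("datastore1", "vm1/vm1.vmdk")
#     # -> "[datastore1] vm1/vm1.vmdk"
#     split_datastore_path("[datastore1] vm1/vm1.vmdk")
#     # -> ("datastore1", "vm1/vm1.vmdk")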
def get_vm_create_spec(client_factory, instance, name, data_store_name,
vif_infos, os_type="otherGuest"):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = name
config_spec.guestId = os_type
# Allow nested ESX instances to host 64 bit VMs.
if os_type == "vmkernel5Guest":
config_spec.nestedHVEnabled = "True"
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
tools_info = client_factory.create('ns0:ToolsConfigInfo')
tools_info.afterPowerOn = True
tools_info.afterResume = True
tools_info.beforeGuestStandby = True
tools_info.beforeGuestShutdown = True
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
config_spec.numCPUs = int(instance['vcpus'])
config_spec.memoryMB = int(instance['memory_mb'])
vif_spec_list = []
for vif_info in vif_infos:
vif_spec = create_network_spec(client_factory, vif_info)
vif_spec_list.append(vif_spec)
device_config_spec = vif_spec_list
config_spec.deviceChange = device_config_spec
# add vm-uuid and iface-id.x values for Neutron
extra_config = []
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.vm-uuid"
opt.value = instance['uuid']
extra_config.append(opt)
i = 0
for vif_info in vif_infos:
if vif_info['iface_id']:
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.iface-id.%d" % i
opt.value = vif_info['iface_id']
extra_config.append(opt)
i += 1
config_spec.extraConfig = extra_config
return config_spec
def get_vm_resize_spec(client_factory, instance):
"""Provides updates for a VM spec."""
resize_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
resize_spec.numCPUs = int(instance['vcpus'])
resize_spec.memoryMB = int(instance['memory_mb'])
return resize_spec
def create_controller_spec(client_factory, key, adapter_type="lsiLogic"):
"""
Builds a Config Spec for the LSI or Bus Logic Controller's addition
which acts as the controller for the virtual hard disk to be attached
to the VM.
"""
# Create a controller for the Virtual Hard Disk
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if adapter_type == "busLogic":
virtual_controller = client_factory.create(
'ns0:VirtualBusLogicController')
elif adapter_type == "lsiLogicsas":
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicSASController')
else:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicController')
virtual_controller.key = key
virtual_controller.busNumber = 0
virtual_controller.sharedBus = "noSharing"
virtual_device_config.device = virtual_controller
return virtual_device_config
def create_network_spec(client_factory, vif_info):
"""
Builds a config spec for the addition of a new network
adapter to the VM.
"""
network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
network_spec.operation = "add"
# Keep compatible with other Hyper vif model parameter.
if vif_info['vif_model'] == "e1000":
vif_info['vif_model'] = "VirtualE1000"
vif = 'ns0:' + vif_info['vif_model']
net_device = client_factory.create(vif)
# NOTE(asomya): Only works on ESXi if the portgroup binding is set to
# ephemeral. Invalid configuration if set to static and the NIC does
# not come up on boot if set to dynamic.
network_ref = vif_info['network_ref']
network_name = vif_info['network_name']
mac_address = vif_info['mac_address']
backing = None
if network_ref and network_ref['type'] == 'OpaqueNetwork':
backing_name = ''.join(['ns0:VirtualEthernetCard',
'OpaqueNetworkBackingInfo'])
backing = client_factory.create(backing_name)
backing.opaqueNetworkId = network_ref['network-id']
backing.opaqueNetworkType = network_ref['network-type']
elif (network_ref and
network_ref['type'] == "DistributedVirtualPortgroup"):
backing_name = ''.join(['ns0:VirtualEthernetCardDistributed',
'VirtualPortBackingInfo'])
backing = client_factory.create(backing_name)
portgroup = client_factory.create(
'ns0:DistributedVirtualSwitchPortConnection')
portgroup.switchUuid = network_ref['dvsw']
portgroup.portgroupKey = network_ref['dvpg']
backing.port = portgroup
else:
backing = client_factory.create(
'ns0:VirtualEthernetCardNetworkBackingInfo')
backing.deviceName = network_name
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = True
connectable_spec.connected = True
net_device.connectable = connectable_spec
net_device.backing = backing
# The Server assigns a Key to the device. Here we pass a -ve temporary key.
# -ve because actual keys are +ve numbers and we don't
# want a clash with the key that server might associate with the device
net_device.key = -47
net_device.addressType = "manual"
net_device.macAddress = mac_address
net_device.wakeOnLanEnabled = True
network_spec.device = net_device
return network_spec
def get_vmdk_attach_config_spec(client_factory,
adapter_type="lsiLogic",
disk_type="preallocated",
file_path=None,
disk_size=None,
linked_clone=False,
controller_key=None,
unit_number=None,
device_name=None):
"""Builds the vmdk attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
# The controller Key pertains to the Key of the LSI Logic Controller, which
# controls this Hard Disk
device_config_spec = []
# For IDE devices, there are these two default controllers created in the
# VM having keys 200 and 201
if controller_key is None:
if adapter_type == "ide":
controller_key = 200
else:
controller_key = -101
controller_spec = create_controller_spec(client_factory,
controller_key,
adapter_type)
device_config_spec.append(controller_spec)
virtual_device_config_spec = create_virtual_disk_spec(client_factory,
controller_key, disk_type, file_path,
disk_size, linked_clone,
unit_number, device_name)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_cdrom_attach_config_spec(client_factory,
datastore,
file_path,
cdrom_unit_number):
"""Builds and returns the cdrom attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
# For IDE devices, there are these two default controllers created in the
# VM having keys 200 and 201
controller_key = 200
virtual_device_config_spec = create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vmdk_detach_config_spec(client_factory, device,
destroy_disk=False):
"""Builds the vmdk detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = detach_virtual_disk_spec(client_factory,
device,
destroy_disk)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vm_extra_config_spec(client_factory, extra_opts):
"""Builds extra spec fields from a dictionary."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
# add the key value pairs
extra_config = []
for key, value in extra_opts.iteritems():
opt = client_factory.create('ns0:OptionValue')
opt.key = key
opt.value = value
extra_config.append(opt)
config_spec.extraConfig = extra_config
return config_spec
def get_vmdk_path_and_adapter_type(hardware_devices, uuid=None):
"""Gets the vmdk file path and the storage adapter type."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
vmdk_file_path = None
vmdk_controller_key = None
disk_type = None
unit_number = 0
adapter_type_dict = {}
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
if device.backing.__class__.__name__ == \
"VirtualDiskFlatVer2BackingInfo":
if uuid:
if uuid in device.backing.fileName:
vmdk_file_path = device.backing.fileName
else:
vmdk_file_path = device.backing.fileName
vmdk_controller_key = device.controllerKey
if getattr(device.backing, 'thinProvisioned', False):
disk_type = "thin"
else:
if getattr(device.backing, 'eagerlyScrub', False):
disk_type = "eagerZeroedThick"
else:
disk_type = "preallocated"
if device.unitNumber > unit_number:
unit_number = device.unitNumber
elif device.__class__.__name__ == "VirtualLsiLogicController":
adapter_type_dict[device.key] = "lsiLogic"
elif device.__class__.__name__ == "VirtualBusLogicController":
adapter_type_dict[device.key] = "busLogic"
elif device.__class__.__name__ == "VirtualIDEController":
adapter_type_dict[device.key] = "ide"
elif device.__class__.__name__ == "VirtualLsiLogicSASController":
adapter_type_dict[device.key] = "lsiLogicsas"
adapter_type = adapter_type_dict.get(vmdk_controller_key, "")
return (vmdk_file_path, vmdk_controller_key, adapter_type,
disk_type, unit_number)
def get_rdm_disk(hardware_devices, uuid):
"""Gets the RDM disk key."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskRawDiskMappingVer1BackingInfo" and
device.backing.lunUuid == uuid):
return device
def get_copy_virtual_disk_spec(client_factory, adapter_type="lsiLogic",
disk_type="preallocated"):
"""Builds the Virtual Disk copy spec."""
dest_spec = client_factory.create('ns0:VirtualDiskSpec')
dest_spec.adapterType = get_vmdk_adapter_type(adapter_type)
dest_spec.diskType = disk_type
return dest_spec
def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic",
disk_type="preallocated"):
"""Builds the virtual disk create spec."""
create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type)
create_vmdk_spec.diskType = disk_type
create_vmdk_spec.capacityKb = size_in_kb
return create_vmdk_spec
def get_rdm_create_spec(client_factory, device, adapter_type="lsiLogic",
disk_type="rdmp"):
"""Builds the RDM virtual disk create spec."""
create_vmdk_spec = client_factory.create('ns0:DeviceBackedVirtualDiskSpec')
create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type)
create_vmdk_spec.diskType = disk_type
create_vmdk_spec.device = device
return create_vmdk_spec
def create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number):
"""Builds spec for the creation of a new Virtual CDROM to the VM."""
config_spec = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
config_spec.operation = "add"
cdrom = client_factory.create('ns0:VirtualCdrom')
cdrom_device_backing = client_factory.create(
'ns0:VirtualCdromIsoBackingInfo')
cdrom_device_backing.datastore = datastore
cdrom_device_backing.fileName = file_path
cdrom.backing = cdrom_device_backing
cdrom.controllerKey = controller_key
cdrom.unitNumber = cdrom_unit_number
cdrom.key = -1
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
cdrom.connectable = connectable_spec
config_spec.device = cdrom
return config_spec
def create_virtual_disk_spec(client_factory, controller_key,
disk_type="preallocated",
file_path=None,
disk_size=None,
linked_clone=False,
unit_number=None,
device_name=None):
"""
Builds spec for the creation of a new/ attaching of an already existing
Virtual Disk to the VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if (file_path is None) or linked_clone:
virtual_device_config.fileOperation = "create"
virtual_disk = client_factory.create('ns0:VirtualDisk')
if disk_type == "rdm" or disk_type == "rdmp":
disk_file_backing = client_factory.create(
'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
disk_file_backing.compatibilityMode = "virtualMode" \
if disk_type == "rdm" else "physicalMode"
disk_file_backing.diskMode = "independent_persistent"
disk_file_backing.deviceName = device_name or ""
else:
disk_file_backing = client_factory.create(
'ns0:VirtualDiskFlatVer2BackingInfo')
disk_file_backing.diskMode = "persistent"
if disk_type == "thin":
disk_file_backing.thinProvisioned = True
else:
if disk_type == "eagerZeroedThick":
disk_file_backing.eagerlyScrub = True
disk_file_backing.fileName = file_path or ""
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
if not linked_clone:
virtual_disk.backing = disk_file_backing
else:
virtual_disk.backing = copy.copy(disk_file_backing)
virtual_disk.backing.fileName = ""
virtual_disk.backing.parent = disk_file_backing
virtual_disk.connectable = connectable_spec
# The Server assigns a Key to the device. Here we pass a -ve random key.
# -ve because actual keys are +ve numbers and we don't
# want a clash with the key that server might associate with the device
virtual_disk.key = -100
virtual_disk.controllerKey = controller_key
virtual_disk.unitNumber = unit_number or 0
virtual_disk.capacityInKB = disk_size or 0
virtual_device_config.device = virtual_disk
return virtual_device_config
def detach_virtual_disk_spec(client_factory, device, destroy_disk=False):
"""
Builds spec for the detach of an already existing Virtual Disk from VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
if destroy_disk:
virtual_device_config.fileOperation = "destroy"
virtual_device_config.device = device
return virtual_device_config
def clone_vm_spec(client_factory, location,
power_on=False, snapshot=None, template=False):
"""Builds the VM clone spec."""
clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = location
clone_spec.powerOn = power_on
if snapshot:
clone_spec.snapshot = snapshot
clone_spec.template = template
return clone_spec
def relocate_vm_spec(client_factory, datastore=None, host=None,
disk_move_type="moveAllDiskBackingsAndAllowSharing"):
"""Builds the VM relocation spec."""
rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
rel_spec.datastore = datastore
rel_spec.diskMoveType = disk_move_type
if host:
rel_spec.host = host
return rel_spec
def get_dummy_vm_create_spec(client_factory, name, data_store_name):
"""Builds the dummy VM create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = name
config_spec.guestId = "otherGuest"
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
tools_info = client_factory.create('ns0:ToolsConfigInfo')
tools_info.afterPowerOn = True
tools_info.afterResume = True
tools_info.beforeGuestStandby = True
tools_info.beforeGuestShutdown = True
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
config_spec.numCPUs = 1
config_spec.memoryMB = 4
controller_key = -101
controller_spec = create_controller_spec(client_factory, controller_key)
disk_spec = create_virtual_disk_spec(client_factory, 1024, controller_key)
device_config_spec = [controller_spec, disk_spec]
config_spec.deviceChange = device_config_spec
return config_spec
def get_machine_id_change_spec(client_factory, machine_id_str):
"""Builds the machine id change config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt = client_factory.create('ns0:OptionValue')
opt.key = "machine.id"
opt.value = machine_id_str
virtual_machine_config_spec.extraConfig = [opt]
return virtual_machine_config_spec
def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
port_group_name, vlan_id):
"""Builds the virtual switch port group add spec."""
vswitch_port_group_spec = client_factory.create('ns0:HostPortGroupSpec')
vswitch_port_group_spec.name = port_group_name
vswitch_port_group_spec.vswitchName = vswitch_name
# VLAN ID of 0 means that VLAN tagging is not to be done for the network.
vswitch_port_group_spec.vlanId = int(vlan_id)
policy = client_factory.create('ns0:HostNetworkPolicy')
nicteaming = client_factory.create('ns0:HostNicTeamingPolicy')
nicteaming.notifySwitches = True
policy.nicTeaming = nicteaming
vswitch_port_group_spec.policy = policy
return vswitch_port_group_spec
def get_vnc_config_spec(client_factory, port):
"""Builds the vnc config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt_enabled = client_factory.create('ns0:OptionValue')
opt_enabled.key = "RemoteDisplay.vnc.enabled"
opt_enabled.value = "true"
opt_port = client_factory.create('ns0:OptionValue')
opt_port.key = "RemoteDisplay.vnc.port"
opt_port.value = port
extras = [opt_enabled, opt_port]
virtual_machine_config_spec.extraConfig = extras
return virtual_machine_config_spec
def search_datastore_spec(client_factory, file_name):
"""Builds the datastore search spec."""
search_spec = client_factory.create('ns0:HostDatastoreBrowserSearchSpec')
search_spec.matchPattern = [file_name]
return search_spec
def _get_token(results):
"""Get the token from the property results."""
return getattr(results, 'token', None)
def _get_reference_for_value(results, value):
for object in results.objects:
if object.obj.value == value:
return object
def _get_object_for_value(results, value):
for object in results.objects:
if object.propSet[0].val == value:
return object.obj
def _get_object_from_results(session, results, value, func):
while results:
token = _get_token(results)
object = func(results, value)
if object:
if token:
session._call_method(vim_util,
"cancel_retrieve",
token)
return object
if token:
results = session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
return None
def _cancel_retrieve_if_necessary(session, results):
token = _get_token(results)
if token:
results = session._call_method(vim_util,
"cancel_retrieve",
token)
def get_vm_ref_from_name(session, vm_name):
"""Get reference to the VM with the name specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, vm_name,
_get_object_for_value)
def get_vm_ref_from_uuid(session, instance_uuid):
"""Get reference to the VM with the uuid specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_value)
def get_vm_ref(session, instance):
"""Get reference to the VM through uuid or vm name."""
vm_ref = get_vm_ref_from_uuid(session, instance['uuid'])
if not vm_ref:
vm_ref = get_vm_ref_from_name(session, instance['name'])
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance['uuid'])
return vm_ref
def get_host_ref_from_id(session, host_id, property_list=None):
"""Get a host reference object for a host_id string."""
if property_list is None:
property_list = ['name']
host_refs = session._call_method(
vim_util, "get_objects",
"HostSystem", property_list)
return _get_object_from_results(session, host_refs, host_id,
_get_reference_for_value)
def get_host_id_from_vm_ref(session, vm_ref):
"""
This method allows you to find the managed object
ID of the host running a VM. Since vMotion can
change the value, you should not presume that this
is a value that you can cache for very long and
should be prepared to allow for it to change.
:param session: a vSphere API connection
:param vm_ref: a reference object to the running VM
:return: the host_id running the virtual machine
"""
# to prevent typographical errors below
property_name = 'runtime.host'
# a property collector in VMware vSphere Management API
# is a set of local representations of remote values.
# property_set here, is a local representation of the
# properties we are querying for.
property_set = session._call_method(
vim_util, "get_object_properties",
None, vm_ref, vm_ref._type, [property_name])
prop = property_from_property_set(
property_name, property_set)
if prop is not None:
prop = prop.val.value
else:
# reaching here represents an impossible state
raise RuntimeError(
"Virtual Machine %s exists without a runtime.host!"
% (vm_ref))
return prop
def property_from_property_set(property_name, property_set):
'''
Use this method to filter property collector results.
Because network traffic is expensive, multiple
VMwareAPI calls will sometimes pile-up properties
to be collected. That means results may contain
many different values for multiple purposes.
This helper will filter a list for a single result
and filter the properties of that result to find
the single value of whatever type resides in that
result. This could be a ManagedObjectReference ID
or a complex value.
:param property_name: name of property you want
:param property_set: all results from query
:return: the value of the property.
'''
for prop in property_set.objects:
p = _property_from_propSet(prop.propSet, property_name)
if p is not None:
return p
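# Illustrative usage sketch (not part of the original module); `property_set`
# is a hypothetical property-collector result for a VM:
#
#     prop = property_from_property_set('runtime.host', property_set)
#     host_moref = prop.val if prop is not None else None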
def _property_from_propSet(propSet, name='name'):
for p in propSet:
if p.name == name:
return p
def get_host_ref_for_vm(session, instance, props):
"""Get the ESXi host running a VM by its name."""
vm_ref = get_vm_ref(session, instance)
host_id = get_host_id_from_vm_ref(session, vm_ref)
return get_host_ref_from_id(session, host_id, props)
def get_host_name_for_vm(session, instance):
"""Get the ESXi host running a VM by its name."""
host_ref = get_host_ref_for_vm(session, instance, ['name'])
return get_host_name_from_host_ref(host_ref)
def get_host_name_from_host_ref(host_ref):
p = _property_from_propSet(host_ref.propSet)
if p is not None:
return p.val
def get_vm_state_from_name(session, vm_name):
vm_ref = get_vm_ref_from_name(session, vm_name)
vm_state = session._call_method(vim_util, "get_dynamic_property",
vm_ref, "VirtualMachine", "runtime.powerState")
return vm_state
def get_stats_from_cluster(session, cluster):
"""Get the aggregate resource stats of a cluster."""
cpu_info = {'vcpus': 0, 'cores': 0, 'vendor': [], 'model': []}
mem_info = {'total': 0, 'free': 0}
# Get the Host and Resource Pool Managed Object Refs
prop_dict = session._call_method(vim_util, "get_dynamic_properties",
cluster, "ClusterComputeResource",
["host", "resourcePool"])
if prop_dict:
host_ret = prop_dict.get('host')
if host_ret:
host_mors = host_ret.ManagedObjectReference
result = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"HostSystem", host_mors,
["summary.hardware", "summary.runtime"])
for obj in result.objects:
hardware_summary = obj.propSet[0].val
runtime_summary = obj.propSet[1].val
if runtime_summary.connectionState == "connected":
# Total vcpus is the sum of all pCPUs of individual hosts
# The overcommitment ratio is factored in by the scheduler
cpu_info['vcpus'] += hardware_summary.numCpuThreads
cpu_info['cores'] += hardware_summary.numCpuCores
cpu_info['vendor'].append(hardware_summary.vendor)
cpu_info['model'].append(hardware_summary.cpuModel)
res_mor = prop_dict.get('resourcePool')
if res_mor:
res_usage = session._call_method(vim_util, "get_dynamic_property",
res_mor, "ResourcePool", "summary.runtime.memory")
if res_usage:
# maxUsage is the memory limit of the cluster available to VM's
mem_info['total'] = int(res_usage.maxUsage / units.Mi)
# overallUsage is the hypervisor's view of memory usage by VM's
consumed = int(res_usage.overallUsage / units.Mi)
mem_info['free'] = mem_info['total'] - consumed
stats = {'cpu': cpu_info, 'mem': mem_info}
return stats
def get_cluster_ref_from_name(session, cluster_name):
"""Get reference to the cluster with the name specified."""
cls = session._call_method(vim_util, "get_objects",
"ClusterComputeResource", ["name"])
return _get_object_from_results(session, cls, cluster_name,
_get_object_for_value)
def get_host_ref(session, cluster=None):
"""Get reference to a host within the cluster specified."""
if cluster is None:
results = session._call_method(vim_util, "get_objects",
"HostSystem")
_cancel_retrieve_if_necessary(session, results)
host_mor = results.objects[0].obj
else:
host_ret = session._call_method(vim_util, "get_dynamic_property",
cluster, "ClusterComputeResource",
"host")
if not host_ret or not host_ret.ManagedObjectReference:
msg = _('No host available on cluster')
raise exception.NoValidHost(reason=msg)
host_mor = host_ret.ManagedObjectReference[0]
return host_mor
def propset_dict(propset):
"""Turn a propset list into a dictionary
PropSet is an optional attribute on ObjectContent objects
that are returned by the VMware API.
You can read more about these at:
http://pubs.vmware.com/vsphere-51/index.jsp
#com.vmware.wssdk.apiref.doc/
vmodl.query.PropertyCollector.ObjectContent.html
:param propset: a property "set" from ObjectContent
:return: dictionary representing property set
"""
if propset is None:
return {}
#TODO(hartsocks): once support for Python 2.6 is dropped
# change to {[(prop.name, prop.val) for prop in propset]}
return dict([(prop.name, prop.val) for prop in propset])
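# Illustrative example (not part of the original module); the property names
# and values are hypothetical:
#
#     propset_dict(obj_content.propSet)
#     # -> {'summary.type': 'VMFS', 'summary.name': 'datastore1', ...}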
def _get_datastore_ref_and_name(data_stores, datastore_regex=None):
# selects the datastore with the most freespace
"""Find a usable datastore in a given RetrieveResult object.
:param data_stores: a RetrieveResult object from vSphere API call
:param datastore_regex: an optional regular expression to match names
:return: datastore_ref, datastore_name, capacity, freespace
"""
DSRecord = collections.namedtuple(
'DSRecord', ['datastore', 'name', 'capacity', 'freespace'])
# we lean on checks performed in caller methods to validate the
# datastore reference is not None. If it is, the caller handles
# a None reference as appropriate in its context.
found_ds = DSRecord(datastore=None, name=None, capacity=None, freespace=0)
# datastores is actually a RetrieveResult object from vSphere API call
for obj_content in data_stores.objects:
# the propset attribute "need not be set" by returning API
if not hasattr(obj_content, 'propSet'):
continue
propdict = propset_dict(obj_content.propSet)
        # vSphere doesn't support CIFS or vfat for datastores,
        # so those datastore types are filtered out here
ds_type = propdict['summary.type']
ds_name = propdict['summary.name']
if ((ds_type == 'VMFS' or ds_type == 'NFS') and
propdict.get('summary.accessible')):
if datastore_regex is None or datastore_regex.match(ds_name):
new_ds = DSRecord(
datastore=obj_content.obj,
name=ds_name,
capacity=propdict['summary.capacity'],
freespace=propdict['summary.freeSpace'])
# find the largest freespace to return
if new_ds.freespace > found_ds.freespace:
found_ds = new_ds
#TODO(hartsocks): refactor driver to use DSRecord namedtuple
# using DSRecord through out will help keep related information
# together and improve readability and organisation of the code.
if found_ds.datastore is not None:
return (found_ds.datastore, found_ds.name,
found_ds.capacity, found_ds.freespace)
def get_datastore_ref_and_name(session, cluster=None, host=None,
datastore_regex=None):
"""Get the datastore list and choose the first local storage."""
if cluster is None and host is None:
data_stores = session._call_method(vim_util, "get_objects",
"Datastore", ["summary.type", "summary.name",
"summary.capacity", "summary.freeSpace",
"summary.accessible"])
else:
if cluster is not None:
datastore_ret = session._call_method(
vim_util,
"get_dynamic_property", cluster,
"ClusterComputeResource", "datastore")
else:
datastore_ret = session._call_method(
vim_util,
"get_dynamic_property", host,
"HostSystem", "datastore")
if not datastore_ret:
raise exception.DatastoreNotFound()
data_store_mors = datastore_ret.ManagedObjectReference
data_stores = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"Datastore", data_store_mors,
["summary.type", "summary.name",
"summary.capacity", "summary.freeSpace",
"summary.accessible"])
while data_stores:
token = _get_token(data_stores)
results = _get_datastore_ref_and_name(data_stores, datastore_regex)
if results:
if token:
session._call_method(vim_util,
"cancel_retrieve",
token)
return results
if token:
data_stores = session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
if datastore_regex:
raise exception.DatastoreNotFound(
_("Datastore regex %s did not match any datastores")
% datastore_regex.pattern)
else:
raise exception.DatastoreNotFound()
raise exception.DatastoreNotFound()
def get_vmdk_backed_disk_uuid(hardware_devices, volume_uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
volume_uuid in device.backing.fileName):
return device.backing.uuid
def get_vmdk_backed_disk_device(hardware_devices, uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
device.backing.uuid == uuid):
return device
def get_vmdk_volume_disk(hardware_devices, path=None):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk"):
if not path or path == device.backing.fileName:
return device
def get_res_pool_ref(session, cluster, node_mo_id):
"""Get the resource pool."""
if cluster is None:
# With no cluster named, use the root resource pool.
results = session._call_method(vim_util, "get_objects",
"ResourcePool")
_cancel_retrieve_if_necessary(session, results)
# The 0th resource pool is always the root resource pool on both ESX
# and vCenter.
res_pool_ref = results.objects[0].obj
else:
if cluster.value == node_mo_id:
# Get the root resource pool of the cluster
res_pool_ref = session._call_method(vim_util,
"get_dynamic_property",
cluster,
"ClusterComputeResource",
"resourcePool")
return res_pool_ref
def get_all_cluster_mors(session):
"""Get all the clusters in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ClusterComputeResource", ["name"])
_cancel_retrieve_if_necessary(session, results)
return results.objects
except Exception as excep:
LOG.warn(_("Failed to get cluster references %s") % excep)
def get_all_res_pool_mors(session):
"""Get all the resource pools in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ResourcePool")
_cancel_retrieve_if_necessary(session, results)
return results.objects
except Exception as excep:
LOG.warn(_("Failed to get resource pool references " "%s") % excep)
def get_dynamic_property_mor(session, mor_ref, attribute):
"""Get the value of an attribute for a given managed object."""
return session._call_method(vim_util, "get_dynamic_property",
mor_ref, mor_ref._type, attribute)
def find_entity_mor(entity_list, entity_name):
"""Returns managed object ref for given cluster or resource pool name."""
return [mor for mor in entity_list if (hasattr(mor, 'propSet') and
mor.propSet[0].val == entity_name)]
def get_all_cluster_refs_by_name(session, path_list):
"""Get reference to the Cluster, ResourcePool with the path specified.
The path is the display name. This can be the full path as well.
The input will have the list of clusters and resource pool names
"""
cls = get_all_cluster_mors(session)
if not cls:
return
res = get_all_res_pool_mors(session)
if not res:
return
path_list = [path.strip() for path in path_list]
list_obj = []
for entity_path in path_list:
# entity_path could be unique cluster and/or resource-pool name
res_mor = find_entity_mor(res, entity_path)
cls_mor = find_entity_mor(cls, entity_path)
cls_mor.extend(res_mor)
for mor in cls_mor:
list_obj.append((mor.obj, mor.propSet[0].val))
return get_dict_mor(session, list_obj)
def get_dict_mor(session, list_obj):
"""The input is a list of objects in the form
(manage_object,display_name)
The managed object will be in the form
{ value = "domain-1002", _type = "ClusterComputeResource" }
Output data format:
dict_mors = {
'respool-1001': { 'cluster_mor': clusterMor,
'res_pool_mor': resourcePoolMor,
'name': display_name },
'domain-1002': { 'cluster_mor': clusterMor,
'res_pool_mor': resourcePoolMor,
'name': display_name },
}
"""
dict_mors = {}
for obj_ref, path in list_obj:
if obj_ref._type == "ResourcePool":
# Get owner cluster-ref mor
cluster_ref = get_dynamic_property_mor(session, obj_ref, "owner")
dict_mors[obj_ref.value] = {'cluster_mor': cluster_ref,
'res_pool_mor': obj_ref,
'name': path,
}
else:
# Get default resource pool of the cluster
res_pool_ref = get_dynamic_property_mor(session,
obj_ref, "resourcePool")
dict_mors[obj_ref.value] = {'cluster_mor': obj_ref,
'res_pool_mor': res_pool_ref,
'name': path,
}
return dict_mors
def get_mo_id_from_instance(instance):
"""Return the managed object ID from the instance.
The instance['node'] will have the hypervisor_hostname field of the
compute node on which the instance exists or will be provisioned.
This will be of the form
'respool-1001(MyResPoolName)'
'domain-1001(MyClusterName)'
"""
return instance['node'].partition('(')[0]
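# Illustrative example (hypothetical instance dict, not from the original code):
#
#   get_mo_id_from_instance({'node': 'domain-1001(MyClusterName)'})
#   # -> 'domain-1001'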
def get_vmdk_adapter_type(adapter_type):
"""Return the adapter type to be used in vmdk descriptor.
Adapter type in the vmdk descriptor is the same for LSI-SAS & LSILogic
because Virtual Disk Manager API does not recognize the newer controller
types.
"""
if adapter_type == "lsiLogicsas":
vmdk_adapter_type = "lsiLogic"
else:
vmdk_adapter_type = adapter_type
return vmdk_adapter_type
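# Illustrative example: only the LSI-SAS name is rewritten, everything else
# passes through unchanged.
#
#   get_vmdk_adapter_type("lsiLogicsas")  # -> "lsiLogic"
#   get_vmdk_adapter_type("busLogic")     # -> "busLogic"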
|
the-stack_106_16416
|
#!/usr/bin/env python3
import arrow
from bs4 import BeautifulSoup
import datetime
import re
import requests
import pandas as pd
from pytz import timezone
ab_timezone = 'Canada/Mountain'
def convert_time_str(ts):
"""Takes a time string and converts into an aware datetime object."""
dt_naive = datetime.datetime.strptime(ts, ' %b %d, %Y %H:%M')
localtz = timezone('Canada/Mountain')
dt_aware = localtz.localize(dt_naive)
return dt_aware
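# Illustrative example (hypothetical timestamp mirroring the AESO "Last Update"
# field; note the leading space expected by the strptime format):
#
#   convert_time_str(' Jun 01, 2021 13:05')
#   # -> datetime(2021, 6, 1, 13, 5) localized to Canada/Mountain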
def fetch_production(zone_key='CA-AB', session=None, target_datetime=None, logger=None) -> dict:
"""Requests the last known production mix (in MW) of a given country."""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
r = session or requests.session()
url = 'http://ets.aeso.ca/ets_web/ip/Market/Reports/CSDReportServlet'
response = r.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
findtime = soup.find('td', text=re.compile('Last Update')).get_text()
time_string = findtime.split(':', 1)[1]
dt = convert_time_str(time_string)
df_generations = pd.read_html(response.text, match='GENERATION', skiprows=1, index_col=0, header=0)
for idx, df in enumerate(df_generations):
try:
total_net_generation = df_generations[idx]['TNG']
maximum_capability = df_generations[idx]['MC']
except KeyError:
continue
return {
'datetime': dt,
'zoneKey': zone_key,
'production': {
'gas': float(total_net_generation['GAS']),
'hydro': float(total_net_generation['HYDRO']),
'solar': float(total_net_generation['SOLAR']),
'wind': float(total_net_generation['WIND']),
'biomass': float(total_net_generation['OTHER']),
'unknown': float(total_net_generation['DUAL FUEL']),
'coal': float(total_net_generation['COAL'])
},
'storage': {
'battery': float(total_net_generation['ENERGY STORAGE'])
},
'capacity': {
'gas': float(maximum_capability['GAS']),
'hydro': float(maximum_capability['HYDRO']),
'battery storage': float(maximum_capability['ENERGY STORAGE']),
'solar': float(maximum_capability['SOLAR']),
'wind': float(maximum_capability['WIND']),
'biomass': float(maximum_capability['OTHER']),
'unknown': float(maximum_capability['DUAL FUEL']),
'coal': float(maximum_capability['COAL'])
},
'source': 'ets.aeso.ca',
}
def fetch_price(zone_key='CA-AB', session=None, target_datetime=None, logger=None) -> dict:
"""Requests the last known power price of a given country."""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
r = session or requests.session()
url = 'http://ets.aeso.ca/ets_web/ip/Market/Reports/SMPriceReportServlet?contentType=html/'
response = r.get(url)
df_prices = pd.read_html(response.text, match='Price', index_col=0, header=0)
prices = df_prices[1]
data = {}
for rowIndex, row in prices.iterrows():
price = row['Price ($)']
if (isfloat(price)):
hour = int(rowIndex.split(' ')[1]) - 1
data[rowIndex] = {
'datetime': arrow.get(rowIndex, 'MM/DD/YYYY').replace(hour=hour, tzinfo=ab_timezone).datetime,
'zoneKey': zone_key,
'currency': 'CAD',
'source': 'ets.aeso.ca',
'price': float(price),
}
return [data[k] for k in sorted(data.keys())]
def fetch_exchange(zone_key1='CA-AB', zone_key2='CA-BC', session=None, target_datetime=None, logger=None) -> dict:
"""Requests the last known power exchange (in MW) between two countries."""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
r = session or requests.session()
url = 'http://ets.aeso.ca/ets_web/ip/Market/Reports/CSDReportServlet'
response = r.get(url)
df_exchanges = pd.read_html(response.text, match='INTERCHANGE', skiprows=0, index_col=0)
flows = {
'CA-AB->CA-BC': df_exchanges[1][1]['British Columbia'],
'CA-AB->CA-SK': df_exchanges[1][1]['Saskatchewan'],
'CA-AB->US-MT': df_exchanges[1][1]['Montana'],
'CA-AB->US-NW-NWMT': df_exchanges[1][1]['Montana']
}
sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2]))
if sortedZoneKeys not in flows:
raise NotImplementedError('This exchange pair is not implemented')
return {
'datetime': arrow.now(tz=ab_timezone).datetime,
'sortedZoneKeys': sortedZoneKeys,
'netFlow': float(flows[sortedZoneKeys]),
'source': 'ets.aeso.ca'
}
def isfloat(value) -> bool:
try:
float(value)
return True
except ValueError:
return False
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
print('fetch_production() ->')
print(fetch_production())
print('fetch_price() ->')
print(fetch_price())
print('fetch_exchange(CA-AB, CA-BC) ->')
print(fetch_exchange('CA-AB', 'CA-BC'))
print('fetch_exchange(CA-AB, CA-SK) ->')
print(fetch_exchange('CA-AB', 'CA-SK'))
print('fetch_exchange(CA-AB, US-MT) ->')
print(fetch_exchange('CA-AB', 'US-MT'))
|
the-stack_106_16417
|
#!/usr/bin/python3.9
from pathlib import Path
class filetypeDetector(object):
FILETYPE_PDF = 'pdf'
FILETYPE_TIFF = 'tiff'
FILETYPE_JPEG = 'jpeg'
FILETYPE_PNG = 'png'
def __init__(self, file_path: str):
self.file = Path(file_path)
def detect(self):
file_handle = open(self.file.resolve(), "rb")
try:
byte = file_handle.read(10)
finally:
file_handle.close()
# PDF
if byte[0:4] == b'%PDF':
return self.FILETYPE_PDF
# TIFF
if byte[0:4] == b'\x4D\x4D\x00\x2A' or \
byte[0:4] == b'\x4D\x4D\x00\x2B' or \
byte[0:4] == b'\x49\x49\x2A\x00' or \
byte[0:4] == b'\x49\x49\x2B\x00':
return self.FILETYPE_TIFF
# JPEG
if byte[0:2] == b'\xFF\xD8':
return self.FILETYPE_JPEG
# PNG
if byte[0:8] == b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A':
return self.FILETYPE_PNG
return None
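# Illustrative usage sketch (assumes a file 'scan.pdf' exists on disk; not part
# of the original module):
#
#   detector = filetypeDetector('scan.pdf')
#   if detector.detect() == filetypeDetector.FILETYPE_PDF:
#       print('looks like a PDF')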
|
the-stack_106_16418
|
from graph.ApplyFunction import ApplyFunction
class Transversal:
def __init__(self, graph, origin, udf):
self.graph = graph
self.origin = origin
self.udf = udf
self.app = ApplyFunction(graph, [])
if origin[0].source:
self.orientation = "successors"
elif origin[0].sink:
self.orientation = "predecessors"
else:
print("BAD DEFINED ORIGIN")
return
# Transversal must run from every source in origin and must handle a list of concatenated binaries
for operator in iter(origin):
print("operator: ", operator.id)
node = graph.get_node(operator.id)
self.app.visit_node(
node=node,
udf=self.udf,
orientation=self.orientation,
last_iter=None
)
def get_collected_data(self):
return self.app.get_collection()
#class Results
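# Illustrative usage sketch (graph, a source operator and a user-defined
# function are assumed to exist elsewhere in the project):
#
#   t = Transversal(graph, origin=[source_op], udf=my_udf)
#   data = t.get_collected_data()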
|
the-stack_106_16419
|
"""
Visitor hierarchy to inspect and/or create IETs.
The main Visitor class is adapted from https://github.com/coneoproject/COFFEE.
"""
from __future__ import absolute_import
import inspect
from collections import Iterable, OrderedDict, defaultdict
from operator import attrgetter
import cgen as c
import numpy as np
from devito.cgen_utils import blankline, ccode
from devito.dimension import LoweredDimension
from devito.exceptions import VisitorException
from devito.ir.iet.nodes import Iteration, Node, UnboundedIndex
from devito.types import Scalar
from devito.tools import as_tuple, filter_ordered, filter_sorted, flatten, ctypes_to_C
__all__ = ['FindNodes', 'FindSections', 'FindSymbols', 'MapExpressions',
'IsPerfectIteration', 'SubstituteExpression', 'printAST', 'CGen',
'ResolveTimeStepping', 'Transformer', 'NestedTransformer',
'FindAdjacentIterations', 'MergeOuterIterations', 'MapIteration']
class Visitor(object):
"""
A generic visitor for an Expression/Iteration tree.
To define handlers, subclasses should define :data:`visit_Foo`
methods for each class :data:`Foo` they want to handle.
If a specific method for a class :data:`Foo` is not found, the MRO
of the class is walked in order until a matching method is found.
The method signature is:
.. code-block::
def visit_Foo(self, o, [*args, **kwargs]):
pass
The handler is responsible for visiting the children (if any) of
the node :data:`o`. :data:`*args` and :data:`**kwargs` may be
used to pass information up and down the call stack. You can also
pass named keyword arguments, e.g.:
.. code-block::
def visit_Foo(self, o, parent=None, *args, **kwargs):
pass
"""
def __init__(self):
handlers = {}
# visit methods are spelt visit_Foo.
prefix = "visit_"
# Inspect the methods on this instance to find out which
# handlers are defined.
for (name, meth) in inspect.getmembers(self, predicate=inspect.ismethod):
if not name.startswith(prefix):
continue
# Check the argument specification
# Valid options are:
# visit_Foo(self, o, [*args, **kwargs])
argspec = inspect.getargspec(meth)
if len(argspec.args) < 2:
raise RuntimeError("Visit method signature must be "
"visit_Foo(self, o, [*args, **kwargs])")
handlers[name[len(prefix):]] = meth
self._handlers = handlers
"""
:attr:`default_args`. A dict of default keyword arguments for the visitor.
These are not used by default in :meth:`visit`, however, a caller may pass
them explicitly to :meth:`visit` by accessing :attr:`default_args`.
For example::
.. code-block::
v = FooVisitor()
v.visit(node, **v.default_args)
"""
default_args = {}
@classmethod
def default_retval(cls):
"""A method that returns an object to use to populate return values.
If your visitor combines values in a tree-walk, it may be useful to
provide a object to combine the results into. :meth:`default_retval`
may be defined by the visitor to be called to provide an empty object
of appropriate type.
"""
return None
def lookup_method(self, instance):
"""Look up a handler method for a visitee.
:param instance: The instance to look up a method for.
"""
cls = instance.__class__
try:
# Do we have a method handler defined for this type name
return self._handlers[cls.__name__]
except KeyError:
# No, walk the MRO.
for klass in cls.mro()[1:]:
entry = self._handlers.get(klass.__name__)
if entry:
# Save it on this type name for faster lookup next time
self._handlers[cls.__name__] = entry
return entry
raise RuntimeError("No handler found for class %s", cls.__name__)
def visit(self, o, *args, **kwargs):
"""Apply this :class:`Visitor` to an AST.
:param o: The :class:`Node` to visit.
:param args: Optional arguments to pass to the visit methods.
:param kwargs: Optional keyword arguments to pass to the visit methods.
"""
meth = self.lookup_method(o)
return meth(o, *args, **kwargs)
def visit_object(self, o, **kwargs):
return self.default_retval()
def visit_Node(self, o, **kwargs):
return self.visit(o.children, **kwargs)
def reuse(self, o, *args, **kwargs):
"""A visit method to reuse a node, ignoring children."""
return o
def maybe_rebuild(self, o, *args, **kwargs):
"""A visit method that rebuilds nodes if their children have changed."""
ops, okwargs = o.operands()
new_ops = [self.visit(op, *args, **kwargs) for op in ops]
if all(a is b for a, b in zip(ops, new_ops)):
return o
return o._rebuild(*new_ops, **okwargs)
def always_rebuild(self, o, *args, **kwargs):
"""A visit method that always rebuilds nodes."""
ops, okwargs = o.operands()
new_ops = [self.visit(op, *args, **kwargs) for op in ops]
return o._rebuild(*new_ops, **okwargs)
class PrintAST(Visitor):
_depth = 0
"""
Return a representation of the Iteration/Expression tree as a string,
highlighting tree structure and node properties while dropping non-essential
information.
"""
def __init__(self, verbose=True):
super(PrintAST, self).__init__()
self.verbose = verbose
@classmethod
def default_retval(cls):
return "<>"
@property
def indent(self):
return ' ' * self._depth
def visit_Node(self, o):
return self.indent + '<%s>' % o.__class__.__name__
def visit_Generable(self, o):
body = ' %s' % str(o) if self.verbose else ''
return self.indent + '<C.%s%s>' % (o.__class__.__name__, body)
def visit_Element(self, o):
body = ' %s' % str(o.element) if self.verbose else ''
return self.indent + '<Element%s>' % body
def visit_Callable(self, o):
self._depth += 1
body = self.visit(o.children)
self._depth -= 1
return self.indent + '<Callable %s>\n%s' % (o.name, body)
def visit_list(self, o):
return ('\n').join([self.visit(i) for i in o])
def visit_tuple(self, o):
return '\n'.join([self.visit(i) for i in o])
def visit_Block(self, o):
self._depth += 1
if self.verbose:
body = [self.visit(o.header), self.visit(o.body), self.visit(o.footer)]
else:
body = [self.visit(o.body)]
self._depth -= 1
return self.indent + "<%s>\n%s" % (o.__class__.__name__, '\n'.join(body))
def visit_Iteration(self, o):
self._depth += 1
body = self.visit(o.children)
self._depth -= 1
if self.verbose:
detail = '::%s::%s::%s' % (o.index, o.limits, o.offsets)
props = [str(i) for i in o.properties]
props = '[%s] ' % ','.join(props) if props else ''
else:
detail, props = '', ''
return self.indent + "<%sIteration %s%s>\n%s" % (props, o.dim.name, detail, body)
def visit_Expression(self, o):
if self.verbose:
body = "%s = %s" % (o.expr.lhs, o.expr.rhs)
return self.indent + "<Expression %s>" % body
else:
return self.indent + str(o)
class CGen(Visitor):
"""
Return a representation of the Iteration/Expression tree as a :module:`cgen` tree.
"""
def _args_decl(self, args):
"""Convert an iterable of :class:`Argument` into cgen format."""
ret = []
for i in args:
if i.is_ScalarArgument:
ret.append(c.Value('const %s' % c.dtype_to_ctype(i.dtype), i.name))
elif i.is_TensorArgument:
ret.append(c.Value(c.dtype_to_ctype(i.dtype),
'*restrict %s_vec' % i.name))
else:
ret.append(c.Value('void', '*_%s' % i.name))
return ret
def _args_cast(self, args):
"""Build cgen type casts for an iterable of :class:`Argument`."""
ret = []
for i in args:
if i.is_TensorArgument:
align = "__attribute__((aligned(64)))"
shape = ''.join(["[%s]" % ccode(j)
for j in i.provider.symbolic_shape[1:]])
lvalue = c.POD(i.dtype, '(*restrict %s)%s %s' % (i.name, shape, align))
rvalue = '(%s (*)%s) %s' % (c.dtype_to_ctype(i.dtype), shape,
'%s_vec' % i.name)
ret.append(c.Initializer(lvalue, rvalue))
elif i.is_PtrArgument:
ctype = ctypes_to_C(i.dtype)
lvalue = c.Pointer(c.Value(ctype, i.name))
rvalue = '(%s*) %s' % (ctype, '_%s' % i.name)
ret.append(c.Initializer(lvalue, rvalue))
return ret
def visit_tuple(self, o):
return tuple(self.visit(i) for i in o)
def visit_Block(self, o):
body = flatten(self.visit(i) for i in o.children)
return c.Module(o.header + (c.Block(body),) + o.footer)
def visit_List(self, o):
body = flatten(self.visit(i) for i in o.children)
return c.Module(o.header + (c.Collection(body),) + o.footer)
def visit_Element(self, o):
return o.element
def visit_Expression(self, o):
return c.Assign(ccode(o.expr.lhs), ccode(o.expr.rhs))
def visit_LocalExpression(self, o):
return c.Initializer(c.Value(c.dtype_to_ctype(o.dtype),
ccode(o.expr.lhs)), ccode(o.expr.rhs))
def visit_Call(self, o):
return c.Statement('%s(%s)' % (o.name, ','.join(o.params)))
def visit_Iteration(self, o):
body = flatten(self.visit(i) for i in o.children)
# Start
if o.offsets[0] != 0:
start = "%s + %s" % (o.limits[0], -o.offsets[0])
try:
start = eval(start)
except (NameError, TypeError):
pass
else:
start = o.limits[0]
# Bound
if o.offsets[1] != 0:
end = "%s - %s" % (o.limits[1], o.offsets[1])
try:
end = eval(end)
except (NameError, TypeError):
pass
else:
end = o.limits[1]
# For reverse dimensions flip loop bounds
if o.reverse:
loop_init = 'int %s = %s' % (o.index, ccode('%s - 1' % end))
loop_cond = '%s >= %s' % (o.index, ccode(start))
loop_inc = '%s -= %s' % (o.index, o.limits[2])
else:
loop_init = 'int %s = %s' % (o.index, ccode(start))
loop_cond = '%s < %s' % (o.index, ccode(end))
loop_inc = '%s += %s' % (o.index, o.limits[2])
# Append unbounded indices, if any
if o.uindices:
uinit = ['%s = %s' % (i.index, ccode(i.start)) for i in o.uindices]
loop_init = c.Line(', '.join([loop_init] + uinit))
ustep = ['%s = %s' % (i.index, ccode(i.step)) for i in o.uindices]
loop_inc = c.Line(', '.join([loop_inc] + ustep))
# Create For header+body
handle = c.For(loop_init, loop_cond, loop_inc, c.Block(body))
# Attach pragmas, if any
if o.pragmas:
handle = c.Module(o.pragmas + (handle,))
return handle
def visit_Callable(self, o):
body = flatten(self.visit(i) for i in o.children)
decls = self._args_decl(o.parameters)
casts = self._args_cast(o.parameters)
signature = c.FunctionDeclaration(c.Value(o.retval, o.name), decls)
return c.FunctionBody(signature, c.Block(casts + body))
def visit_Operator(self, o):
# Kernel signature and body
body = flatten(self.visit(i) for i in o.children)
decls = self._args_decl(o.parameters)
casts = self._args_cast(o.parameters)
signature = c.FunctionDeclaration(c.Value(o.retval, o.name), decls)
retval = [c.Statement("return 0")]
kernel = c.FunctionBody(signature, c.Block(casts + body + retval))
# Elemental functions
efuncs = [i.root.ccode for i in o.func_table.values() if i.local] + [blankline]
# Header files, extra definitions, ...
header = [c.Line(i) for i in o._headers]
includes = [c.Include(i, system=False) for i in o._includes]
includes += [blankline]
cglobals = list(o._globals)
if o._compiler.src_ext == 'cpp':
cglobals += [c.Extern('C', signature)]
cglobals = [i for j in cglobals for i in (j, blankline)]
return c.Module(header + includes + cglobals + efuncs + [kernel])
class FindSections(Visitor):
@classmethod
def default_retval(cls):
return OrderedDict()
"""Find all sections in an Iteration/Expression tree. A section is a map
from an iteration space (i.e., a sequence of :class:`Iteration` objects) to
a set of expressions (i.e., the :class:`Expression` objects enclosed by the
iteration space).
"""
def visit_tuple(self, o, ret=None, queue=None):
if ret is None:
ret = self.default_retval()
for i in o:
ret = self.visit(i, ret=ret, queue=queue)
return ret
def visit_Node(self, o, ret=None, queue=None):
if ret is None:
ret = self.default_retval()
for i in o.children:
ret = self.visit(i, ret=ret, queue=queue)
return ret
def visit_Iteration(self, o, ret=None, queue=None):
if queue is None:
queue = [o]
else:
queue.append(o)
for i in o.children:
ret = self.visit(i, ret=ret, queue=queue)
queue.remove(o)
return ret
def visit_Expression(self, o, ret=None, queue=None):
if ret is None:
ret = self.default_retval()
if queue is not None:
ret.setdefault(tuple(queue), []).append(o)
return ret
visit_Element = visit_Expression
visit_Call = visit_Expression
class MapExpressions(FindSections):
"""
Map :class:`Expression` and :class:`Call` objects in the Iteration/Expression
tree to their respective section.
"""
def visit_Call(self, o, ret=None, queue=None):
if ret is None:
ret = self.default_retval()
ret[o] = as_tuple(queue)
return ret
visit_Expression = visit_Call
visit_Element = FindSections.visit_Node
class MapIteration(FindSections):
"""
Map each :class:`Iteration` object in the Iteration/Expression tree to the
enclosed :class:`Expression` and :class:`Call` objects.
"""
def visit_Call(self, o, ret=None, queue=None):
if ret is None:
ret = self.default_retval()
for i in as_tuple(queue):
ret.setdefault(i, []).append(o)
return ret
visit_Expression = visit_Call
visit_Element = FindSections.visit_Node
class FindSymbols(Visitor):
@classmethod
def default_retval(cls):
return []
"""Find symbols in an Iteration/Expression tree.
:param mode: Drive the search. Accepted values are: ::
* 'kernel-data' (default): Collect :class:`SymbolicFunction` objects.
* 'symbolics': Collect :class:`AbstractSymbol` objects.
* 'symbolics-writes': Collect written :class:`AbstractSymbol` objects.
* 'free-symbols': Collect all free symbols.
* 'dimensions': Collect :class:`Dimension` objects only.
"""
rules = {
'kernel-data': lambda e: [i for i in e.functions if i.is_SymbolicFunction],
'symbolics': lambda e: e.functions,
'symbolics-writes': lambda e: as_tuple(e.write),
'free-symbols': lambda e: e.expr.free_symbols,
'dimensions': lambda e: e.dimensions,
}
def __init__(self, mode='kernel-data'):
super(FindSymbols, self).__init__()
self.rule = self.rules[mode]
def visit_tuple(self, o):
symbols = flatten([self.visit(i) for i in o])
return filter_sorted(symbols, key=attrgetter('name'))
def visit_Iteration(self, o):
symbols = flatten([self.visit(i) for i in o.children])
return filter_sorted(symbols, key=attrgetter('name'))
def visit_Expression(self, o):
return filter_sorted([f for f in self.rule(o)], key=attrgetter('name'))
class FindNodes(Visitor):
@classmethod
def default_retval(cls):
return []
"""
Find :class:`Node` instances.
:param match: Pattern to look for.
:param mode: Drive the search. Accepted values are: ::
* 'type' (default): Collect all instances of type ``match``.
* 'scope': Return the scope in which the object ``match`` appears.
"""
rules = {
'type': lambda match, o: isinstance(o, match),
'scope': lambda match, o: match in flatten(o.children)
}
def __init__(self, match, mode='type'):
super(FindNodes, self).__init__()
self.match = match
self.rule = self.rules[mode]
def visit_object(self, o, ret=None):
return ret
def visit_tuple(self, o, ret=None):
for i in o:
ret = self.visit(i, ret=ret)
return ret
def visit_Node(self, o, ret=None):
if ret is None:
ret = self.default_retval()
if self.rule(self.match, o):
ret.append(o)
for i in o.children:
ret = self.visit(i, ret=ret)
return ret
class FindAdjacentIterations(Visitor):
@classmethod
def default_retval(cls):
return OrderedDict([('seen_iteration', False)])
"""
Return a mapper from nodes N in an Expression/Iteration tree to sequences of
:class:`Iteration` objects I = [I_0, I_1, ...], where N is the direct ancestor of
the items in I and all items in I are adjacent nodes in the tree.
"""
def handler(self, o, parent=None, ret=None):
if ret is None:
ret = self.default_retval()
if parent is None:
return ret
group = []
for i in o:
ret = self.visit(i, parent=parent, ret=ret)
if ret['seen_iteration'] is True:
group.append(i)
else:
if len(group) > 1:
ret.setdefault(parent, []).append(tuple(group))
# Reset the group, Iterations no longer adjacent
group = []
# Potential leftover
if len(group) > 1:
ret.setdefault(parent, []).append(tuple(group))
return ret
def visit_object(self, o, parent=None, ret=None):
return ret
def visit_tuple(self, o, parent=None, ret=None):
return self.handler(o, parent=parent, ret=ret)
def visit_Node(self, o, parent=None, ret=None):
ret = self.handler(o.children, parent=o, ret=ret)
ret['seen_iteration'] = False
return ret
def visit_Iteration(self, o, parent=None, ret=None):
ret = self.handler(o.children, parent=o, ret=ret)
ret['seen_iteration'] = True
return ret
class IsPerfectIteration(Visitor):
"""
Return True if an :class:`Iteration` defines a perfect loop nest, False otherwise.
"""
def visit_object(self, o, **kwargs):
return False
def visit_tuple(self, o, **kwargs):
return all(self.visit(i, **kwargs) for i in o)
def visit_Node(self, o, found=False, **kwargs):
# Assume all nodes are in a perfect loop if they're in a loop.
return found
def visit_Iteration(self, o, found=False, multi=False):
if found and multi:
return False
multi = len(o.nodes) > 1
return all(self.visit(i, found=True, multi=multi) for i in o.children)
class Transformer(Visitor):
"""
Given an Iteration/Expression tree T and a mapper from nodes in T to
a set of new nodes L, M : N --> L, build a new Iteration/Expression tree T'
where a node ``n`` in N is replaced with ``M[n]``.
In the special case in which ``M[n]`` is None, ``n`` is dropped from T'.
In the special case in which ``M[n]`` is an iterable of nodes, ``n`` is
"extended" by pre-pending to its body the nodes in ``M[n]``.
"""
def __init__(self, mapper={}):
super(Transformer, self).__init__()
self.mapper = mapper.copy()
self.rebuilt = {}
def visit_object(self, o, **kwargs):
return o
def visit_tuple(self, o, **kwargs):
visited = tuple(self.visit(i, **kwargs) for i in o)
return tuple(i for i in visited if i is not None)
visit_list = visit_tuple
def visit_Node(self, o, **kwargs):
if o in self.mapper:
handle = self.mapper[o]
if handle is None:
# None -> drop /o/
return None
elif isinstance(handle, Iterable):
if not o.children:
raise VisitorException
extended = (tuple(handle) + o.children[0],) + o.children[1:]
return o._rebuild(*extended, **o.args_frozen)
else:
return handle._rebuild(**handle.args)
else:
rebuilt = [self.visit(i, **kwargs) for i in o.children]
return o._rebuild(*rebuilt, **o.args_frozen)
def visit(self, o, *args, **kwargs):
obj = super(Transformer, self).visit(o, *args, **kwargs)
if isinstance(o, Node) and obj is not o:
self.rebuilt[o] = obj
return obj
class NestedTransformer(Transformer):
"""
Unlike a :class:`Transformer`, a :class:`NestedTransformer` applies
replacements in a depth-first fashion.
"""
def visit_Node(self, o, **kwargs):
rebuilt = [self.visit(i, **kwargs) for i in o.children]
handle = self.mapper.get(o, o)
if handle is None:
# None -> drop /o/
return None
elif isinstance(handle, Iterable):
if not o.children:
raise VisitorException
extended = [tuple(handle) + rebuilt[0]] + rebuilt[1:]
return o._rebuild(*extended, **o.args_frozen)
else:
return handle._rebuild(*rebuilt, **handle.args_frozen)
class SubstituteExpression(Transformer):
"""
:class:`Transformer` that performs symbol substitution on
:class:`Expression` objects in a given tree.
:param subs: Dict defining the symbol substitution
"""
def __init__(self, subs={}):
super(SubstituteExpression, self).__init__()
self.subs = subs
def visit_Expression(self, o):
o.substitute(self.subs)
return o._rebuild(expr=o.expr)
class ResolveTimeStepping(Transformer):
"""
:class:`Transformer` class that creates a substitution dictionary
for replacing :class:`Dimension` instances with explicit loop
variables in :class:`Iteration` nodes. For stepping dimensions it
also inserts the relevant definitions for buffer index variables,
for example:
.. code-block:: c
for (int t = 0; t < t_size; t += 1)
{
int t0 = (t) % 2;
int t1 = (t + 1) % 2;
"""
def visit_object(self, o, subs, **kwargs):
return o, subs
def visit_tuple(self, o, subs, **kwargs):
visited = []
for i in o:
handle, subs = self.visit(i, subs, **kwargs)
visited.append(handle)
return tuple(visited), subs
visit_list = visit_object
def visit_Node(self, o, subs, **kwargs):
rebuilt, _ = zip(*[self.visit(i, subs, **kwargs) for i in o.children])
return o._rebuild(*rebuilt, **o.args_frozen), subs
def visit_Iteration(self, o, subs, offsets=defaultdict(set)):
nodes, subs = self.visit(o.children, subs, offsets=offsets)
if o.dim.is_Stepping:
# For SteppingDimension insert the explicit
# definition of buffered variables, eg. t+1 => t1
init = []
for i, off in enumerate(filter_ordered(offsets[o.dim])):
vname = Scalar(name="%s%d" % (o.dim.name, i), dtype=np.int32)
value = (o.dim.parent + off) % o.dim.modulo
init.append(UnboundedIndex(vname, value, value))
subs[o.dim + off] = LoweredDimension(vname.name, o.dim, off)
# Always lower to symbol
subs[o.dim.parent] = Scalar(name=o.dim.parent.name, dtype=np.int32)
return o._rebuild(index=o.dim.parent.name, uindices=init), subs
else:
return o._rebuild(*nodes), subs
def visit_Expression(self, o, subs, offsets=defaultdict(set)):
"""Collect all offsets used with a dimension"""
for dim, offs in o.stencil.entries:
offsets[dim].update(offs)
return o, subs
def visit(self, o, subs=None, **kwargs):
if subs is None:
subs = {}
obj, subs = super(ResolveTimeStepping, self).visit(o, subs, **kwargs)
return obj, subs
class MergeOuterIterations(Transformer):
"""
:class:`Transformer` that merges subsequent :class:`Iteration`
objects iff their dimensions agree.
"""
def is_mergable(self, iter1, iter2):
"""Defines if two :class:`Iteration` objects are mergeable.
Note: This currently does not(!) consider data dependencies
between the loops. A deeper analysis is required for this that
will be added soon.
"""
if iter1.dim.is_Stepping:
# Aliasing only works one-way because we left-merge
if iter1.dim.parent == iter2.dim:
return True
if iter2.dim.is_Stepping and iter1.dim.parent == iter2.dim.parent:
return True
return iter1.dim == iter2.dim and iter1.bounds_symbolic == iter2.bounds_symbolic
def merge(self, iter1, iter2):
"""Creates a new merged :class:`Iteration` object from two
loops along the same dimension.
"""
newexpr = iter1.nodes + iter2.nodes
return Iteration(newexpr, dimension=iter1.dim,
limits=iter1.limits,
offsets=iter1.offsets)
def visit_Iteration(self, o):
rebuilt = self.visit(o.children)
ret = o._rebuild(*rebuilt, **o.args_frozen)
return ret
def visit_list(self, o):
head = self.visit(o[0])
if len(o) < 2:
return tuple([head])
body = self.visit(o[1:])
if head.is_Iteration and body[0].is_Iteration:
if self.is_mergable(head, body[0]):
newit = self.merge(head, body[0])
ret = self.visit([newit] + list(body[1:]))
return as_tuple(ret)
return tuple([head] + list(body))
visit_tuple = visit_list
def printAST(node, verbose=True):
return PrintAST(verbose=verbose).visit(node)
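# Illustrative usage sketch (assuming an already-built Iteration/Expression
# tree `iet`); the verbose flag toggles per-node detail:
#
#   print(printAST(iet))
#   print(printAST(iet, verbose=False))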
|
the-stack_106_16420
|
# qubit number=4
# total number=30
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.x(input_qubit[3]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.h(input_qubit[0]) # number=27
prog.cz(input_qubit[1],input_qubit[0]) # number=28
prog.h(input_qubit[0]) # number=29
prog.h(input_qubit[0]) # number=15
prog.cz(input_qubit[1],input_qubit[0]) # number=16
prog.h(input_qubit[1]) # number=20
prog.h(input_qubit[2]) # number=19
prog.cx(input_qubit[3],input_qubit[0]) # number=24
prog.z(input_qubit[3]) # number=25
prog.cx(input_qubit[3],input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=17
prog.cx(input_qubit[2],input_qubit[0]) # number=21
prog.x(input_qubit[1]) # number=23
prog.cx(input_qubit[2],input_qubit[0]) # number=22
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2052.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_106_16421
|
from enum import Enum
from engine.assets import GLBFile, GLTFFile
from . import TypedArray, TypedArrayFormat as AFmt
from ..base_types import name_generator, Id
mesh_name = name_generator("Mesh")
# Maps of default attributes exported in a GLTF file
DEFAULT_ATTRIBUTES_MAP = {
"POSITION": "POSITION", "NORMAL": "NORMAL", "TANGENT": "TANGENT", "TEXCOORD_0": "TEXCOORD_0"
}
class MeshPrefab(Enum):
Plane = 0
class Mesh(object):
def __init__(self, **kwargs):
self._id = Id()
self.name = kwargs.get('name', next(mesh_name))
self.indices = None
self.attributes = None
self.buffer = None
@classmethod
def from_array(cls, indices, attributes, **kwargs):
mesh = super().__new__(cls)
mesh.__init__(**kwargs)
mesh.indices = indices
mesh.attributes = attributes
return mesh
@classmethod
def from_gltf(cls, gltf_file, index_or_name, **kwargs):
if not (isinstance(gltf_file, GLBFile) or isinstance(gltf_file, GLTFFile)):
raise TypeError(f"Unknown/Unsupported type: {type(gltf_file).__qualname__}")
mesh = None
if isinstance(index_or_name, str):
mesh = next((m for m in gltf_file.layout["meshes"] if m["name"] == index_or_name), None)
if mesh is None:
names = [m["name"] for m in gltf_file.layout["meshes"]]
raise ValueError(f"No mesh named {index_or_name} in gltf file. Available meshes: {names}")
else:
mesh = gltf_file.layout["meshes"][index_or_name]
mesh0 = mesh['primitives'][0]
attributes_map = kwargs.get('attributes_map', DEFAULT_ATTRIBUTES_MAP)
indices_data = gltf_file.accessor_data(mesh0["indices"])
attributes = {}
for attr_name, acc_index in mesh0["attributes"].items():
mapped_name = attributes_map.get(attr_name)
if mapped_name is not None:
attributes[mapped_name] = gltf_file.accessor_data(acc_index)
mesh = super().__new__(cls)
mesh.__init__(**kwargs)
mesh.indices = indices_data
mesh.attributes = attributes
return mesh
@classmethod
def from_prefab(cls, prefab, **params):
attributes_map = params.get('attributes_map', DEFAULT_ATTRIBUTES_MAP)
pname = attributes_map.get("POSITION", None)
tex00_name = attributes_map.get("TEXCOORD_0", None)
if prefab is MeshPrefab.Plane:
attributes = {}
indices = TypedArray.from_array(fmt=AFmt.UInt16, array=(0, 1, 2, 0, 3, 2))
if pname is not None:
if params.get("invert_y", False):
array=(-0.7, 0.7, 0, 0.7, 0.7, 0, 0.7, -0.7, 0, -0.7, -0.7, 0)
else:
array=(-0.7, -0.7, 0, 0.7, -0.7, 0, 0.7, 0.7, 0, -0.7, 0.7, 0)
attributes[pname] = TypedArray.from_array(fmt=AFmt.Float32, array=array)
if tex00_name is not None:
attributes[tex00_name] = TypedArray.from_array(fmt=AFmt.Float32, array=(1,0, 0,0, 0,1, 1,1))
else:
raise ValueError(f"Unknown built-in mesh: {builtin}")
return Mesh.from_array(indices = indices, attributes = attributes, **params)
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id.value = value
def size(self):
return self.indices.size_bytes + sum(map(lambda a: a.size_bytes, self.attributes.values()))
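# Illustrative usage sketch (not part of the original module):
#
#   plane = Mesh.from_prefab(MeshPrefab.Plane, invert_y=True)
#   print(plane.size())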
|
the-stack_106_16422
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.wc_wm_cspp
@file marine-integrations/mi/dataset/parser/wc_wm_cspp.py
@author Jeff Roy
@brief wc_wm Parser for the cspp_eng_cspp dataset driver
Release notes: This is one of 4 parsers that make up that driver
initial release
"""
__author__ = 'Jeff Roy'
__license__ = 'Apache 2.0'
import numpy
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.exceptions import RecoverableSampleException
from mi.core.instrument.dataset_data_particle import DataParticle
from mi.dataset.parser.cspp_base import \
CsppParser, \
Y_OR_N_REGEX, \
END_OF_LINE_REGEX, \
CsppMetadataDataParticle, \
MetadataRawDataKey, \
encode_y_or_n
from mi.dataset.parser.common_regexes import INT_REGEX, \
FLOAT_REGEX, \
MULTIPLE_TAB_REGEX
# Input Records are formatted as follows
# FORMAT DATA Type Field Units Notes
#
# string float64 Profiler Timestamp seconds Seconds since 1/1/70 with millisecond resolution
# string float32 Depth decibars
# string string Suspect Timestamp 1 "y" or "n"
# string int32 Encoder Counts counts Keeps track of the net rotation done by the winch axle
# string float32 Winch Current A Current drawn by the winch motor. Sign reflects direction
# string string Winch Status 1
# string float32 Velocity counts/s How fast the winch is spooling rope
# string float32 Temperature deg_C Temperature of winch assembly
# string float32 Winch Voltage volts Voltage at the motor control module
# string int32 Time Counts counts Related to estimating battery energy
# string int32 Discharge Counts counts Related to estimating battery energy
# string float32 Rope on Drum meters Amount of rope on the winch drum
STRING_REGEX = r'\S*' # any non white space
DATA_REGEX = '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # Profiler Timestamp
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # Depth
DATA_REGEX += '(' + Y_OR_N_REGEX + ')' + MULTIPLE_TAB_REGEX # Suspect Timestamp
DATA_REGEX += '(' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX # Encoder Counts
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # Winch Current
DATA_REGEX += '(' + STRING_REGEX + ')' + MULTIPLE_TAB_REGEX # Winch Status
DATA_REGEX += '(' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX # Velocity
DATA_REGEX += '(' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX # Temperature
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # Winch Voltage
DATA_REGEX += INT_REGEX + MULTIPLE_TAB_REGEX # Time Counts (ignored)
DATA_REGEX += INT_REGEX + MULTIPLE_TAB_REGEX # Discharge Counts (ignored)
DATA_REGEX += '(' + FLOAT_REGEX + ')' + END_OF_LINE_REGEX # Rope on Drum
class WcWmDataTypeKey(BaseEnum):
WC_WM_CSPP_TELEMETERED = 'wc_wm_cspp_telemetered'
WC_WM_CSPP_RECOVERED = 'wc_wm_cspp_recovered'
class DataMatchesGroupNumber(BaseEnum):
"""
An enum for group match indices for a data record chunk.
Used to access the match groups in the particle raw data
"""
PROFILER_TIMESTAMP = 1
PRESSURE = 2
SUSPECT_TIMESTAMP = 3
ENCODER_COUNTS = 4
WINCH_CURRENT = 5
WINCH_STATUS = 6
VELOCITY = 7
TEMPERATURE = 8
WINCH_VOLTAGE = 9
ROPE_ON_DRUM = 10
class WcWmDataParticleType(BaseEnum):
ENGINEERING_TELEMETERED = 'cspp_eng_cspp_wc_wm_eng'
ENGINEERING_RECOVERED = 'cspp_eng_cspp_wc_wm_eng_recovered'
METADATA_TELEMETERED = 'cspp_eng_cspp_wc_wm_metadata'
METADATA_RECOVERED = 'cspp_eng_cspp_wc_wm_metadata_recovered'
class WcWmEngDataParticleKey(BaseEnum):
"""
The data particle keys associated with wc_wm engineering data particle parameters
"""
PROFILER_TIMESTAMP = 'profiler_timestamp'
PRESSURE = 'pressure_depth'
SUSPECT_TIMESTAMP = 'suspect_timestamp'
ENCODER_COUNTS = 'encoder_counts'
WINCH_CURRENT = 'current_flt32'
WINCH_STATUS = 'device_status'
WINCH_VELOCITY = 'winch_velocity'
TEMPERATURE = 'temperature'
WINCH_VOLTAGE = 'voltage_flt32'
ROPE_ON_DRUM = 'rope_on_drum'
# A group of instrument data particle encoding rules used to simplify encoding using a loop
ENGINEERING_PARTICLE_ENCODING_RULES = [
(WcWmEngDataParticleKey.PROFILER_TIMESTAMP, DataMatchesGroupNumber.PROFILER_TIMESTAMP, float),
(WcWmEngDataParticleKey.PRESSURE, DataMatchesGroupNumber.PRESSURE, float),
(WcWmEngDataParticleKey.SUSPECT_TIMESTAMP, DataMatchesGroupNumber.SUSPECT_TIMESTAMP, encode_y_or_n),
(WcWmEngDataParticleKey.ENCODER_COUNTS, DataMatchesGroupNumber.ENCODER_COUNTS, int),
(WcWmEngDataParticleKey.WINCH_CURRENT, DataMatchesGroupNumber.WINCH_CURRENT, float),
(WcWmEngDataParticleKey.WINCH_STATUS, DataMatchesGroupNumber.WINCH_STATUS, str),
(WcWmEngDataParticleKey.WINCH_VELOCITY, DataMatchesGroupNumber.VELOCITY, int),
(WcWmEngDataParticleKey.TEMPERATURE, DataMatchesGroupNumber.TEMPERATURE, int),
(WcWmEngDataParticleKey.WINCH_VOLTAGE, DataMatchesGroupNumber.WINCH_VOLTAGE, float),
(WcWmEngDataParticleKey.ROPE_ON_DRUM, DataMatchesGroupNumber.ROPE_ON_DRUM, float),
]
class WcWmMetadataDataParticle(CsppMetadataDataParticle):
"""
Class for building a wc wm metadata particle
"""
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
@throws RecoverableSampleException If there is a problem with sample creation
"""
results = []
try:
# Append the base metadata parsed values to the results to return
results += self._build_metadata_parsed_values()
data_match = self.raw_data[MetadataRawDataKey.DATA_MATCH]
# Set the internal timestamp
internal_timestamp_unix = numpy.float(data_match.group(
DataMatchesGroupNumber.PROFILER_TIMESTAMP))
self.set_internal_timestamp(unix_time=internal_timestamp_unix)
except (ValueError, TypeError, IndexError) as ex:
log.warn("Exception when building parsed values")
raise RecoverableSampleException("Error (%s) while decoding parameters in data: [%s]"
% (ex, self.raw_data))
return results
class WcWmMetadataRecoveredDataParticle(WcWmMetadataDataParticle):
"""
Class for building a wc wm recovered metadata particle
"""
_data_particle_type = WcWmDataParticleType.METADATA_RECOVERED
class WcWmMetadataTelemeteredDataParticle(WcWmMetadataDataParticle):
"""
Class for building a wc wm telemetered metadata particle
"""
_data_particle_type = WcWmDataParticleType.METADATA_TELEMETERED
class WcWmEngDataParticle(DataParticle):
"""
Class for parsing data from the wc wm engineering data set
"""
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
@throws RecoverableSampleException If there is a problem with sample creation
"""
results = []
try:
# Process each of the instrument particle parameters
for name, group, function in ENGINEERING_PARTICLE_ENCODING_RULES:
results.append(self._encode_value(name, self.raw_data.group(group), function))
# # Set the internal timestamp
internal_timestamp_unix = numpy.float(self.raw_data.group(
DataMatchesGroupNumber.PROFILER_TIMESTAMP))
self.set_internal_timestamp(unix_time=internal_timestamp_unix)
# We shouldn't end up with an exception due to the strongly specified regex, but we
# will ensure we catch any potential errors just in case
except (ValueError, TypeError, IndexError) as ex:
log.warn("Exception when building parsed values")
raise RecoverableSampleException("Error (%s) while decoding parameters in data: [%s]"
% (ex, self.raw_data))
return results
class WcWmEngRecoveredDataParticle(WcWmEngDataParticle):
"""
Class for building a wc wm recovered engineering data particle
"""
_data_particle_type = WcWmDataParticleType.ENGINEERING_RECOVERED
class WcWmEngTelemeteredDataParticle(WcWmEngDataParticle):
"""
Class for building a wc wm telemetered engineering data particle
"""
_data_particle_type = WcWmDataParticleType.ENGINEERING_TELEMETERED
class WcWmCsppParser(CsppParser):
def __init__(self,
config,
stream_handle,
exception_callback):
"""
This method is a constructor that will instantiate an WcWmCsppParser object.
@param config The configuration for this WcWmCsppParser parser
@param stream_handle The handle to the data stream containing the cspp_eng_cspp data
@param exception_callback The function to call to report exceptions
"""
# Call the superclass constructor
super(WcWmCsppParser, self).__init__(config,
stream_handle,
exception_callback,
DATA_REGEX,
ignore_matcher=None)
|
the-stack_106_16423
|
import matplotlib.cm as cm
import numpy
def collate_family_defining(filename):
"""
scan a filename and list all 'FAMILY-DEFINING' features
"""
oh = open(filename, "rU")
doms = []
for line in oh:
if "FAMILY-DEFINING" in line:
line = line.strip().split()
doms.append(line[5])
set_of_doms = set(doms)
for d in set_of_doms:
print("%s\t%s" % (doms.count(d), d))
cols = cm.Paired(numpy.arange(len(set_of_doms))/ (len(set_of_doms)*1.0))[:,:-1]
print(cols)
print(cols)
print()
print("Suggested ColourMap:")
print("col_map = {")
for i, d in enumerate(set_of_doms):
print("\t'%s': '%s'," % (d, '#%02x%02x%02x' % tuple(cols[i]*255)))
print("\t}")
oh.close()
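# Illustrative usage (hypothetical input file; each FAMILY-DEFINING line is
# expected to carry the domain name in column 6):
#
#   collate_family_defining("family_domains.txt")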
|
the-stack_106_16424
|
#!/usr/bin/env python
## /*=========================================================================
## Program: Visualization Toolkit
## Module: HeaderTesting.py
## Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
## All rights reserved.
## See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notice for more information.
## =========================================================================*/
## .NAME HeaderTesting - a VTK style and validity checking utility
## .SECTION Description
## HeaderTesting is a script which checks the list of header files for
## validity based on VTK coding standard. It checks for proper super
## classes, number and style of include files, type macro, private
## copy constructor and assignment operator, broken constructors, and
## existence of PrintSelf method. This script should be run as a part
## of the dashboard checking of the Visualization Toolkit and related
## projects.
## .SECTION See Also
## http://www.vtk.org https://www.cdash.org/
## http://www.vtk.org/contribute.php#coding-standards
import sys
import re
import os
import stat
# Get the path to the directory containing this script.
if __name__ == '__main__':
selfpath = os.path.abspath(sys.path[0] or os.curdir)
else:
selfpath = os.path.abspath(os.path.dirname(__file__))
# Load the list of names mangled by windows.h.
exec(compile(open(os.path.join(selfpath, 'WindowsMangleList.py')).read(),
os.path.join(selfpath, 'WindowsMangleList.py'), 'exec'))
## If tested from ctest, make sure to fix all the output strings
test_from_ctest = False
if "DASHBOARD_TEST_FROM_CTEST" in os.environ:
test_from_ctest = True
## For backward compatibility
def StringEndsWith(str1, str2):
l1 = len(str1)
l2 = len(str2)
if l1 < l2:
return 0
return (str1[(l1-l2):] == str2)
##
class TestVTKFiles:
def __init__(self):
self.FileName = ""
self.ErrorValue = 0;
self.Errors = {}
self.WarningValue = 0;
self.Warnings = {}
self.FileLines = []
self.Export = ""
self.UnnecessaryIncludes = [
"stdio.h",
"stdlib.h",
"string.h",
"iostream",
"iostream.h",
"strstream",
"strstream.h",
"fstream",
"fstream.h",
"windows.h"
]
pass
def SetExport(self, export):
self.Export = export
def Print(self, text=""):
rtext = text
if test_from_ctest:
rtext = rtext.replace("<", "<")
rtext = rtext.replace(">", ">")
print(rtext)
def Error(self, error):
self.ErrorValue = 1
self.Errors[error] = 1
pass
def Warning(self, warning):
self.WarningValue = 1
self.Warnings[warning] = 1
pass
def PrintErrors(self):
if self.ErrorValue:
self.Print( )
self.Print( "There were errors:" )
for a in self.Errors:
self.Print( "* %s" % a )
def PrintWarnings(self):
if self.WarningValue:
self.Print( )
self.Print( "There were warnings:" )
for a in self.Warnings:
self.Print( "* %s" % a )
def TestFile(self, filename):
self.FileName = filename
self.FileLines = []
self.ClassName = ""
self.ParentName = ""
try:
if sys.hexversion >= 0x03000000:
file = open(filename, encoding='ascii', errors='ignore')
else:
file = open(filename)
self.FileLines = file.readlines()
file.close()
except:
self.Print("Problem reading file %s:\n%s" %
(filename, str(sys.exc_info()[1])))
sys.exit(1)
return not self.CheckExclude()
def CheckExclude(self):
prefix = '// VTK-HeaderTest-Exclude:'
prefix_c = '/* VTK-HeaderTest-Exclude:'
suffix_c = ' */'
exclude = 0
for l in self.FileLines:
if l.startswith(prefix):
e = l[len(prefix):].strip()
if e == os.path.basename(self.FileName):
exclude += 1
else:
self.Error("Wrong exclusion: "+l.rstrip())
elif l.startswith(prefix_c) and l.rstrip().endswith(suffix_c):
e = l[len(prefix_c):-len(suffix_c)].strip()
if e == os.path.basename(self.FileName):
exclude += 1
else:
self.Error("Wrong exclusion: "+l.rstrip())
if exclude > 1:
self.Error("Multiple VTK-HeaderTest-Exclude lines")
return exclude > 0
def CheckIncludes(self):
count = 0
lines = []
nplines = []
unlines = []
includere = "^\s*#\s*include\s*[\"<]([^>\"]+)"
ignincludere = ".*\/\/.*"
regx = re.compile(includere)
regx1 = re.compile(ignincludere)
cc = 0
includeparent = 0
for a in self.FileLines:
line = a.strip()
rm = regx.match(line)
if rm and not regx1.match(line):
lines.append(" %4d: %s" % (cc, line))
file = rm.group(1)
if file == (self.ParentName + ".h"):
includeparent = 1
if not StringEndsWith(file, ".h"):
nplines.append(" %4d: %s" % (cc, line))
if file in self.UnnecessaryIncludes:
unlines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 1:
self.Print()
self.Print( "File: %s has %d includes: " %
( self.FileName, len(lines)) )
for a in lines:
self.Print( a )
self.Error("Multiple includes")
if len(nplines) > 0:
self.Print( )
self.Print( "File: %s has non-portable include(s): " % self.FileName )
for a in nplines:
self.Print( a )
self.Error("Non-portable includes")
if len(unlines) > 0:
self.Print( )
self.Print( "File: %s has unnecessary include(s): " % self.FileName )
for a in unlines:
self.Print( a )
self.Error("Unnecessary includes")
if not includeparent and self.ParentName:
self.Print()
self.Print( "File: %s does not include parent \"%s.h\"" %
( self.FileName, self.ParentName ) )
self.Error("Does not include parent")
pass
def CheckGuard(self):
guardre = r"^#ifndef\s+([^ ]*)_h$"
guardsetre = r"^#define\s+([^ ]*)_h$"
guardrex = re.compile(guardre)
guardsetrex = re.compile(guardsetre)
guard = None
guard_set = None
expect_trigger = False
for line in self.FileLines:
line = line.strip()
if expect_trigger:
gs = guardsetrex.match(line)
if gs:
guard_set = gs.group(1)
break
g = guardrex.match(line)
if g:
guard = g.group(1)
expect_trigger = True
if not guard or not guard_set:
self.Print("File: %s is missing a header guard." % self.FileName)
self.Error("Missing header guard")
elif not guard == guard_set:
self.Print("File: %s is not guarded properly." % self.FileName)
self.Error("Guard does is not set properly")
elif not ('%s.h' % guard) == os.path.basename(self.FileName):
self.Print("File: %s has a guard (%s) which does not match its filename." % (self.FileName, guard))
self.Error("Guard does not match the filename")
def CheckParent(self):
classre = "^class(\s+VTK_DEPRECATED)?(\s+[^\s]*_EXPORT)?\s+(vtkm?[A-Z0-9_][^ :\n]*)\s*:\s*public\s+(vtk[^ \n\{]*)"
cname = ""
pname = ""
classlines = []
regx = re.compile(classre)
cc = 0
lastline = ""
for a in self.FileLines:
line = a.strip()
rm = regx.match(line)
if not rm and not cname:
rm = regx.match(lastline + line)
if rm:
export = rm.group(2)
if export:
export = export.strip()
cname = rm.group(3)
pname = rm.group(4)
classlines.append(" %4d: %s" % (cc, line))
if not export:
self.Print("File: %s defines 1 class with no export macro:" % self.FileName)
self.Print(" %4d: %s" % (cc, line))
self.Error("No export macro")
elif self.Export and self.Export != export:
self.Print("File: %s defines 1 class with wrong export macro:" % self.FileName)
self.Print(" %4d: %s" % (cc, line))
self.Print(" The export macro should be: %s" % (self.Export))
self.Error("Wrong export macro")
cc = cc + 1
lastline = a
if len(classlines) > 1:
self.Print()
self.Print( "File: %s defines %d classes: " %
(self.FileName, len(classlines)) )
for a in classlines:
self.Print( a )
self.Error("Multiple classes defined")
if len(classlines) < 1:
self.Print()
self.Print( "File: %s does not define any classes" % self.FileName )
self.Error("No class defined")
return
#self.Print( "Classname: %s ParentName: %s" % (cname, pname)
self.ClassName = cname
self.ParentName = pname
pass
def CheckTypeMacro(self):
count = 0
lines = []
oldlines = []
        typere = r"^\s*vtk(Abstract|Base)?Type(Revision)*Macro\s*\(\s*(vtk[^ ,]+)\s*,\s*(vtk[^ \)]+)\s*\)\s*"
        typesplitre = r"^\s*vtk(Abstract|Base)?Type(Revision)*Macro\s*\("
regx = re.compile(typere)
regxs = re.compile(typesplitre)
cc = 0
found = 0
for a in range(len(self.FileLines)):
line = self.FileLines[a].strip()
rm = regx.match(line)
if rm:
found = 1
if rm.group(2) == "Revision":
oldlines.append(" %4d: %s" % (cc, line))
cname = rm.group(3)
pname = rm.group(4)
if cname != self.ClassName or pname != self.ParentName:
lines.append(" %4d: %s" % (cc, line))
else:
# Maybe it is in two lines
rm = regxs.match(line)
if rm:
                    nline = line + " " + self.FileLines[a+1].strip()
line = nline.strip()
rm = regx.match(line)
if rm:
found = 1
if rm.group(2) == "Revision":
oldlines.append(" %4d: %s" % (cc, line))
cname = rm.group(3)
pname = rm.group(4)
if cname != self.ClassName or pname != self.ParentName:
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has broken type macro(s):" % self.FileName )
for a in lines:
self.Print( a )
self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
(self.ClassName, self.ParentName) )
self.Error("Broken type macro")
if len(oldlines) > 0:
self.Print( "File: %s has legacy type-revision macro(s):" % self.FileName )
for a in oldlines:
self.Print( a )
self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
(self.ClassName, self.ParentName))
self.Error("Legacy style type-revision macro")
if not found:
self.Print( "File: %s does not have type macro" % self.FileName )
self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
(self.ClassName, self.ParentName))
self.Error("No type macro")
pass
def CheckForCopyAndAssignment(self):
if not self.ClassName:
return
count = 0
lines = []
oldlines = []
        copyoperator = r"^\s*%s\s*\(\s*const\s*%s\s*&\s*\) = delete;" % ( self.ClassName, self.ClassName)
        asgnoperator = r"^\s*void\s*operator\s*=\s*\(\s*const\s*%s\s*&\s*\) = delete;" % self.ClassName
#self.Print( copyoperator
regx1 = re.compile(copyoperator)
regx2 = re.compile(asgnoperator)
foundcopy = 0
foundasgn = 0
for a in self.FileLines:
line = a.strip()
if regx1.match(line):
foundcopy = foundcopy + 1
if regx2.match(line):
foundasgn = foundasgn + 1
lastline = ""
if foundcopy < 1:
for a in self.FileLines:
line = a.strip()
if regx1.match(lastline + line):
foundcopy = foundcopy + 1
lastline = a
lastline = ""
if foundasgn < 1:
for a in self.FileLines:
line = a.strip()
if regx2.match(lastline + line):
foundasgn = foundasgn + 1
lastline = a
if foundcopy < 1:
self.Print( "File: %s does not define copy constructor" %
self.FileName )
self.Print( "Should be:\n%s(const %s&) = delete;" %
(self.ClassName, self.ClassName) )
self.Error("No private copy constructor")
if foundcopy > 1:
self.Print( "File: %s defines multiple copy constructors" %
self.FileName )
self.Error("Multiple copy constructor")
if foundasgn < 1:
self.Print( "File: %s does not define assignment operator" %
self.FileName )
self.Print( "Should be:\nvoid operator=(const %s&) = delete;"
% self.ClassName )
self.Error("No private assignment operator")
        if foundasgn > 1:
self.Print( "File: %s defines multiple assignment operators" %
self.FileName )
self.Error("Multiple assignment operators")
pass
def CheckWeirdConstructors(self):
count = 0
lines = []
oldlines = []
        constructor = r"^\s*%s\s*\(([^ )]*)\)" % self.ClassName
        copyoperator = r"^\s*%s\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*implemented(\.)*" % ( self.ClassName, self.ClassName)
regx1 = re.compile(constructor)
regx2 = re.compile(copyoperator)
cc = 0
for a in self.FileLines:
line = a.strip()
rm = regx1.match(line)
if rm:
arg = rm.group(1).strip()
if arg and not regx2.match(line):
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has weird constructor(s):" % self.FileName )
for a in lines:
self.Print( a )
self.Print( "There should be only:\n %s();" % self.ClassName )
self.Error("Weird constructor")
pass
def CheckPrintSelf(self):
if not self.ClassName:
return
        typere = r"^\s*void\s*PrintSelf\s*\(\s*ostream\s*&\s*os*\s*,\s*vtkIndent\s*indent\s*\)"
        newtypere = r"^\s*virtual\s*void\s*PrintSelf\s*\(\s*ostream\s*&\s*os*\s*,\s*vtkIndent\s*indent\s*\)"
regx1 = re.compile(typere)
regx2 = re.compile(newtypere)
found = 0
oldstyle = 0
for a in self.FileLines:
line = a.strip()
rm1 = regx1.match(line)
rm2 = regx2.match(line)
if rm1 or rm2:
found = 1
if rm1:
oldstyle = 1
if not found:
self.Print( "File: %s does not define PrintSelf method:" %
self.FileName )
self.Warning("No PrintSelf method")
pass
def CheckWindowsMangling(self):
lines = []
regx1 = WindowsMangleRegEx
regx2 = re.compile("^.*VTK_LEGACY.*$")
# This version will leave out comment lines but we probably do
# not want to refer to mangled (hopefully deprecated) methods
# in comments.
# regx2 = re.compile("^(\s*//|\s*\*|.*VTK_LEGACY).*$")
cc = 1
for a in self.FileLines:
line = a.strip()
rm = regx1.match(line)
if rm:
arg = rm.group(1).strip()
if arg and not regx2.match(line):
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has windows.h mangling violations:" % self.FileName )
for a in lines:
self.Print(a)
self.Error("Windows Mangling Violation - choose another name that does not conflict.")
pass
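## Example invocation (the directory, export macro and excluded header below
## are illustrative placeholders only):
##   python <this script> Common/Core VTK_COMMONCORE_EXPORT vtkABI.h
## The first argument is the directory to scan; an optional VTK..._EXPORT
## macro and any number of excluded header files may follow.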
##
test = TestVTKFiles()
## Check command line arguments
if len(sys.argv) < 2:
print("Testing directory not specified...")
print("Usage: %s <directory> [ exception(s) ]" % sys.argv[0])
sys.exit(1)
dirname = sys.argv[1]
exceptions = sys.argv[2:]
if len(sys.argv) > 2:
export = sys.argv[2]
if export[:3] == "VTK" and export[len(export)-len("EXPORT"):] == "EXPORT":
print("Use export macro: %s" % export)
exceptions = sys.argv[3:]
test.SetExport(export)
## Traverse through the list of files
for a in os.listdir(dirname):
## Skip non-header files
if not StringEndsWith(a, ".h"):
continue
## Skip non-vtk files
if not a.startswith('vtk'):
continue
## Skip exceptions
if a in exceptions:
continue
pathname = '%s/%s' % (dirname, a)
if pathname in exceptions:
continue
mode = os.stat(pathname)[stat.ST_MODE]
## Skip directories
if stat.S_ISDIR(mode):
continue
elif stat.S_ISREG(mode) and test.TestFile(pathname):
## Do all the tests
test.CheckGuard()
test.CheckParent()
test.CheckIncludes()
test.CheckTypeMacro()
test.CheckForCopyAndAssignment()
test.CheckWeirdConstructors()
test.CheckPrintSelf()
test.CheckWindowsMangling()
## Summarize errors
test.PrintWarnings()
test.PrintErrors()
sys.exit(test.ErrorValue)
|
the-stack_106_16426
|
# -*- coding: utf-8 -*-
"""
sjkscan.postprocessing
~~~~~~~~~~~~~~~~~~~~~~
Implements all post processing related actions that sjkscan take on a
scanned document.
:copyright: (c) 2016 by Svante Kvarnström
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import re
import time
from PyPDF2 import PdfFileMerger
from wand.image import Image
from .config import config, load_config
from .logger import init_logging
from .utils import run_cmd, files, move, remove, is_scan_name, parse_args
def rotate_image(filename, degrees):
"""Rotate image given amount of degrees.
:param string filename: file to rotate
:param int degrees: amount of degrees to rotate
"""
logging.info('Rotating %s %s degrees', filename, degrees)
with Image(filename=filename) as image:
with image.clone() as rotated:
rotated.rotate(degrees)
rotated.save(filename=filename)
def rotate_all_images_in_dir(dirname, degrees):
"""Rotate all files in directory.
:param string dirname: name of directory in which files should be rotated
:param int degrees: number of degrees to rotate
"""
logging.info('Rotating images %s degrees in directory %s', dirname, degrees)
for f in files(dirname):
rotate_image(os.path.join(dirname, f), degrees)
def unpaper(filename):
"""Process file with unpaper and delete original.
:param filename: file to run unpaper on
"""
logging.info('Running unpaper on %s', filename)
unpapered_filename = filename + '.unpapered'
# TODO: We don't use unpaper's --overwrite because it currently seems to be
# broken. Once it's been fixed, just --overwrite the original.
run_cmd('unpaper --size a4 "{}" "{}"'.format(filename, unpapered_filename))
move(unpapered_filename, filename)
def unpaper_dir(directory, extension=None):
"""Run unpaper on all files with given extension in directory
:param string directory: directory to process
:param string extension: extension of files to run unpaper on
"""
for f in files(directory, extension):
unpaper(os.path.join(directory, f))
def is_blank(filename):
"""
Check if image is blank.
Return true if filename is a blank image. This is a slightly modified
version of Vinatha Ekanayake's is_blank(), which is part of Scanpdf
(https://github.com/virantha/scanpdf) and licensed under the Apache
license.
:param string filename: file name of image to check
:returns: True if image is blank, False otherwise.
"""
if not os.path.exists(filename):
        logging.debug('is_blank: file %s does not exist.', filename)
return True
c = 'identify -verbose %s' % filename
result = run_cmd(c)
mStdDev = re.compile(
        rb'\s*standard deviation:\s*\d+\.\d+\s*\((?P<percent>\d+\.\d+)\).*')
for line in result.splitlines():
match = mStdDev.search(line)
if match:
stdev = float(match.group('percent'))
if stdev > 0.05:
logging.debug('is_blank: %s is NOT blank - standard deviation > 0.05 (%d)', filename, stdev)
return False
logging.debug('is_blank: %s is probably blank', filename)
return True
def move_blanks(input_dir, output_dir):
"""Move blank .pnm's in input_dir to output_dir
:param string input_dir: directory to check for blank .pnm files
:param string output_dir: where to move blank .pnm files
:returns: number of blank pages moved
:rtype: int
"""
number_of_blanks = 0
for file in files(input_dir, 'pnm'):
image = os.path.join(input_dir, file)
if is_blank(image):
            os.makedirs(output_dir, exist_ok=True)
move(image, output_dir)
number_of_blanks += 1
return number_of_blanks
def remove_if_blank(filename):
"""Remove file if it is blank.
This is useful when scanning in duplex mode using a backend that doesn't
support skipping blank pages.
:param string filename: name of file to remove, if blank
"""
if is_blank(filename):
remove(filename)
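# A minimal usage sketch for the blank-page helpers above; it is not called
# anywhere in sjkscan and the directory path is a hypothetical example only.
def _example_blank_page_cleanup(scan_dir='/tmp/scan-example'):
    blank_dir = os.path.join(scan_dir, 'blank')
    moved = move_blanks(scan_dir, blank_dir)
    logging.info('Moved %d blank page(s) to %s', moved, blank_dir)
    # Alternatively, delete blanks in place instead of collecting them:
    for f in files(scan_dir, 'pnm'):
        remove_if_blank(os.path.join(scan_dir, f))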
def merge_pdfs(inputs, output):
"""Merge selected pdfs.
:param list inputs: files to concatenate
:param string output: name of file to write
"""
merger = PdfFileMerger()
input_fds = dict()
out = open(output, 'wb')
logging.info('Merging PDF files into %s...', output)
for filename in inputs:
logging.debug('Merging %s -> %s', filename, output)
try:
input_fds[filename] = open(filename, 'rb')
except OSError as e:
            logging.error('Could not open %s: %s', filename, e)
            continue
        merger.append(input_fds[filename])
    merger.write(out)
    out.close()
logging.info('Finished merging PDF files into %s', output)
def merge_pdfs_in_dir(directory, output):
"""Read all pdf files in directory and create one merged output.
:param string directory: directory containing pdf files to be merged
:param string output: filename of new merged pdf
"""
files_to_merge = []
for pdf in files(directory, 'pdf'):
files_to_merge.append(os.path.join(directory, pdf))
merge_pdfs(files_to_merge, output)
def ocr(filename, language):
"""Perform OCR on file using Tesseract.
:param string filename: file to perform OCR on
:param string language: language(s) expected to be used in file
"""
logging.info('Performing OCR (%s) on %s', language, filename)
base_output_name = filename[:-4]
command = 'tesseract {} {} -l {} pdf'.format(filename,
base_output_name,
language)
run_cmd(command)
def ocr_pnms_in_dir(directory, language):
"""Perform OCR on all pnm files in given directory.
:param string directory: directory in which all pnm files will be OCR:ed
:param string language: language(s) expected to be used in files
"""
for file in files(directory, 'pnm'):
ocr(os.path.join(directory, file), language)
def main(argv=None):
"""
Polls DATA_DIR for finished scans. Once found, scand will:
- Move blank images to subdir blank/
- Rotate remaining images
- OCR remaining images
- Merge resulting pdf files
- Move the directory to INBOX
"""
load_config()
args = parse_args(argv)
init_logging(config['Logging']['level'])
while True:
for entry in os.scandir(config['Paths']['data']):
if not entry.is_dir() or not is_scan_name(entry.name):
continue
archive_dir = config['Paths']['archive']
inbox_dir = config['Paths']['inbox']
scan_dir = os.path.join(config['Paths']['data'], entry.name)
pdf_output = os.path.join(inbox_dir, '{}.pdf'.format(entry.name))
blank_dir = os.path.join(scan_dir, 'blank')
move_blanks(scan_dir, blank_dir)
rotate_all_images_in_dir(scan_dir, 180)
unpaper_dir(scan_dir, 'pnm')
ocr_pnms_in_dir(scan_dir, 'swe')
            os.makedirs(archive_dir, exist_ok=True)
            os.makedirs(inbox_dir, exist_ok=True)
merge_pdfs_in_dir(scan_dir, pdf_output)
move(scan_dir, archive_dir)
time.sleep(1)
|
the-stack_106_16428
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Author: Shengjia Yan
# Date: 2017-10-26
# Email: [email protected]
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import logging
import numpy as np
import itertools
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
def load_confusion_matrix(ref_score, pred_score):
    ref_path = '../../nea/output/emb/rnn/prompt_1/fold_0/preds/dev_ref.txt'
    pred_path = '../../nea/output/emb/rnn/prompt_1/fold_0/preds/dev_pred_49.txt'
    with open(ref_path, 'r') as ref_file:
        for ref in ref_file:
            ref_score.append(int(ref.strip('\n')))
    with open(pred_path, 'r') as pred_file:
        for pred in pred_file:
            pred_score.append(round(float(pred.strip('\n'))))
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
# 1. find out how many samples per class have received their correct label
        # For each true class k, compute the fraction of its samples that were predicted as each class.
        # e.g. if 25 samples have true label 6 and 10 of them are predicted as class 7, the cell at
        # (true label = 6, predicted label = 7) becomes 10 / 25 = 0.4
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        # 2. get the precision (fraction of class-k predictions that have ground truth label k)
        # e.g. if 12 samples are predicted as class k but only 9 of them truly belong to class k,
        # the precision is 9 / 12 = 0.75
# cm = cm.astype('float') / cm.sum(axis=0)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
# plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
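# A self-contained sketch showing how plot_confusion_matrix can be driven with
# synthetic labels; the values below are made up for illustration and are
# unrelated to the NEA predictions loaded in main().
def _example_plot_synthetic_cm():
    y_true = [2, 3, 3, 4, 4, 4, 5]
    y_pred = [2, 3, 4, 4, 4, 5, 5]
    cm = confusion_matrix(y_true, y_pred)
    plt.figure()
    plot_confusion_matrix(cm, classes=['2', '3', '4', '5'], normalize=True,
                          title='Synthetic normalized confusion matrix')
    plt.savefig('./synthetic_cm.png')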
def main():
ref_score = [] # true label
pred_score = [] # predicted label
load_confusion_matrix(ref_score, pred_score)
nea_matrix = confusion_matrix(ref_score, pred_score)
np.set_printoptions(precision=2)
class_names = ['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(nea_matrix, classes=class_names, title='Confusion matrix, without normalization')
plt.savefig('./unnormalized_cm.png')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(nea_matrix, classes=class_names, normalize=True, title='Normalized confusion matrix')
plt.savefig('./normalized_cm.png')
# plt.show()
if __name__ == '__main__':
main()
|
the-stack_106_16430
|
import os
import json
from glob import glob
from matplotlib import patches
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import torch
class Data():
def __init__(self):
self.VAL_LAB_DIR = glob("/Users/yangdongjae/Desktop/2022/Developing/lecttue-diagonosis/AI/Faster-RCNN/data/val/val_lab/*")
self.VAL_IMG_DIR = glob("/Users/yangdongjae/Desktop/2022/Developing/lecttue-diagonosis/AI/Faster-RCNN/data/val/val_img/*")
self.TRAIN_LAB_DIR = glob("/Users/yangdongjae/Desktop/2022/Developing/lecttue-diagonosis/AI/Faster-RCNN/data/train/train_img/*")
self.TRAIN_IMG_DIR = glob("/Users/yangdongjae/Desktop/2022/Developing/lecttue-diagonosis/AI/Faster-RCNN/data/train/train_img/*")
self.adjust_label = 1
def check_data_num(self):
print("number of validation data set : ", len(os.listdir(self.VAL_LAB_DIR)) + len(os.listdir(self.VAL_IMG_DIR)))
print("number of train data set : ", len(os.listdir(self.TRAIN_IMG_DIR)) + len(os.listdir(self.TRAIN_LAB_DIR)))
def generate_box(self, obj):
xmin = obj["annotations"]["points"][0]['xtl']
ymin = obj["annotations"]["points"][0]['ytl']
xmax = obj["annotations"]["points"][0]['xbr']
ymax = obj["annotations"]["points"][0]['ybr']
return[xmin, ymin, xmax, ymax]
def generate_label(self,obj):
if obj["annotations"]["risk"] == 0:
return self.adjust_label
elif obj["annotations"]["risk"] == 1 and obj["annotations"]["disease"] == 9:
return 1 + self.adjust_label
elif obj["annotations"]["risk"] == 2 and obj["annotations"]["disease"] == 9:
return 2 + self.adjust_label
elif obj["annotations"]["risk"] == 3 and obj["annotations"]["disease"] == 9:
return 3 + self.adjust_label
elif obj["annotations"]["risk"] == 1 and obj["annotations"]["disease"] == 10:
return 4 + self.adjust_label
elif obj["annotations"]["risk"] == 2 and obj["annotations"]["disease"] == 10:
return 5 + self.adjust_label
elif obj["annotations"]["risk"] == 3 and obj["annotations"]["disease"] == 10:
return 6 + self.adjust_label
def generate_target(self,file):
boxes = []
labels = []
with open(file, 'r') as f:
json_data = json.load(f)
boxes.append(self.generate_box(json_data))
labels.append(self.generate_label(json_data))
boxes = torch.as_tensor(boxes, dtype = torch.float32)
labels = torch.as_tensor(labels, dtype = torch.int64)
print(boxes)
target = {}
target["boxes"] = boxes
target["labels"] = labels
return target
def plot_image(self, img_path, annotation):
img = mpimg.imread(img_path)
fig,ax = plt.subplots(1)
ax.imshow(img)
        xtl = annotation[0]
        ytl = annotation[1]
        xbr = annotation[2]
        ybr = annotation[3]
rect = patches.Rectangle((xtl,ytl),(xbr-xtl),(ybr - ytl), linewidth = 1, edgecolor = 'r', facecolor = 'none')
ax.add_patch(rect)
plt.show()
def test_plot_img(self,path):
"""
path is for image data path
"""
        img_idx = path
        lab_idx = path + ".json"
        with open(lab_idx, 'r') as f:
            json_data = json.load(f)
        bbox = self.generate_box(json_data)
        self.plot_image(img_idx, bbox)
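# A hedged usage sketch for the Data helper above; the label-file path is a
# placeholder, and the snippet only shows how a Faster-RCNN style target dict
# is produced from a single annotation file.
if __name__ == "__main__":
    data = Data()
    sample_label = "/path/to/sample_image.jpg.json"  # hypothetical label file
    sample_target = data.generate_target(sample_label)
    print(sample_target["boxes"], sample_target["labels"])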
|
the-stack_106_16431
|
import os
import re
from pathlib import Path
from typing import (
Dict,
Generator,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
Set,
)
from projectreport.searcher.rotating_list import RotatingList
from projectreport.tools.expand_glob import all_possible_paths
def read_all_files_in_folders_print_lines_around_regex(
folders: Sequence[str],
str_pattern: str,
num_lines: int = 2,
recursive: bool = False,
print_lines: bool = True,
ignore_paths: Optional[Sequence[str]] = None,
) -> Dict[str, List[List[str]]]:
"""
Searches for regex in each line of every file in multiple folders. When regex is
matched, will return num_lines lines around the file
:param folders: Folders to search in
:param str_pattern: Pattern to match in a line
:param num_lines: Number of lines around the matching line to return/print,
defaults to 2
:param recursive: Whether to search folders within passed folder, defaults to False
:param print_lines: Whether to print the results, defaults to False
:param ignore_paths: Relative paths to ignore. Globs are accepted, defaults to None
:return: a dictionary where keys are file paths, and values are lists where each
element is a list containing the lines for one match
"""
found_lines = {}
for folder in folders:
found_lines.update(
read_all_files_in_folder_print_lines_around_regex(
folder,
str_pattern,
num_lines=num_lines,
recursive=recursive,
print_lines=print_lines,
ignore_paths=ignore_paths,
)
)
return found_lines
def read_all_files_in_folder_print_lines_around_regex(
file_path: str,
str_pattern: str,
num_lines: int = 2,
recursive: bool = False,
print_lines: bool = True,
ignore_paths: Optional[Sequence[str]] = None,
) -> Dict[str, List[List[str]]]:
"""
Searches for regex in each line of every file in a folder. When regex is matched,
will return num_lines lines around the file
:param file_path: Path of file to search in
:param str_pattern: Pattern to match in a line
:param num_lines: Number of lines around the matching line to return/print,
defaults to 2
:param recursive: Whether to search folders within passed folder, defaults to False
:param print_lines: Whether to print the results, defaults to False
:param ignore_paths: Relative paths to ignore. Globs are accepted, defaults to None
:return: a dictionary where keys are file paths, and values are lists where each
element is a list containing the lines for one match
"""
iterator: Union[
List[Tuple[str, List[str], List[str]]],
Iterator[Tuple[str, List[str], List[str]]],
]
if recursive:
iterator = os.walk(file_path)
else:
iterator = [next(os.walk(file_path))]
if ignore_paths is None:
ignore_paths = []
all_absolute_ignore_paths: Set[Path] = set()
found_lines = {}
def should_ignore_path(path_str: str) -> bool:
path = Path(path_str)
for ignore_path in all_absolute_ignore_paths:
if path == ignore_path or ignore_path in path.parents:
return True
return False
for path, folders, files in iterator:
expanded_ignore_paths = all_possible_paths(ignore_paths, path)
all_absolute_ignore_paths.update(
set([Path(path_str) for path_str in expanded_ignore_paths])
)
if should_ignore_path(path):
# Skip ignored folder
continue
for file in files:
full_path = os.path.join(path, file)
if should_ignore_path(full_path):
# Skip ignored file
continue
lines = read_file_get_lines_around_regex(
full_path, str_pattern, num_lines=num_lines, print_lines=False
)
if lines:
found_lines[full_path] = lines
if print_lines:
print(f"\n\nFound {len(lines)} match in {full_path}")
for line_set in lines:
_print_tracked_lines(line_set)
return found_lines
def read_file_get_lines_around_regex(
file_path: str, str_pattern: str, num_lines: int = 2, print_lines: bool = False
) -> List[List[str]]:
"""
Searches for regex in each line of a file. When regex is matched, will return
num_lines lines around the file
:param file_path: path of file to search in
:param str_pattern: pattern to match in a line
:param num_lines: number of lines around the matching line to return/print,
defaults to 2
:param print_lines: whether to print the results, defaults to False
:return: a list where each element is a list containing the lines for one match
"""
reader = _file_reader(file_path)
lines = _get_lines_around_regex(
str_pattern, reader, num_lines=num_lines, print_lines=print_lines
)
return lines
def _file_reader(filename):
try:
with open(filename, "r", encoding="utf8") as f:
for line in f:
yield line.strip()
except UnicodeDecodeError:
try:
with open(filename, "r", encoding="latin1") as f:
for line in f:
yield line.strip()
except Exception as e:
print(f"Could not read file {filename}: {e}")
return
def _get_lines_around_regex(
str_pattern: str,
lines: Generator[str, None, None],
num_lines: int = 2,
print_lines: bool = False,
) -> List[List[str]]:
pattern = re.compile(str_pattern)
total_num_lines = (
num_lines * 2 + 1
) # 1 line for match, then 2 * num_lines for before and after match
# After finding a match, must delay the print for num_lines as only
# then will we have the lines after
# the match. Set up this list to track at which lines we should print
print_at_lines = []
# Keeps only the last total_num_lines entires
tracked_lines = RotatingList([], total_num_lines)
# Will hold each set of found lines to return at the end
found_lines = []
def record_lines():
found_lines.append(list(tracked_lines))
if print_lines:
_print_tracked_lines(tracked_lines)
# Find and track lines which have matches, printing num_lines after
    line_num = 0
    for i, line in enumerate(lines):
line_num = i + 1
tracked_lines.append(f"{line_num}: {line}")
if _matches_regex(pattern, line):
print_at_lines.append(line_num + num_lines)
if line_num in print_at_lines:
record_lines()
# Print final section if requested line to print at was after the end of the file
if any([print_line_num > line_num for print_line_num in print_at_lines]):
record_lines()
return found_lines
def _print_tracked_lines(lines: Union[RotatingList, List[str]]) -> None:
print("\n" + "\n".join(lines))
def _matches_regex(pattern: re.Pattern, search_str: str) -> bool:
match = re.search(pattern, search_str)
return match is not None
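# Illustrative only: a hedged example of driving the search helpers above to
# look for TODO markers. The folder names and the ignore glob are hypothetical
# values, not defaults of this module.
def _example_search_todos():
    matches = read_all_files_in_folders_print_lines_around_regex(
        folders=['src', 'tests'],
        str_pattern=r'\bTODO\b',
        num_lines=1,
        recursive=True,
        print_lines=False,
        ignore_paths=['**/__pycache__'],
    )
    for path, groups in matches.items():
        print(f'{path}: {len(groups)} match group(s)')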
|
the-stack_106_16432
|
# -*- coding: utf-8 -*-
# TensorFlow Production Example (Evaluating)
#----------------------------------
#
# We pull together everything and create an example
# of best tensorflow production tips
#
# The example we will productionalize is the spam/ham RNN
# from the RNN Chapter.
import os
import re
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()
tf.app.flags.DEFINE_string("storage_folder", "temp", "Where to store model and data.")
tf.app.flags.DEFINE_string('model_file', False, 'Model file location.')
tf.app.flags.DEFINE_boolean('run_unit_tests', False, 'If true, run tests.')
FLAGS = tf.app.flags.FLAGS
# Create a text cleaning function
def clean_text(text_string):
text_string = re.sub(r'([^\s\w]|_|[0-9])+', '', text_string)
text_string = " ".join(text_string.split())
text_string = text_string.lower()
return(text_string)
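# For illustration: clean_text("TensorFlow's #1 :)") returns "tensorflows",
# i.e. punctuation, underscores and digits are stripped, whitespace is
# collapsed, and the result is lower-cased.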
# Load vocab processor
def load_vocab():
vocab_path = os.path.join(FLAGS.storage_folder, "vocab")
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor.restore(vocab_path)
return(vocab_processor)
# Process input data:
def process_data(input_data, vocab_processor):
input_data = clean_text(input_data)
input_data = input_data.split()
processed_input = np.array(list(vocab_processor.transform(input_data)))
return(processed_input)
# Get input function
def get_input_data():
"""
For this function, we just prompt the user for a text message to evaluate
But this function could also potentially read a file in as well.
"""
input_text = input("Please enter a text message to evaluate: ")
vocab_processor = load_vocab()
return(process_data(input_text, vocab_processor))
# Test clean_text function
class clean_test(tf.test.TestCase):
# Make sure cleaning function behaves correctly
    def test_clean_string(self):
with self.test_session():
test_input = '--TensorFlow\'s so Great! Don\t you think so? '
test_expected = 'tensorflows so great don you think so'
test_out = clean_text(test_input)
self.assertEqual(test_expected, test_out)
# Main function
def main(args):
# Get flags
storage_folder = FLAGS.storage_folder
# Get user input text
x_data = get_input_data()
# Load model
graph = tf.Graph()
with graph.as_default():
sess = tf.Session()
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(os.path.join(storage_folder, "model.ckpt")))
saver.restore(sess, os.path.join(storage_folder, "model.ckpt"))
# Get the placeholders from the graph by name
x_data_ph = graph.get_operation_by_name("x_data_ph").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
probability_outputs = graph.get_operation_by_name("probability_outputs").outputs[0]
# Make the prediction
eval_feed_dict = {x_data_ph: x_data, dropout_keep_prob: 1.0}
probability_prediction = sess.run(tf.reduce_mean(probability_outputs, 0), eval_feed_dict)
# Print output (Or save to file or DB connection?)
print('Probability of Spam: {:.4}'.format(probability_prediction[1]))
# Run main module/tf App
if __name__ == "__main__":
if FLAGS.run_unit_tests:
# Perform unit tests
tf.test.main()
else:
# Run evaluation
tf.app.run()
|
the-stack_106_16435
|
import torch
import pickle
import serialization
from copy import deepcopy
import pytest
def test_pytorch():
arr = torch.ones((128, 256, 256), dtype=torch.float32)
arr[8, 8, 8] = 2
arrpkl = pickle.dumps(arr)
data = [
1, "2", arr,
[3, "4", deepcopy(arr), {"arr": deepcopy(arr)}],
{4: "5", "6": 7, "arr": deepcopy(arr), "lst": [deepcopy(arr)]}
]
datapkl = pickle.dumps(data)
for key in (None, "key".encode()):
ser = serialization.serialize(arr, key)
assert len(ser) < 0.5 * len(arrpkl)
deser = serialization.deserialize(ser, key)
assert torch.all(arr == deser)
ser = serialization.serialize(data, key)
assert len(ser) < len(datapkl)
assert len(ser) < len(arrpkl)
deser = serialization.deserialize(ser, key)
assert torch.all(deser[2] == arr)
assert torch.all(deser[3][2] == arr)
assert torch.all(deser[3][3]["arr"] == arr)
assert torch.all(deser[4]["arr"] == arr)
assert torch.all(deser[4]["lst"][0] == arr)
with pytest.raises(RuntimeError):
serialization.deserialize(serialization.serialize(arr, key), "nokey".encode())
|
the-stack_106_16436
|
# This script prints a list of all in-use devices in an organization
# to sdtout or a file (Devices which are part of a network are considered in-use).
# The fields printed are 'serial' and 'model' separated by a comma (,).
#
# You need to have Python 3 and the Requests module installed. You
# can download the module here: https://github.com/kennethreitz/requests
# or install it using pip.
#
# To run the script, enter:
# python invlist.py -k <API key> -o <org name> [-f <file path>]
#
# If option -f is not defined, the script will print to stdout.
#
# To make script chaining easier, all lines not containing a
# device record start with the character @
#
# This file was last modified on 2017-02-23
import sys, getopt, requests, json
def printusertext(p_message):
#prints a line of text that is meant for the user to read
#do not process these lines when chaining scripts
print('@ %s' % p_message)
def printhelp():
#prints help text
printusertext("This is a script that prints a list of an organization's devices to sdtout or a file.")
printusertext('')
printusertext('Usage:')
printusertext('python invlist.py -k <API key> -o <org name> [-f <file path>]')
printusertext('')
printusertext('Use double quotes ("") in Windows to pass arguments containing spaces. Names are case-sensitive.')
def getorgid(p_apikey, p_orgname):
#looks up org id for a specific org name
#on failure returns 'null'
r = requests.get('https://dashboard.meraki.com/api/v0/organizations', headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
if r.status_code != requests.codes.ok:
return 'null'
rjson = r.json()
for record in rjson:
if record['name'] == p_orgname:
return record['id']
return('null')
def getshardurl(p_apikey, p_orgid):
#Looks up shard URL for a specific org. Use this URL instead of 'dashboard.meraki.com'
# when making API calls with API accounts that can access multiple orgs.
#On failure returns 'null'
r = requests.get('https://dashboard.meraki.com/api/v0/organizations/%s/snmp' % p_orgid, headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
if r.status_code != requests.codes.ok:
return 'null'
rjson = r.json()
return(rjson['hostname'])
def getnwlist(p_apikey, p_shardurl, p_orgid):
#returns a list of all networks in an organization
#on failure returns a single record with 'null' name and id
r = requests.get('https://%s/api/v0/organizations/%s/networks' % (p_shardurl, p_orgid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
returnvalue = []
if r.status_code != requests.codes.ok:
returnvalue.append({'name': 'null', 'id': 'null'})
return(returnvalue)
return(r.json())
def getdevicelist(p_apikey, p_shardurl, p_nwid):
#returns a list of all devices in a network
r = requests.get('https://%s/api/v0/networks/%s/devices' % (p_shardurl, p_nwid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
returnvalue = []
if r.status_code != requests.codes.ok:
returnvalue.append({'serial': 'null', 'model': 'null'})
return(returnvalue)
return(r.json())
def main(argv):
#get command line arguments
arg_apikey = 'null'
arg_orgname = 'null'
arg_filepath = 'null'
try:
opts, args = getopt.getopt(argv, 'hk:o:f:')
except getopt.GetoptError:
printhelp()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
printhelp()
sys.exit()
elif opt == '-k':
arg_apikey = arg
elif opt == '-o':
arg_orgname = arg
elif opt == '-f':
arg_filepath = arg
if arg_apikey == 'null' or arg_orgname == 'null':
printhelp()
sys.exit(2)
#get organization id corresponding to org name provided by user
orgid = getorgid(arg_apikey, arg_orgname)
if orgid == 'null':
printusertext('ERROR: Fetching organization failed')
sys.exit(2)
#get shard URL where Org is stored
shardurl = getshardurl(arg_apikey, orgid)
if shardurl == 'null':
printusertext('ERROR: Fetching Meraki cloud shard URL failed')
sys.exit(2)
#get network list for fetched org id
nwlist = getnwlist(arg_apikey, shardurl, orgid)
if nwlist[0]['id'] == 'null':
printusertext('ERROR: Fetching network list failed')
sys.exit(2)
#if user selected to print in file, set flag & open for writing
filemode = False
if arg_filepath != 'null':
try:
f = open(arg_filepath, 'w')
except:
printusertext('ERROR: Unable to open output file for writing')
sys.exit(2)
filemode = True
devicelist = []
for nwrecord in nwlist:
#get devices' list
devicelist = getdevicelist(arg_apikey, shardurl, nwrecord['id'])
#append list to file or stdout
if filemode:
for i in range (0, len(devicelist)):
try:
#MODIFY THE LINE BELOW TO CHANGE OUTPUT FORMAT
f.write('%s,%s\n' % (devicelist[i]['serial'], devicelist[i]['model']))
except:
printusertext('ERROR: Unable to write device info to file')
sys.exit(2)
else:
for i in range (0, len(devicelist)):
#MODIFY THE LINE BELOW TO CHANGE OUTPUT FORMAT
print('%s,%s' % (devicelist[i]['serial'], devicelist[i]['model']))
if __name__ == '__main__':
main(sys.argv[1:])
|
the-stack_106_16438
|
from typing import Callable, Dict, Literal, TypeVar, Union, overload
import anyio.abc
from typing_extensions import ParamSpec
from ...utils import MISSING
from ..base import CommandInteraction
from .base import Callback
from .context import MessageCommand, UserCommand
from .option import CommandType
from .slash import SlashCommand
__all__ = ('CommandRegistrar',)
P = ParamSpec('P')
RT = TypeVar('RT')
Command = Union[SlashCommand[P, RT], MessageCommand[P, RT], UserCommand[P, RT]]
class CommandRegistrar:
"""Root registrar of command handlers.
Attributes:
commands: A dictionary of all registered commands.
"""
commands: Dict[str, Command]
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.commands = {}
def handle_command(
self,
interaction: CommandInteraction,
*,
tg: anyio.abc.TaskGroup
) -> None:
"""Handle the interaction and trigger appropriate callbacks.
There is not much use of this method as a user unless this class
is being extended.
Parameters:
interaction: The interaction to handle.
tg: An anyio task group that will be used to launch callbacks.
"""
command = self.commands.get(interaction.name)
if command is None:
return
command.handle_interaction(interaction, tg=tg)
def register_command(self, command: Command) -> None:
"""Register a command to be added to the internal dictionary.
This should be used over manipulating the internal dictionary.
Parameters:
command: The command to register into the dictionary.
"""
self.commands[command.name] = command
def unregister_command(self, command: Command) -> None:
"""Unregister a command from the internal dictionary.
This will raise a ValueError if the command passed isn't loaded where
it is supposed to or if it was never registered in the first place.
Parameters:
command: The command to unregister from the dictionary.
Raises:
ValueError: The command couldn't be found where it's supposed to
"""
if self.commands.get(command.name) != command:
raise ValueError(
"'command' has not been registered previously or another"
"command is registered in its place"
)
del self.commands[command.name]
def group(
self,
*,
name: str,
description: str
) -> SlashCommand:
"""Register and create a slash command without a callback.
This exist so that slash commands can be created without attaching a
dummy-callback. Context menu commands cannot have subcommands, for that
reason this will always return a slash command.
**Usage:**
```python
from wumpy import interactions
app = interactions.InteractionApp(...)
parent = app.group(name='hello', description='Greet and say hello')
... # Register subcommands
```
Parameters:
name: The name of the command.
description: The description of the command.
Returns:
A registered new Slashcommand without a callback.
"""
command = SlashCommand(name=name, description=description)
self.register_command(command) # type: ignore
return command
@overload
def command(
self,
type: Callback[P, RT]
) -> SlashCommand[P, RT]:
...
@overload
def command(
self,
type: CommandType = CommandType.chat_input,
*,
name: str = MISSING,
description: str = MISSING
) -> Callable[[Callback[P, RT]], SlashCommand[P, RT]]:
...
@overload
def command(
self,
type: Literal[CommandType.message],
*,
name: str = MISSING
) -> Callable[[Callback[P, RT]], MessageCommand[P, RT]]:
...
@overload
def command(
self,
type: Literal[CommandType.user],
*,
name: str = MISSING
) -> Callable[[Callback[P, RT]], UserCommand[P, RT]]:
...
def command(
self,
type: Union[CommandType, Callback[P, RT]] = CommandType.chat_input,
*,
name: str = MISSING,
description: str = MISSING
) -> Union[Command[P, RT], Callable[[Callback[P, RT]], Command[P, RT]]]:
"""Register and create a new application command through a decorator.
The decorator can be used both with and without the parentheses.
Examples:
```python
from wumpy.interactions import InteractionApp, CommandInteraction
app = InteractionApp(...)
@app.command()
async def random(interaction: CommandInteraction) -> None:
await interaction.respond('4') # chosen by fair dice roll
```
Parameters:
type: The type of the command. Defaults to a slash command.
name: The name of the command.
description: The description of the command.
Returns:
A command or function that returns a command. Depending on whether
the decorator was used with or without parentheses.
Exceptions:
ValueError: The type wasn't a CommandType value
"""
def decorator(func: Callback[P, RT]) -> Command[P, RT]:
if type is CommandType.chat_input:
command = SlashCommand(func, name=name, description=description)
elif type is CommandType.message:
command = MessageCommand(func, name=name)
elif type is CommandType.user:
command = UserCommand(func, name=name)
else:
raise ValueError("Unknown value of 'type':", type)
self.register_command(command) # type: ignore
return command
if callable(type):
return decorator(type)
return decorator
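# A hedged usage sketch for the decorator above, registering a user context
# menu command. It relies only on what this module documents (the
# CommandType.user overload and CommandInteraction.respond); a real
# user-command callback may receive additional arguments.
def _example_register_user_command() -> CommandRegistrar:
    registrar = CommandRegistrar()
    @registrar.command(CommandType.user, name='Wave')
    async def wave(interaction: CommandInteraction) -> None:
        await interaction.respond('Hello there!')
    return registrar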
|
the-stack_106_16439
|
# -*- coding: utf-8 -*-
import os
import re
import shutil
from concurrent.futures import ThreadPoolExecutor
import logging
import asyncio
import discord
from ..core.app import App
from ..sources import crawler_list
from ..utils.uploader import upload
from ..binders import available_formats
logger = logging.getLogger('DISCORD_BOT')
class DiscordBot(discord.Client):
# Store user message handlers
handlers = dict()
# The special signal character for crawler commands
signal = os.getenv('DISCORD_SIGNAL_CHAR') or '!'
def start_bot(self):
self.run(os.getenv('DISCORD_TOKEN'))
# end def
async def on_ready(self):
        print('Discord bot is online!')
activity = discord.Game(name="🔥Ready For Smelting🔥")
await self.change_presence(status=discord.Status.online, activity=activity)
# end def
async def on_message(self, message):
if message.author == self.user:
return # I am not crazy to talk with myself
# end if
if message.author.bot:
return # Other bots are not edible
# end if
if isinstance(message.channel, discord.abc.PrivateChannel):
await self.handle_message(message)
elif message.content == '!help':
await self.send_public_text(
message,
'Enter `%slncrawl` to start a new session of **Lightnovel Crawler**' % self.signal
)
elif message.content == self.signal + 'lncrawl':
uid = message.author.id
await self.send_public_text(
message, "I will message you privately <@%s>" % uid)
handler = self.handlers.get(uid)
if handler:
handler.destroy()
# end if
await self.handle_message(message)
else:
return # It goes over the head
# end if
# end def
async def send_public_text(self, message, text):
async with message.channel.typing():
await message.channel.send(text)
# end def
async def handle_message(self, message):
try:
user = message.author
handler = self.init_handler(user.id)
await handler.process(message)
except Exception as err:
logger.exception('While handling this message: %s', message)
try:
await message.channel.send(
'Sorry! We had some trouble processing your request. Please try again.\n\n' +
'Report [here](https://github.com/dipu-bd/lightnovel-crawler/issues/new/choose)' +
' if this problem continues with this message: `' +
str(err) + '`'
)
except Exception:
pass # this world is doomed!!
# end try
# end try
# end def
def init_handler(self, uid):
if not self.handlers.get(uid):
self.handlers[uid] = MessageHandler(self)
# end if
return self.handlers.get(uid)
# end def
# end def
class MessageHandler:
def __init__(self, client):
self.app = App()
self.client = client
self.state = None
self.executors = ThreadPoolExecutor(1)
# end def
def destroy(self):
try:
self.app.destroy()
self.executors.shutdown(False)
except Exception:
logger.exception('While destroying MessageHandler')
finally:
self.client.handlers.pop(self.user.id)
shutil.rmtree(self.app.output_path, ignore_errors=True)
# end try
# end def
async def send(self, *contents):
for text in contents:
if text:
# await self.client.send_typing(self.user)
async with self.user.typing():
await self.user.send(text)
# end if
# end for
# end def
async def process(self, message):
self.message = message
self.user = message.author
if not self.state:
await self.send(
'-' * 80 + '\n' +
('Hello %s\n' % self.user.name) +
'*Lets make reading lightnovels great again!*\n' +
'-' * 80 + '\n'
)
self.state = self.get_novel_url
# end if
await self.state()
# end def
async def get_novel_url(self):
await self.send(
'I recognize these two categories:\n'
'- Profile page url of a lightnovel.\n'
'- A query to search your lightnovel.',
'What are you looking for?'
)
self.state = self.handle_novel_url
# end def
async def handle_novel_url(self):
try:
self.app.user_input = self.message.content.strip()
self.app.init_search()
except Exception:
await self.send(
'Sorry! I only know these sources:\n' +
'\n'.join(['- %s' % x for x in crawler_list.keys()]),
'Enter something again.')
# end try
if len(self.app.user_input) < 4:
await self.send('Your query is too short')
return
# end if
if self.app.crawler:
await self.send('Got your page link')
await self.get_novel_info()
else:
await self.send(
'Searching %d sources for "%s"\n' % (
len(self.app.crawler_links), self.app.user_input),
'Please do not type anything before I reply!'
)
await self.display_novel_selection()
# end if
# end def
async def display_novel_selection(self):
async with self.user.typing():
self.app.search_novel()
if len(self.app.search_results) == 0:
await self.send('No novels found for "%s"' % self.app.user_input)
return
# end if
if len(self.app.search_results) == 1:
self.selected_novel = self.app.search_results[0]
await self.display_sources_selection()
return
# end if
await self.send(
('Found %d novels:\n' % len(self.app.search_results)) +
'\n'.join([
'%d. **%s** `%d sources`' % (
i + 1,
item['title'],
len(item['novels'])
) for i, item in enumerate(self.app.search_results)
]) + '\n' +
'Enter name or index of your novel.\n' +
'Send `!cancel` to stop this session.'
)
self.state = self.handle_novel_selection
# end def
async def handle_novel_selection(self):
text = self.message.content.strip()
if text.startswith('!cancel'):
await self.get_novel_url()
return
# end if
async with self.user.typing():
match_count = 0
selected = None
for i, res in enumerate(self.app.search_results):
if str(i + 1) == text:
selected = res
match_count += 1
elif text.isdigit() or len(text) < 3:
pass
elif res['title'].lower().find(text) != -1:
selected = res
match_count += 1
# end if
# end for
if match_count != 1:
await self.send(
'Sorry! You should select *one* novel from the list (%d selected).' % match_count)
await self.display_novel_selection()
return
# end if
self.selected_novel = selected
await self.display_sources_selection()
# end def
async def display_sources_selection(self):
async with self.user.typing():
await self.send(
(
'**%s** is found in %d sources:\n' % (
self.selected_novel['title'], len(self.selected_novel['novels']))
) + '\n'.join([
'%d. <%s> %s' % (
i + 1,
item['url'],
item['info'] if 'info' in item else ''
) for i, item in enumerate(self.selected_novel['novels'])
]) + '\n' +
'Enter index or name of your source.\n' +
'Send `!cancel` to stop this session.'
)
self.state = self.handle_sources_to_search
# end def
async def handle_sources_to_search(self):
if len(self.selected_novel['novels']) == 1:
novel = self.selected_novel['novels'][0]
await self.handle_search_result(novel)
return
# end if
text = self.message.content.strip()
if text.startswith('!cancel'):
await self.get_novel_url()
return
# end if
match_count = 0
selected = None
for i, res in enumerate(self.selected_novel['novels']):
if str(i + 1) == text:
selected = res
match_count += 1
elif text.isdigit() or len(text) < 3:
pass
elif res['url'].lower().find(text) != -1:
selected = res
match_count += 1
# end if
# end for
if match_count != 1:
await self.send(
'Sorry! You should select *one* source from the list (%d selected).' % match_count)
await self.display_sources_selection()
return
# end if
await self.handle_search_result(selected)
# end def
async def handle_search_result(self, novel):
await self.send('Selected: %s' % novel['url'])
self.app.init_crawler(novel['url'])
await self.get_novel_info()
# end def
async def get_novel_info(self):
if not self.app.crawler:
await self.send('Could not find any crawler to get your novel')
self.state = self.get_novel_info
return
# end if
# TODO: Handle login here
await self.send('Getting information about your novel...')
async with self.user.typing():
self.app.get_novel_info()
# Setup output path
good_name = os.path.basename(self.app.output_path)
output_path = os.path.abspath(
os.path.join('.discord_bot_output', str(self.user.id), good_name))
if os.path.exists(output_path):
shutil.rmtree(output_path, ignore_errors=True)
# end if
os.makedirs(output_path, exist_ok=True)
self.app.output_path = output_path
# Get chapter range
await self.send(
'It has %d volumes and %d chapters.' % (
len(self.app.crawler.volumes),
len(self.app.crawler.chapters)
)
)
await self.display_range_selection()
# end def
async def display_range_selection(self):
await self.send('\n'.join([
'Now you can send the following commands to modify what to download:',
'- To download everything send `!all` or pass `!cancel` to stop.',
'- Send `!last` followed by a number to download last few chapters. '
'If it does not followed by a number, last 50 chapters will be downloaded.',
'- Similarly you can send `!first` followed by a number to get first few chapters.',
'- Send `!volume` followed by volume numbers to download.',
            'To download a range of chapters, send `!chapter` followed by ' +
'two chapter numbers or urls separated by *space*. ' +
('Chapter number must be between 1 and %d, ' % len(self.app.crawler.chapters)) +
('and chapter urls should be from <%s>.' %
(self.app.crawler.home_url))
]))
self.state = self.handle_range_selection
# end def
async def handle_range_selection(self):
text = self.message.content.strip()
if text.startswith('!cancel'):
await self.get_novel_url()
return
# end if
if text.startswith('!all'):
self.app.chapters = self.app.crawler.chapters[:]
elif text.startswith('!first'):
text = text[len('!first'):].strip()
n = int(text) if text.isdigit() else 50
n = 50 if n < 0 else n
self.app.chapters = self.app.crawler.chapters[:n]
elif text.startswith('!last'):
text = text[len('!last'):].strip()
n = int(text) if text.isdigit() else 50
n = 50 if n < 0 else n
self.app.chapters = self.app.crawler.chapters[-n:]
elif text.startswith('!volume'):
text = text[len('!volume'):].strip()
selected = re.findall(r'\d+', text)
await self.send(
'Selected volumes: ' + ', '.join(selected),
)
selected = [int(x) for x in selected]
self.app.chapters = [
chap for chap in self.app.crawler.chapters
if selected.count(chap['volume']) > 0
]
elif text.startswith('!chapter'):
text = text[len('!chapter'):].strip()
pair = text.split(' ')
if len(pair) == 2:
def resolve_chapter(name):
cid = 0
if name.isdigit():
cid = int(name)
else:
cid = self.app.crawler.get_chapter_index_of(name)
# end if
return cid - 1
# end def
first = resolve_chapter(pair[0])
second = resolve_chapter(pair[1])
if first > second:
second, first = first, second
# end if
                if first >= 0 and second < len(self.app.chapters):
self.app.chapters = self.app.crawler.chapters[first:second]
# end if
# end if
if len(self.app.chapters) == 0:
await self.send('Chapter range is not valid. Please try again')
return
# end if
else:
await self.send('Sorry! I did not recognize your input. Please try again')
return
# end if
if len(self.app.chapters) == 0:
await self.send('You have not selected any chapters. Please select at least one')
return
# end if
await self.send('Got your range selection')
await self.display_output_selection()
# end def
async def display_output_selection(self):
await self.send('\n'.join([
'Now you can choose book formats to download:',
'- Send `!cancel` to stop.',
'- Send `!all` to download all formats _(it may take a very very long time!)_',
'To select specific output formats:',
'- Send `pdf` to download only pdf format',
'- Send `mobi pdf` to download both pdf and mobi formats.',
'- Send `{space separated format names}` for multiple formats',
'Available formats: `' + '` `'.join(available_formats) + '`',
]))
self.state = self.handle_output_selection
# end def
async def handle_output_selection(self):
text = self.message.content.strip()
if text.startswith('!cancel'):
await self.get_novel_url()
return
# end if
if text.startswith('!all'):
self.app.output_formats = None
else:
output_format = set(re.findall(
'|'.join(available_formats),
text.lower()
))
if len(output_format):
self.app.output_formats = {
x: (x in output_format)
for x in available_formats
}
await self.send('I will generate e-book in (%s) format' % (', ' .join(output_format)))
else:
await self.send('Sorry! I did not recognize your input. Please try again')
return
# end if
# end if
await self.send('\n'.join([
'Starting download...',
'Send anything to view status.',
'Send `!cancel` to stop it.',
]))
self.status = ['', '']
self.state = self.report_download_progress
try:
self.executors.submit(self.start_download)
except Exception:
logger.exception('Download failure: %s', self.user.id)
# end try
# end def
def start_download(self):
self.app.pack_by_volume = False
self.status = ['**%s**' % self.app.crawler.novel_title]
self.status.append('Downloading %d chapters...' % len(self.app.chapters))
self.app.start_download()
self.status.append('Binding books...')
self.app.bind_books()
self.status[-1] = 'Book binding completed.'
self.status.append('Compressing output folder...')
self.app.compress_output()
self.status[-1] = 'Compressed output folder.'
self.status.append('Uploading files...')
for archive in self.app.archived_outputs:
asyncio.run_coroutine_threadsafe(
self.upload_file(archive),
self.client.loop
).result()
# end for
self.destroy()
# end def
async def upload_file(self, archive):
file_size = os.stat(archive).st_size
if file_size > 7.99 * 1024 * 1024:
await self.send('File %s exceeds 8MB. Uploading To Google Drive.' % os.path.basename(archive))
description = 'Generated By : Discord Bot Ebook Smelter'
link_id = upload(archive, description)
if link_id:
await self.send('https://drive.google.com/open?id=%s' % link_id)
else:
await self.send('Failed to upload to google drive')
# end if
else:
k = 0
while(file_size > 1024 and k < 3):
k += 1
file_size /= 1024.0
# end while
await self.send(
'Uploading %s [%d%s] ...' % (
os.path.basename(archive),
int(file_size * 100) / 100.0,
['B', 'KB', 'MB', 'GB'][k]
)
)
async with self.user.typing():
# await message.channel.send('Hello', file=discord.File('cool.png', 'testing.png'))
await self.user.send(
'Here you go ! ',
file=discord.File(
open(archive, 'rb'),
os.path.basename(archive)
)
)
# end if
# end def
async def report_download_progress(self):
text = self.message.content.strip()
if text == '!cancel':
await self.send('Closing the session')
self.destroy()
await self.send('Session is now closed. Type *anything* to create a new one.')
return
# end if
async with self.user.typing():
if self.app.progress < len(self.app.chapters):
self.status[1] = '%d out of %d chapters has been downloaded.' % (
self.app.progress, len(self.app.chapters))
else:
self.status[1] = 'Download complete.'
# end if
await self.send(
'\n'.join(self.status).strip() + '\n\n' +
'Send `!cancel` to stop'
)
# end def
# end class
|
the-stack_106_16441
|
"""IPv4 Static Routes Classes."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from .devicerecords import DeviceRecords
from fmcapi.api_objects.object_services.networkaddresses import NetworkAddresses
from fmcapi.api_objects.object_services.slamonitors import SLAMonitors
from fmcapi.api_objects.object_services.hosts import Hosts
from fmcapi.api_objects.object_services.networkgroups import NetworkGroups
import logging
class IPv4StaticRoutes(APIClassTemplate):
"""The IPv4StaticRoute Object in the FMC."""
VALID_JSON_DATA = [
"id",
"name",
"interfaceName",
"selectedNetworks",
"gateway",
"routeTracking",
"metricValue",
"isTunneled",
]
VALID_FOR_KWARGS = VALID_JSON_DATA + ["device_name"]
PREFIX_URL = "/devices/devicerecords"
URL_SUFFIX = None
REQUIRED_FOR_POST = ["interfaceName", "selectedNetworks", "gateway"]
REQUIRED_FOR_PUT = ["id", "device_id"]
def __init__(self, fmc, **kwargs):
"""
Initialize IPv4StaticRoutes object.
Set self.type to "IPv4StaticRoute" and parse the kwargs.
:param fmc (object): FMC object
:param **kwargs: Any other values passed during instantiation.
:return: None
"""
super().__init__(fmc, **kwargs)
logging.debug("In __init__() for IPv4StaticRoute class.")
self.type = "IPv4StaticRoute"
self.parse_kwargs(**kwargs)
def parse_kwargs(self, **kwargs):
"""
Parse the kwargs and set self variables to match.
:return: None
"""
super().parse_kwargs(**kwargs)
logging.debug("In parse_kwargs() for IPv4StaticRoute class.")
if "device_name" in kwargs:
self.device(device_name=kwargs["device_name"])
def device(self, device_name):
"""
Associate device to this route.
:param device_name: (str) Name of device.
:return: None
"""
logging.debug("In device() for IPv4StaticRoute class.")
device1 = DeviceRecords(fmc=self.fmc)
device1.get(name=device_name)
if "id" in device1.__dict__:
self.device_id = device1.id
self.URL = f"{self.fmc.configuration_url}{self.PREFIX_URL}/{self.device_id}/routing/ipv4staticroutes"
self.device_added_to_url = True
else:
logging.warning(
f"Device {device_name} not found. Cannot set up device for IPv4StaticRoute."
)
def networks(self, action, networks):
"""
Set of networks associated with this route.
:param action: (str) 'add', 'remove', or 'clear'
:param networks: (list) List of IP Hosts, IP Networks, and/or Network Groups.
:return: None
"""
logging.info("In networks() for IPv4StaticRoute class.")
if action == "add":
# Valid objects are IPHost, IPNetwork and NetworkGroup.
# Create a dictionary to contain all three object type.
ipaddresses_json = NetworkAddresses(fmc=self.fmc).get()
networkgroup_json = NetworkGroups(fmc=self.fmc).get()
items = ipaddresses_json.get("items", []) + networkgroup_json.get(
"items", []
)
for network in networks:
# Find the matching object name in the dictionary if it exists
net1 = list(filter(lambda i: i["name"] == network, items))
if len(net1) > 0:
if "selectedNetworks" in self.__dict__:
# Check to see if network already exists
exists = list(
filter(
lambda i: i["id"] == net1[0]["id"],
self.selectedNetworks,
)
)
if "id" in exists:
logging.warning(
f'Network "{network}" already exists in selectedNetworks.'
)
else:
self.selectedNetworks.append(
{
"type": net1[0]["type"],
"id": net1[0]["id"],
"name": net1[0]["name"],
}
)
else:
self.selectedNetworks = [
{
"type": net1[0]["type"],
"id": net1[0]["id"],
"name": net1[0]["name"],
}
]
else:
logging.warning(
f'Network "{network}" not found. Cannot set up device for IPv4StaticRoute.'
)
elif action == "remove":
ipaddresses_json = NetworkAddresses(fmc=self.fmc).get()
networkgroup_json = NetworkGroups(fmc=self.fmc).get()
items = ipaddresses_json.get("items", []) + networkgroup_json.get(
"items", []
)
for network in networks:
net1 = list(filter(lambda i: i["name"] == network, items))
if len(net1) > 0:
if "selectedNetworks" in self.__dict__:
self.selectedNetworks = list(
filter(
lambda i: i["id"] != net1[0]["id"],
self.selectedNetworks,
)
)
else:
logging.warning(
"No selectedNetworks found for this Device's IPv4StaticRoute."
)
else:
logging.warning(
f'Network "{network}" not found. Cannot set up device for IPv4StaticRoute.'
)
elif action == "clear":
if "selectedNetworks" in self.__dict__:
del self.selectedNetworks
logging.info(
"All selectedNetworks removed from this IPv4StaticRoute object."
)
def gw(self, name):
"""
Gateway for this route.
:param name: (str) Name of object that is the gateway address.
:return: None
"""
logging.info("In gw() for IPv4StaticRoute class.")
gw1 = Hosts(fmc=self.fmc)
gw1.get(name=name)
if "id" in gw1.__dict__:
self.gateway = {
"object": {"type": gw1.type, "id": gw1.id, "name": gw1.name}
}
else:
logging.warning(
f"Network {name} not found. Cannot set up device for IPv4StaticRoute."
)
def ipsla(self, name):
"""
SLA Monitor to assign to this route.
:param name: (str) Name of SLAMonitors() object.
:return: None
"""
logging.info("In ipsla() for IPv4StaticRoute class.")
ipsla1 = SLAMonitors(fmc=self.fmc)
ipsla1.get(name=name)
if "id" in ipsla1.__dict__:
self.routeTracking = {
"type": ipsla1.type,
"id": ipsla1.id,
"name": ipsla1.name,
}
else:
logging.warning(
f"Object {name} not found. Cannot set up device for IPv4StaticRoute."
)
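# Illustrative usage sketch (added for clarity; not part of the original module).
# It assumes an already authenticated fmcapi FMC object, a registered device named
# "ftd01", and network/host objects ("BranchNet", "BranchGateway") that already exist
# on the FMC; the post() call is inherited from APIClassTemplate and is assumed here.
def example_add_static_route(fmc):
    """Sketch: build and push one IPv4 static route to a managed device."""
    route = IPv4StaticRoutes(fmc=fmc, name="branch_route")
    route.device(device_name="ftd01")  # resolve the device id and build the URL
    route.networks(action="add", networks=["BranchNet"])  # destination network objects
    route.gw(name="BranchGateway")  # gateway is looked up as a Hosts() object
    route.interfaceName = "outside"
    return route.post()  # assumed APIClassTemplate method that POSTs the route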
|
the-stack_106_16443
|
# Determine if a string has all unique characters.
def is_unique(s):
for x in range(0, len(s)):
for y in range(x+1,len(s)):
if s[x]==s[y]:
return False
return True
# Better: use a 128-entry boolean array (one slot per ASCII character) and return False early for strings longer than 128.
# That gives O(n) runtime (effectively O(1), since the inner work is bounded by 128 characters).
def is_unique2(s):
if len(s)>128:
return False
charset=[False]*128
for letter in s:
key=ord(letter)
if charset[key]:
return False
charset[key]=True
return True
# Check if one string is a permutation of the other
def is_permutation(string_one, string_two):
if (len(string_one)!=len(string_two)):
return False
char_set = [0]*128
for letter in string_one:
char_set[ord(letter)]+=1
for letter in string_two:
char_set[ord(letter)]-=1
if char_set[ord(letter)] < 0:
return False
return True
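# Equivalent sketch using the standard library (added for illustration; the tests
# below exercise only the functions above): collections.Counter builds the same
# per-character counts in O(n).
def is_permutation_counter(string_one, string_two):
    from collections import Counter
    return Counter(string_one) == Counter(string_two)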
import unittest
class TestStringsQuestions(unittest.TestCase):
def test_is_permutation(self):
self.assertTrue(is_permutation("i","i"))
self.assertFalse(is_permutation("hi","i"))
def test_is_unique(self):
self.assertTrue(is_unique('asdfghjk'))
self.assertFalse(is_unique('ff'))
def test_is_unique2(self):
self.assertTrue(is_unique2('asdfghjk'))
self.assertFalse(is_unique2('ff'))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_16444
|
# Copyright 2013-2018 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for external dependencies.
# Custom logic for several other packages are in separate files.
import copy
import functools
import os
import re
import stat
import json
import shlex
import shutil
import textwrap
from enum import Enum
from pathlib import PurePath
from .. import mlog
from .. import mesonlib
from ..compilers import clib_langs
from ..mesonlib import MesonException, OrderedSet
from ..mesonlib import Popen_safe, version_compare_many, version_compare, listify
# These must be defined in this file to avoid cyclical references.
packages = {}
_packages_accept_language = set()
class DependencyException(MesonException):
'''Exceptions raised while trying to find dependencies'''
class DependencyMethods(Enum):
# Auto means to use whatever dependency checking mechanisms in whatever order meson thinks is best.
AUTO = 'auto'
PKGCONFIG = 'pkg-config'
QMAKE = 'qmake'
# Just specify the standard link arguments, assuming the operating system provides the library.
SYSTEM = 'system'
# This is only supported on OSX - search the frameworks directory by name.
EXTRAFRAMEWORK = 'extraframework'
# Detect using the sysconfig module.
SYSCONFIG = 'sysconfig'
# Specify using a "program"-config style tool
CONFIG_TOOL = 'config-tool'
    # For backwards compatibility
SDLCONFIG = 'sdlconfig'
CUPSCONFIG = 'cups-config'
PCAPCONFIG = 'pcap-config'
LIBWMFCONFIG = 'libwmf-config'
# Misc
DUB = 'dub'
class Dependency:
@classmethod
def _process_method_kw(cls, kwargs):
method = kwargs.get('method', 'auto')
if method not in [e.value for e in DependencyMethods]:
raise DependencyException('method {!r} is invalid'.format(method))
method = DependencyMethods(method)
        # This sets per-tool config methods which are deprecated to the new
# generic CONFIG_TOOL value.
if method in [DependencyMethods.SDLCONFIG, DependencyMethods.CUPSCONFIG,
DependencyMethods.PCAPCONFIG, DependencyMethods.LIBWMFCONFIG]:
mlog.warning(textwrap.dedent("""\
Configuration method {} has been deprecated in favor of
'config-tool'. This will be removed in a future version of
meson.""".format(method)))
method = DependencyMethods.CONFIG_TOOL
# Set the detection method. If the method is set to auto, use any available method.
# If method is set to a specific string, allow only that detection method.
if method == DependencyMethods.AUTO:
methods = cls.get_methods()
elif method in cls.get_methods():
methods = [method]
else:
raise DependencyException(
'Unsupported detection method: {}, allowed methods are {}'.format(
method.value,
mlog.format_list([x.value for x in [DependencyMethods.AUTO] + cls.get_methods()])))
return methods
def __init__(self, type_name, kwargs):
self.name = "null"
self.version = None
self.language = None # None means C-like
self.is_found = False
self.type_name = type_name
self.compile_args = []
self.link_args = []
# Raw -L and -l arguments without manual library searching
# If None, self.link_args will be used
self.raw_link_args = None
self.sources = []
self.methods = self._process_method_kw(kwargs)
def __repr__(self):
s = '<{0} {1}: {2}>'
return s.format(self.__class__.__name__, self.name, self.is_found)
def get_compile_args(self):
return self.compile_args
def get_link_args(self, raw=False):
if raw and self.raw_link_args is not None:
return self.raw_link_args
return self.link_args
def found(self):
return self.is_found
def get_sources(self):
"""Source files that need to be added to the target.
As an example, gtest-all.cc when using GTest."""
return self.sources
@staticmethod
def get_methods():
return [DependencyMethods.AUTO]
def get_name(self):
return self.name
def get_version(self):
if self.version:
return self.version
else:
return 'unknown'
def get_exe_args(self, compiler):
return []
def need_openmp(self):
return False
def need_threads(self):
return False
def get_pkgconfig_variable(self, variable_name, kwargs):
raise DependencyException('{!r} is not a pkgconfig dependency'.format(self.name))
def get_configtool_variable(self, variable_name):
raise DependencyException('{!r} is not a config-tool dependency'.format(self.name))
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
"""Create a new dependency that contains part of the parent dependency.
The following options can be inherited:
            links -- all link_with arguments
includes -- all include_directory and -I/-isystem calls
sources -- any source, header, or generated sources
compile_args -- any compile args
link_args -- any link args
        Additionally the new dependency will have the version parameter of its
parent (if any) and the requested values of any dependencies will be
added as well.
"""
        raise RuntimeError('Unreachable code in partial_dependency called')
class InternalDependency(Dependency):
def __init__(self, version, incdirs, compile_args, link_args, libraries, whole_libraries, sources, ext_deps):
super().__init__('internal', {})
self.version = version
self.is_found = True
self.include_directories = incdirs
self.compile_args = compile_args
self.link_args = link_args
self.libraries = libraries
self.whole_libraries = whole_libraries
self.sources = sources
self.ext_deps = ext_deps
def get_pkgconfig_variable(self, variable_name, kwargs):
raise DependencyException('Method "get_pkgconfig_variable()" is '
'invalid for an internal dependency')
def get_configtool_variable(self, variable_name):
raise DependencyException('Method "get_configtool_variable()" is '
'invalid for an internal dependency')
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
compile_args = self.compile_args.copy() if compile_args else []
link_args = self.link_args.copy() if link_args else []
libraries = self.libraries.copy() if links else []
whole_libraries = self.whole_libraries.copy() if links else []
sources = self.sources.copy() if sources else []
includes = self.include_directories.copy() if includes else []
deps = [d.get_partial_dependency(
compile_args=compile_args, link_args=link_args, links=links,
includes=includes, sources=sources) for d in self.ext_deps]
return InternalDependency(
self.version, includes, compile_args, link_args, libraries,
whole_libraries, sources, deps)
class ExternalDependency(Dependency):
def __init__(self, type_name, environment, language, kwargs):
super().__init__(type_name, kwargs)
self.env = environment
self.name = type_name # default
self.is_found = False
self.language = language
self.version_reqs = kwargs.get('version', None)
if isinstance(self.version_reqs, str):
self.version_reqs = [self.version_reqs]
self.required = kwargs.get('required', True)
self.silent = kwargs.get('silent', False)
self.static = kwargs.get('static', False)
if not isinstance(self.static, bool):
raise DependencyException('Static keyword must be boolean')
# Is this dependency for cross-compilation?
if 'native' in kwargs and self.env.is_cross_build():
self.want_cross = not kwargs['native']
else:
self.want_cross = self.env.is_cross_build()
self.clib_compiler = None
# Set the compiler that will be used by this dependency
# This is only used for configuration checks
if self.want_cross:
compilers = self.env.coredata.cross_compilers
else:
compilers = self.env.coredata.compilers
# Set the compiler for this dependency if a language is specified,
# else try to pick something that looks usable.
if self.language:
if self.language not in compilers:
m = self.name.capitalize() + ' requires a {0} compiler, but ' \
'{0} is not in the list of project languages'
raise DependencyException(m.format(self.language.capitalize()))
self.clib_compiler = compilers[self.language]
else:
# Try to find a compiler that can find C libraries for
# running compiler.find_library()
for lang in clib_langs:
self.clib_compiler = compilers.get(lang, None)
if self.clib_compiler:
break
def get_compiler(self):
return self.clib_compiler
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
new = copy.copy(self)
if not compile_args:
new.compile_args = []
if not link_args:
new.link_args = []
if not sources:
new.sources = []
return new
def log_details(self):
return ''
def log_info(self):
return ''
def log_tried(self):
return ''
# Check if dependency version meets the requirements
def _check_version(self):
if not self.is_found:
return
if self.version_reqs:
# an unknown version can never satisfy any requirement
if not self.version:
found_msg = ['Dependency', mlog.bold(self.name), 'found:']
found_msg += [mlog.red('NO'), 'unknown version, but need:',
self.version_reqs]
mlog.log(*found_msg)
if self.required:
m = 'Unknown version of dependency {!r}, but need {!r}.'
raise DependencyException(m.format(self.name, self.version_reqs))
else:
(self.is_found, not_found, found) = \
version_compare_many(self.version, self.version_reqs)
if not self.is_found:
found_msg = ['Dependency', mlog.bold(self.name), 'found:']
found_msg += [mlog.red('NO'),
'found {!r} but need:'.format(self.version),
', '.join(["'{}'".format(e) for e in not_found])]
if found:
found_msg += ['; matched:',
', '.join(["'{}'".format(e) for e in found])]
mlog.log(*found_msg)
if self.required:
m = 'Invalid version of dependency, need {!r} {!r} found {!r}.'
raise DependencyException(m.format(self.name, not_found, self.version))
return
class NotFoundDependency(Dependency):
def __init__(self, environment):
super().__init__('not-found', {})
self.env = environment
self.name = 'not-found'
self.is_found = False
class ConfigToolDependency(ExternalDependency):
"""Class representing dependencies found using a config tool."""
tools = None
tool_name = None
__strip_version = re.compile(r'^[0-9.]*')
def __init__(self, name, environment, language, kwargs):
super().__init__('config-tool', environment, language, kwargs)
self.name = name
self.native = kwargs.get('native', False)
self.tools = listify(kwargs.get('tools', self.tools))
req_version = kwargs.get('version', None)
tool, version = self.find_config(req_version)
self.config = tool
self.is_found = self.report_config(version, req_version)
if not self.is_found:
self.config = None
return
self.version = version
if getattr(self, 'finish_init', None):
self.finish_init(self)
def _sanitize_version(self, version):
"""Remove any non-numeric, non-point version suffixes."""
m = self.__strip_version.match(version)
if m:
# Ensure that there isn't a trailing '.', such as an input like
# `1.2.3.git-1234`
return m.group(0).rstrip('.')
return version
@classmethod
def factory(cls, name, environment, language, kwargs, tools, tool_name, finish_init=None):
"""Constructor for use in dependencies that can be found multiple ways.
In addition to the standard constructor values, this constructor sets
the tool_name and tools values of the instance.
"""
# This deserves some explanation, because metaprogramming is hard.
# This uses type() to create a dynamic subclass of ConfigToolDependency
# with the tools and tool_name class attributes set, this class is then
# instantiated and returned. The reduce function (method) is also
# attached, since python's pickle module won't be able to do anything
# with this dynamically generated class otherwise.
def reduce(self):
return (cls._unpickle, (), self.__dict__)
sub = type('{}Dependency'.format(name.capitalize()), (cls, ),
{'tools': tools, 'tool_name': tool_name, '__reduce__': reduce, 'finish_init': staticmethod(finish_init)})
return sub(name, environment, language, kwargs)
@classmethod
def _unpickle(cls):
return cls.__new__(cls)
    def find_config(self, versions=None):
        """Helper method that searches for config tool binaries in PATH and
returns the one that best matches the given version requirements.
"""
if not isinstance(versions, list) and versions is not None:
versions = listify(versions)
if self.env.is_cross_build() and not self.native:
cross_file = self.env.cross_info.config['binaries']
try:
tools = [cross_file[self.tool_name]]
except KeyError:
mlog.warning('No entry for {0} specified in your cross file. '
'Falling back to searching PATH. This may find a '
'native version of {0}!'.format(self.tool_name))
tools = self.tools
else:
tools = self.tools
best_match = (None, None)
for tool in tools:
try:
p, out = Popen_safe([tool, '--version'])[:2]
except (FileNotFoundError, PermissionError):
continue
if p.returncode != 0:
continue
out = self._sanitize_version(out.strip())
# Some tools, like pcap-config don't supply a version, but also
# don't fail with --version, in that case just assume that there is
# only one version and return it.
if not out:
return (tool, None)
if versions:
is_found = version_compare_many(out, versions)[0]
# This allows returning a found version without a config tool,
# which is useful to inform the user that you found version x,
# but y was required.
if not is_found:
tool = None
if best_match[1]:
if version_compare(out, '> {}'.format(best_match[1])):
best_match = (tool, out)
else:
best_match = (tool, out)
return best_match
def report_config(self, version, req_version):
"""Helper method to print messages about the tool."""
if self.config is None:
if version is not None:
mlog.log('Found', mlog.bold(self.tool_name), repr(version),
mlog.red('NO'), '(needed', req_version, ')')
else:
mlog.log('Found', mlog.bold(self.tool_name), repr(req_version),
mlog.red('NO'))
return False
mlog.log('Found {}:'.format(self.tool_name), mlog.bold(shutil.which(self.config)),
'({})'.format(version))
return True
def get_config_value(self, args, stage):
p, out, err = Popen_safe([self.config] + args)
if p.returncode != 0:
if self.required:
raise DependencyException(
'Could not generate {} for {}.\n{}'.format(
stage, self.name, err))
return []
return shlex.split(out)
@staticmethod
def get_methods():
return [DependencyMethods.AUTO, DependencyMethods.CONFIG_TOOL]
def get_configtool_variable(self, variable_name):
p, out, _ = Popen_safe([self.config, '--{}'.format(variable_name)])
if p.returncode != 0:
if self.required:
raise DependencyException(
'Could not get variable "{}" for dependency {}'.format(
variable_name, self.name))
variable = out.strip()
mlog.debug('Got config-tool variable {} : {}'.format(variable_name, variable))
return variable
def log_tried(self):
return self.type_name
class PkgConfigDependency(ExternalDependency):
# The class's copy of the pkg-config path. Avoids having to search for it
# multiple times in the same Meson invocation.
class_pkgbin = None
# We cache all pkg-config subprocess invocations to avoid redundant calls
pkgbin_cache = {}
def __init__(self, name, environment, kwargs, language=None):
super().__init__('pkgconfig', environment, language, kwargs)
self.name = name
self.is_libtool = False
# Store a copy of the pkg-config path on the object itself so it is
# stored in the pickled coredata and recovered.
self.pkgbin = None
# When finding dependencies for cross-compiling, we don't care about
# the 'native' pkg-config
if self.want_cross:
if 'pkgconfig' not in environment.cross_info.config['binaries']:
if self.required:
raise DependencyException('Pkg-config binary missing from cross file')
else:
potential_pkgbin = ExternalProgram.from_cross_info(environment.cross_info, 'pkgconfig')
if potential_pkgbin.found():
self.pkgbin = potential_pkgbin
PkgConfigDependency.class_pkgbin = self.pkgbin
else:
mlog.debug('Cross pkg-config %s not found.' % potential_pkgbin.name)
# Only search for the native pkg-config the first time and
# store the result in the class definition
elif PkgConfigDependency.class_pkgbin is None:
self.pkgbin = self.check_pkgconfig()
PkgConfigDependency.class_pkgbin = self.pkgbin
else:
self.pkgbin = PkgConfigDependency.class_pkgbin
if not self.pkgbin:
if self.required:
raise DependencyException('Pkg-config not found.')
return
mlog.debug('Determining dependency {!r} with pkg-config executable '
'{!r}'.format(name, self.pkgbin.get_path()))
ret, self.version = self._call_pkgbin(['--modversion', name])
if ret != 0:
return
try:
# Fetch cargs to be used while using this dependency
self._set_cargs()
# Fetch the libraries and library paths needed for using this
self._set_libs()
except DependencyException as e:
if self.required:
raise
else:
self.compile_args = []
self.link_args = []
self.is_found = False
self.reason = e
self.is_found = True
def __repr__(self):
s = '<{0} {1}: {2} {3}>'
return s.format(self.__class__.__name__, self.name, self.is_found,
self.version_reqs)
def _call_pkgbin_real(self, args, env):
cmd = self.pkgbin.get_command() + args
p, out = Popen_safe(cmd, env=env)[0:2]
rc, out = p.returncode, out.strip()
call = ' '.join(cmd)
mlog.debug("Called `{}` -> {}\n{}".format(call, rc, out))
return rc, out
def _call_pkgbin(self, args, env=None):
if env is None:
fenv = env
env = os.environ
else:
fenv = frozenset(env.items())
targs = tuple(args)
cache = PkgConfigDependency.pkgbin_cache
if (self.pkgbin, targs, fenv) not in cache:
cache[(self.pkgbin, targs, fenv)] = self._call_pkgbin_real(args, env)
return cache[(self.pkgbin, targs, fenv)]
def _convert_mingw_paths(self, args):
'''
Both MSVC and native Python on Windows cannot handle MinGW-esque /c/foo
paths so convert them to C:/foo. We cannot resolve other paths starting
with / like /home/foo so leave them as-is so that the user gets an
error/warning from the compiler/linker.
'''
if not mesonlib.is_windows():
return args
converted = []
for arg in args:
pargs = []
# Library search path
if arg.startswith('-L/'):
pargs = PurePath(arg[2:]).parts
tmpl = '-L{}:/{}'
elif arg.startswith('-I/'):
pargs = PurePath(arg[2:]).parts
tmpl = '-I{}:/{}'
# Full path to library or .la file
elif arg.startswith('/'):
pargs = PurePath(arg).parts
tmpl = '{}:/{}'
if len(pargs) > 1 and len(pargs[1]) == 1:
arg = tmpl.format(pargs[1], '/'.join(pargs[2:]))
converted.append(arg)
return converted
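    # Examples of the conversion above (illustrative paths, added for clarity):
    #   '-L/c/msys64/mingw64/lib'        -> '-Lc:/msys64/mingw64/lib'
    #   '-I/c/msys64/mingw64/include'    -> '-Ic:/msys64/mingw64/include'
    #   '/c/msys64/mingw64/lib/libz.a'   -> 'c:/msys64/mingw64/lib/libz.a'
    #   '/home/foo/libbar.a' is left untouched so the compiler/linker reports it.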
def _set_cargs(self):
env = None
if self.language == 'fortran':
# gfortran doesn't appear to look in system paths for INCLUDE files,
# so don't allow pkg-config to suppress -I flags for system paths
env = os.environ.copy()
env['PKG_CONFIG_ALLOW_SYSTEM_CFLAGS'] = '1'
ret, out = self._call_pkgbin(['--cflags', self.name], env=env)
if ret != 0:
raise DependencyException('Could not generate cargs for %s:\n\n%s' %
(self.name, out))
self.compile_args = self._convert_mingw_paths(shlex.split(out))
def _search_libs(self, out, out_raw):
'''
@out: PKG_CONFIG_ALLOW_SYSTEM_LIBS=1 pkg-config --libs
@out_raw: pkg-config --libs
We always look for the file ourselves instead of depending on the
compiler to find it with -lfoo or foo.lib (if possible) because:
1. We want to be able to select static or shared
2. We need the full path of the library to calculate RPATH values
3. De-dup of libraries is easier when we have absolute paths
Libraries that are provided by the toolchain or are not found by
find_library() will be added with -L -l pairs.
'''
# Library paths should be safe to de-dup
#
# First, figure out what library paths to use. Originally, we were
# doing this as part of the loop, but due to differences in the order
# of -L values between pkg-config and pkgconf, we need to do that as
# a separate step. See:
# https://github.com/mesonbuild/meson/issues/3951
# https://github.com/mesonbuild/meson/issues/4023
#
# Separate system and prefix paths, and ensure that prefix paths are
# always searched first.
prefix_libpaths = OrderedSet()
# We also store this raw_link_args on the object later
raw_link_args = self._convert_mingw_paths(shlex.split(out_raw))
for arg in raw_link_args:
if arg.startswith('-L') and not arg.startswith(('-L-l', '-L-L')):
prefix_libpaths.add(arg[2:])
system_libpaths = OrderedSet()
full_args = self._convert_mingw_paths(shlex.split(out))
for arg in full_args:
if arg.startswith(('-L-l', '-L-L')):
# These are D language arguments, not library paths
continue
if arg.startswith('-L') and arg[2:] not in prefix_libpaths:
system_libpaths.add(arg[2:])
# Use this re-ordered path list for library resolution
libpaths = list(prefix_libpaths) + list(system_libpaths)
# Track -lfoo libraries to avoid duplicate work
libs_found = OrderedSet()
# Track not-found libraries to know whether to add library paths
libs_notfound = []
libtype = 'static' if self.static else 'default'
# Generate link arguments for this library
link_args = []
for lib in full_args:
if lib.startswith(('-L-l', '-L-L')):
# These are D language arguments, add them as-is
pass
elif lib.startswith('-L'):
# We already handled library paths above
continue
elif lib.startswith('-l'):
# Don't resolve the same -lfoo argument again
if lib in libs_found:
continue
if self.clib_compiler:
args = self.clib_compiler.find_library(lib[2:], self.env,
libpaths, libtype)
# If the project only uses a non-clib language such as D, Rust,
# C#, Python, etc, all we can do is limp along by adding the
# arguments as-is and then adding the libpaths at the end.
else:
args = None
if args is not None:
libs_found.add(lib)
# Replace -l arg with full path to library if available
# else, library is either to be ignored, or is provided by
# the compiler, can't be resolved, and should be used as-is
if args:
if not args[0].startswith('-l'):
lib = args[0]
else:
continue
else:
# Library wasn't found, maybe we're looking in the wrong
# places or the library will be provided with LDFLAGS or
# LIBRARY_PATH from the environment (on macOS), and many
# other edge cases that we can't account for.
#
# Add all -L paths and use it as -lfoo
if lib in libs_notfound:
continue
if self.static:
mlog.warning('Static library {!r} not found for dependency {!r}, may '
'not be statically linked'.format(lib[2:], self.name))
libs_notfound.append(lib)
elif lib.endswith(".la"):
shared_libname = self.extract_libtool_shlib(lib)
shared_lib = os.path.join(os.path.dirname(lib), shared_libname)
if not os.path.exists(shared_lib):
shared_lib = os.path.join(os.path.dirname(lib), ".libs", shared_libname)
if not os.path.exists(shared_lib):
                    raise DependencyException('Got a libtool specific "%s" dependency '
                                              'but we could not compute the actual shared '
                                              'library path' % lib)
self.is_libtool = True
lib = shared_lib
if lib in link_args:
continue
link_args.append(lib)
# Add all -Lbar args if we have -lfoo args in link_args
if libs_notfound:
# Order of -L flags doesn't matter with ld, but it might with other
# linkers such as MSVC, so prepend them.
link_args = ['-L' + lp for lp in prefix_libpaths] + link_args
return link_args, raw_link_args
def _set_libs(self):
env = None
libcmd = [self.name, '--libs']
if self.static:
libcmd.append('--static')
# Force pkg-config to output -L fields even if they are system
# paths so we can do manual searching with cc.find_library() later.
env = os.environ.copy()
env['PKG_CONFIG_ALLOW_SYSTEM_LIBS'] = '1'
ret, out = self._call_pkgbin(libcmd, env=env)
if ret != 0:
raise DependencyException('Could not generate libs for %s:\n\n%s' %
(self.name, out))
# Also get the 'raw' output without -Lfoo system paths for adding -L
# args with -lfoo when a library can't be found, and also in
# gnome.generate_gir + gnome.gtkdoc which need -L -l arguments.
ret, out_raw = self._call_pkgbin(libcmd)
if ret != 0:
raise DependencyException('Could not generate libs for %s:\n\n%s' %
(self.name, out_raw))
self.link_args, self.raw_link_args = self._search_libs(out, out_raw)
def get_pkgconfig_variable(self, variable_name, kwargs):
options = ['--variable=' + variable_name, self.name]
if 'define_variable' in kwargs:
definition = kwargs.get('define_variable', [])
if not isinstance(definition, list):
raise MesonException('define_variable takes a list')
if len(definition) != 2 or not all(isinstance(i, str) for i in definition):
raise MesonException('define_variable must be made up of 2 strings for VARIABLENAME and VARIABLEVALUE')
options = ['--define-variable=' + '='.join(definition)] + options
ret, out = self._call_pkgbin(options)
variable = ''
if ret != 0:
if self.required:
raise DependencyException('dependency %s not found.' %
(self.name))
else:
variable = out.strip()
# pkg-config doesn't distinguish between empty and non-existent variables
# use the variable list to check for variable existence
if not variable:
ret, out = self._call_pkgbin(['--print-variables', self.name])
if not re.search(r'^' + variable_name + r'$', out, re.MULTILINE):
if 'default' in kwargs:
variable = kwargs['default']
else:
mlog.warning("pkgconfig variable '%s' not defined for dependency %s." % (variable_name, self.name))
mlog.debug('Got pkgconfig variable %s : %s' % (variable_name, variable))
return variable
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG]
def check_pkgconfig(self):
evar = 'PKG_CONFIG'
if evar in os.environ:
pkgbin = os.environ[evar].strip()
else:
pkgbin = 'pkg-config'
pkgbin = ExternalProgram(pkgbin, silent=True)
if pkgbin.found():
try:
p, out = Popen_safe(pkgbin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
mlog.warning('Found pkg-config {!r} but couldn\'t run it'
''.format(' '.join(pkgbin.get_command())))
# Set to False instead of None to signify that we've already
# searched for it and not found it
pkgbin = False
except (FileNotFoundError, PermissionError):
pkgbin = False
else:
pkgbin = False
if not self.silent:
if pkgbin:
mlog.log('Found pkg-config:', mlog.bold(pkgbin.get_path()),
'(%s)' % out.strip())
else:
mlog.log('Found Pkg-config:', mlog.red('NO'))
return pkgbin
def extract_field(self, la_file, fieldname):
with open(la_file) as f:
for line in f:
arr = line.strip().split('=')
if arr[0] == fieldname:
return arr[1][1:-1]
return None
def extract_dlname_field(self, la_file):
return self.extract_field(la_file, 'dlname')
def extract_libdir_field(self, la_file):
return self.extract_field(la_file, 'libdir')
def extract_libtool_shlib(self, la_file):
'''
Returns the path to the shared library
corresponding to this .la file
'''
dlname = self.extract_dlname_field(la_file)
if dlname is None:
return None
# Darwin uses absolute paths where possible; since the libtool files never
# contain absolute paths, use the libdir field
if mesonlib.is_osx():
dlbasename = os.path.basename(dlname)
libdir = self.extract_libdir_field(la_file)
if libdir is None:
return dlbasename
return os.path.join(libdir, dlbasename)
# From the comments in extract_libtool(), older libtools had
# a path rather than the raw dlname
return os.path.basename(dlname)
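    # For reference (illustrative contents, added for clarity), a libtool .la file
    # holds lines such as:
    #   dlname='libfoo.so.1'
    #   libdir='/usr/local/lib'
    # extract_field() returns the quoted value for a field name, and
    # extract_libtool_shlib() combines dlname (and libdir on macOS) into the
    # shared library path used above.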
def log_tried(self):
return self.type_name
class DubDependency(ExternalDependency):
class_dubbin = None
def __init__(self, name, environment, kwargs):
super().__init__('dub', environment, 'd', kwargs)
self.name = name
self.compiler = super().get_compiler()
if 'required' in kwargs:
self.required = kwargs.get('required')
if DubDependency.class_dubbin is None:
self.dubbin = self.check_dub()
DubDependency.class_dubbin = self.dubbin
else:
self.dubbin = DubDependency.class_dubbin
if not self.dubbin:
if self.required:
raise DependencyException('DUB not found.')
self.is_found = False
return
mlog.debug('Determining dependency {!r} with DUB executable '
'{!r}'.format(name, self.dubbin.get_path()))
# Ask dub for the package
ret, res = self._call_dubbin(['describe', name])
if ret != 0:
self.is_found = False
return
j = json.loads(res)
comp = self.compiler.get_id().replace('llvm', 'ldc').replace('gcc', 'gdc')
for package in j['packages']:
if package['name'] == name:
if j['compiler'] != comp:
msg = ['Dependency', mlog.bold(name), 'found but it was compiled with']
msg += [mlog.bold(j['compiler']), 'and we are using', mlog.bold(comp)]
mlog.error(*msg)
self.is_found = False
return
self.version = package['version']
self.pkg = package
break
if self.pkg['targetFileName'].endswith('.a'):
self.static = True
self.compile_args = []
for flag in self.pkg['dflags']:
self.link_args.append(flag)
for path in self.pkg['importPaths']:
self.compile_args.append('-I' + os.path.join(self.pkg['path'], path))
self.link_args = []
for flag in self.pkg['lflags']:
self.link_args.append(flag)
search_paths = []
search_paths.append(os.path.join(self.pkg['path'], self.pkg['targetPath']))
found, res = self.__search_paths(search_paths, self.pkg['targetFileName'])
for file in res:
self.link_args.append(file)
self.is_found = found
def get_compiler(self):
return self.compiler
def __search_paths(self, search_paths, target_file):
found = False
res = []
if target_file == '':
return True, res
for path in search_paths:
if os.path.isdir(path):
for file in os.listdir(path):
if file == target_file:
res.append(os.path.join(path, file))
found = True
return found, res
def _call_dubbin(self, args, env=None):
p, out = Popen_safe(self.dubbin.get_command() + args, env=env)[0:2]
return p.returncode, out.strip()
def check_dub(self):
dubbin = ExternalProgram('dub', silent=True)
if dubbin.found():
try:
p, out = Popen_safe(dubbin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
mlog.warning('Found dub {!r} but couldn\'t run it'
''.format(' '.join(dubbin.get_command())))
# Set to False instead of None to signify that we've already
# searched for it and not found it
dubbin = False
except (FileNotFoundError, PermissionError):
dubbin = False
else:
dubbin = False
if dubbin:
mlog.log('Found DUB:', mlog.bold(dubbin.get_path()),
'(%s)' % out.strip())
else:
mlog.log('Found DUB:', mlog.red('NO'))
return dubbin
@staticmethod
def get_methods():
return [DependencyMethods.DUB]
class ExternalProgram:
windows_exts = ('exe', 'msc', 'com', 'bat', 'cmd')
def __init__(self, name, command=None, silent=False, search_dir=None):
self.name = name
if command is not None:
self.command = listify(command)
else:
self.command = self._search(name, search_dir)
# Set path to be the last item that is actually a file (in order to
        # skip options in something like ['python', '-u', 'file.py']). If we
# can't find any components, default to the last component of the path.
self.path = self.command[-1]
for i in range(len(self.command) - 1, -1, -1):
arg = self.command[i]
if arg is not None and os.path.isfile(arg):
self.path = arg
break
if not silent:
if self.found():
mlog.log('Program', mlog.bold(name), 'found:', mlog.green('YES'),
'(%s)' % ' '.join(self.command))
else:
mlog.log('Program', mlog.bold(name), 'found:', mlog.red('NO'))
def __repr__(self):
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
@staticmethod
def from_cross_info(cross_info, name):
if name not in cross_info.config['binaries']:
return NonExistingExternalProgram()
command = cross_info.config['binaries'][name]
if not isinstance(command, (list, str)):
raise MesonException('Invalid type {!r} for binary {!r} in cross file'
''.format(command, name))
if isinstance(command, list):
if len(command) == 1:
command = command[0]
# We cannot do any searching if the command is a list, and we don't
# need to search if the path is an absolute path.
if isinstance(command, list) or os.path.isabs(command):
return ExternalProgram(name, command=command, silent=True)
# Search for the command using the specified string!
return ExternalProgram(command, silent=True)
@staticmethod
def _shebang_to_cmd(script):
"""
Check if the file has a shebang and manually parse it to figure out
the interpreter to use. This is useful if the script is not executable
or if we're on Windows (which does not understand shebangs).
"""
try:
with open(script) as f:
first_line = f.readline().strip()
if first_line.startswith('#!'):
# In a shebang, everything before the first space is assumed to
# be the command to run and everything after the first space is
# the single argument to pass to that command. So we must split
# exactly once.
commands = first_line[2:].split('#')[0].strip().split(maxsplit=1)
if mesonlib.is_windows():
# Windows does not have UNIX paths so remove them,
# but don't remove Windows paths
if commands[0].startswith('/'):
commands[0] = commands[0].split('/')[-1]
if len(commands) > 0 and commands[0] == 'env':
commands = commands[1:]
# Windows does not ship python3.exe, but we know the path to it
if len(commands) > 0 and commands[0] == 'python3':
commands = mesonlib.python_command + commands[1:]
elif mesonlib.is_haiku():
# Haiku does not have /usr, but a lot of scripts assume that
# /usr/bin/env always exists. Detect that case and run the
# script with the interpreter after it.
if commands[0] == '/usr/bin/env':
commands = commands[1:]
# We know what python3 is, we're running on it
if len(commands) > 0 and commands[0] == 'python3':
commands = mesonlib.python_command + commands[1:]
return commands + [script]
except Exception as e:
mlog.debug(e)
pass
mlog.debug('Unusable script {!r}'.format(script))
return False
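    # Examples of the shebang handling above (illustrative, added for clarity):
    #   '#!/usr/bin/perl -w'      -> ['/usr/bin/perl', '-w', script]
    #                                (on Windows the UNIX path is reduced to 'perl')
    #   '#!/usr/bin/env python3'  -> ['/usr/bin/env', 'python3', script] on UNIX,
    #                                mesonlib.python_command + [script] on Windows/Haiku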
def _is_executable(self, path):
suffix = os.path.splitext(path)[-1].lower()[1:]
if mesonlib.is_windows():
if suffix in self.windows_exts:
return True
elif os.access(path, os.X_OK):
return not os.path.isdir(path)
return False
def _search_dir(self, name, search_dir):
if search_dir is None:
return False
trial = os.path.join(search_dir, name)
if os.path.exists(trial):
if self._is_executable(trial):
return [trial]
# Now getting desperate. Maybe it is a script file that is
# a) not chmodded executable, or
# b) we are on windows so they can't be directly executed.
return self._shebang_to_cmd(trial)
else:
if mesonlib.is_windows():
for ext in self.windows_exts:
trial_ext = '{}.{}'.format(trial, ext)
if os.path.exists(trial_ext):
return [trial_ext]
return False
def _search_windows_special_cases(self, name, command):
'''
Lots of weird Windows quirks:
1. PATH search for @name returns files with extensions from PATHEXT,
but only self.windows_exts are executable without an interpreter.
2. @name might be an absolute path to an executable, but without the
extension. This works inside MinGW so people use it a lot.
3. The script is specified without an extension, in which case we have
to manually search in PATH.
4. More special-casing for the shebang inside the script.
'''
if command:
# On Windows, even if the PATH search returned a full path, we can't be
# sure that it can be run directly if it's not a native executable.
# For instance, interpreted scripts sometimes need to be run explicitly
# with an interpreter if the file association is not done properly.
name_ext = os.path.splitext(command)[1]
if name_ext[1:].lower() in self.windows_exts:
# Good, it can be directly executed
return [command]
# Try to extract the interpreter from the shebang
commands = self._shebang_to_cmd(command)
if commands:
return commands
return [None]
# Maybe the name is an absolute path to a native Windows
# executable, but without the extension. This is technically wrong,
# but many people do it because it works in the MinGW shell.
if os.path.isabs(name):
for ext in self.windows_exts:
command = '{}.{}'.format(name, ext)
if os.path.exists(command):
return [command]
# On Windows, interpreted scripts must have an extension otherwise they
# cannot be found by a standard PATH search. So we do a custom search
# where we manually search for a script with a shebang in PATH.
search_dirs = os.environ.get('PATH', '').split(';')
for search_dir in search_dirs:
commands = self._search_dir(name, search_dir)
if commands:
return commands
return [None]
def _search(self, name, search_dir):
'''
Search in the specified dir for the specified executable by name
and if not found search in PATH
'''
commands = self._search_dir(name, search_dir)
if commands:
return commands
# Do a standard search in PATH
command = shutil.which(name)
if mesonlib.is_windows():
return self._search_windows_special_cases(name, command)
# On UNIX-like platforms, shutil.which() is enough to find
# all executables whether in PATH or with an absolute path
return [command]
def found(self):
return self.command[0] is not None
def get_command(self):
return self.command[:]
def get_path(self):
return self.path
def get_name(self):
return self.name
class NonExistingExternalProgram(ExternalProgram):
"A program that will never exist"
def __init__(self):
self.name = 'nonexistingprogram'
self.command = [None]
self.path = None
def __repr__(self):
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
def found(self):
return False
class EmptyExternalProgram(ExternalProgram):
'''
A program object that returns an empty list of commands. Used for cases
such as a cross file exe_wrapper to represent that it's not required.
'''
def __init__(self):
self.name = None
self.command = []
self.path = None
def __repr__(self):
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
def found(self):
return True
class ExternalLibrary(ExternalDependency):
def __init__(self, name, link_args, environment, language, silent=False):
super().__init__('library', environment, language, {})
self.name = name
self.language = language
self.is_found = False
if link_args:
self.is_found = True
self.link_args = link_args
if not silent:
if self.is_found:
mlog.log('Library', mlog.bold(name), 'found:', mlog.green('YES'))
else:
mlog.log('Library', mlog.bold(name), 'found:', mlog.red('NO'))
def get_link_args(self, language=None, **kwargs):
'''
External libraries detected using a compiler must only be used with
compatible code. For instance, Vala libraries (.vapi files) cannot be
used with C code, and not all Rust library types can be linked with
C-like code. Note that C++ libraries *can* be linked with C code with
a C++ linker (and vice-versa).
'''
# Using a vala library in a non-vala target, or a non-vala library in a vala target
# XXX: This should be extended to other non-C linkers such as Rust
if (self.language == 'vala' and language != 'vala') or \
(language == 'vala' and self.language != 'vala'):
return []
return super().get_link_args(**kwargs)
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
# External library only has link_args, so ignore the rest of the
# interface.
new = copy.copy(self)
if not link_args:
new.link_args = []
return new
class ExtraFrameworkDependency(ExternalDependency):
def __init__(self, name, required, path, env, lang, kwargs):
super().__init__('extraframeworks', env, lang, kwargs)
self.name = name
self.required = required
self.detect(name, path)
if self.found():
self.compile_args = ['-I' + os.path.join(self.path, self.name, 'Headers')]
self.link_args = ['-F' + self.path, '-framework', self.name.split('.')[0]]
def detect(self, name, path):
lname = name.lower()
if path is None:
paths = ['/System/Library/Frameworks', '/Library/Frameworks']
else:
paths = [path]
for p in paths:
for d in os.listdir(p):
fullpath = os.path.join(p, d)
if lname != d.rsplit('.', 1)[0].lower():
continue
if not stat.S_ISDIR(os.stat(fullpath).st_mode):
continue
self.path = p
self.name = d
self.is_found = True
return
def log_info(self):
return os.path.join(self.path, self.name)
def log_tried(self):
return 'framework'
def get_dep_identifier(name, kwargs, want_cross):
# Need immutable objects since the identifier will be used as a dict key
version_reqs = listify(kwargs.get('version', []))
if isinstance(version_reqs, list):
version_reqs = frozenset(version_reqs)
identifier = (name, version_reqs, want_cross)
for key, value in kwargs.items():
# 'version' is embedded above as the second element for easy access
# 'native' is handled above with `want_cross`
# 'required' is irrelevant for caching; the caller handles it separately
# 'fallback' subprojects cannot be cached -- they must be initialized
if key in ('version', 'native', 'required', 'fallback',):
continue
# All keyword arguments are strings, ints, or lists (or lists of lists)
if isinstance(value, list):
value = frozenset(listify(value))
identifier += (key, value)
return identifier
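# Example (illustrative): a call such as dependency('zlib', version : '>=1.2',
# static : true) in a native build yields an identifier roughly like
#   ('zlib', frozenset({'>=1.2'}), False, 'static', True)
# which is hashable and can therefore be used as a cache key in coredata.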
display_name_map = {
'boost': 'Boost',
'dub': 'DUB',
'gmock': 'GMock',
'gtest': 'GTest',
'llvm': 'LLVM',
'mpi': 'MPI',
'openmp': 'OpenMP',
'wxwidgets': 'WxWidgets',
}
def find_external_dependency(name, env, kwargs):
assert(name)
required = kwargs.get('required', True)
if not isinstance(required, bool):
raise DependencyException('Keyword "required" must be a boolean.')
if not isinstance(kwargs.get('method', ''), str):
raise DependencyException('Keyword "method" must be a string.')
lname = name.lower()
if lname not in _packages_accept_language and 'language' in kwargs:
raise DependencyException('%s dependency does not accept "language" keyword argument' % (name, ))
if not isinstance(kwargs.get('version', ''), (str, list)):
        raise DependencyException('Keyword "version" must be string or list.')
# display the dependency name with correct casing
display_name = display_name_map.get(lname, lname)
# if this isn't a cross-build, it's uninteresting if native: is used or not
if not env.is_cross_build():
type_text = 'Dependency'
else:
type_text = 'Native' if kwargs.get('native', False) else 'Cross'
type_text += ' dependency'
# build a list of dependency methods to try
candidates = _build_external_dependency_list(name, env, kwargs)
pkg_exc = None
pkgdep = []
details = ''
for c in candidates:
# try this dependency method
try:
d = c()
d._check_version()
pkgdep.append(d)
except Exception as e:
mlog.debug(str(e))
# store the first exception we see
if not pkg_exc:
pkg_exc = e
else:
details = d.log_details()
if details:
details = '(' + details + ') '
if 'language' in kwargs:
details += 'for ' + d.language + ' '
# if the dependency was found
if d.found():
info = d.log_info()
if info:
info = ', ' + info
mlog.log(type_text, mlog.bold(display_name), details + 'found:', mlog.green('YES'), (d.version if d.version else '') + info)
return d
# otherwise, the dependency could not be found
tried_methods = [d.log_tried() for d in pkgdep if d.log_tried()]
if tried_methods:
tried = '{}'.format(mlog.format_list(tried_methods))
else:
tried = ''
mlog.log(type_text, mlog.bold(display_name), details + 'found:', mlog.red('NO'),
'(tried {})'.format(tried) if tried else '')
if required:
# if exception(s) occurred, re-raise the first one (on the grounds that
# it came from a preferred dependency detection method)
if pkg_exc:
raise pkg_exc
# we have a list of failed ExternalDependency objects, so we can report
# the methods we tried to find the dependency
raise DependencyException('Dependency "%s" not found, tried %s' % (name, tried))
# return the last failed dependency object
if pkgdep:
return pkgdep[-1]
# this should never happen
raise DependencyException('Dependency "%s" not found, but no dependency object to return' % (name))
def _build_external_dependency_list(name, env, kwargs):
# Is there a specific dependency detector for this dependency?
lname = name.lower()
if lname in packages:
# Create the list of dependency object constructors using a factory
# class method, if one exists, otherwise the list just consists of the
# constructor
if getattr(packages[lname], '_factory', None):
dep = packages[lname]._factory(env, kwargs)
else:
dep = [functools.partial(packages[lname], env, kwargs)]
return dep
candidates = []
# If it's explicitly requested, use the dub detection method (only)
if 'dub' == kwargs.get('method', ''):
candidates.append(functools.partial(DubDependency, name, env, kwargs))
return candidates
# TBD: other values of method should control what method(s) are used
# Otherwise, just use the pkgconfig dependency detector
candidates.append(functools.partial(PkgConfigDependency, name, env, kwargs))
# On OSX, also try framework dependency detector
if mesonlib.is_osx():
candidates.append(functools.partial(ExtraFrameworkDependency, name,
False, None, env, None, kwargs))
return candidates
def strip_system_libdirs(environment, link_args):
"""Remove -L<system path> arguments.
leaving these in will break builds where a user has a version of a library
in the system path, and a different version not in the system path if they
want to link against the non-system path version.
"""
exclude = {'-L{}'.format(p) for p in environment.get_compiler_system_dirs()}
return [l for l in link_args if l not in exclude]
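# Example (illustrative): if the compiler reports '/usr/lib' as a system dir, then
#   strip_system_libdirs(env, ['-L/usr/lib', '-L/opt/foo/lib', '-lfoo'])
# returns ['-L/opt/foo/lib', '-lfoo'].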
|
the-stack_106_16445
|
import pygame
class Ship():
    def __init__(self, screen):
        """Initialize the ship and set its initial position."""
self.screen = screen
        # Load the ship image and get its bounding rectangle
self.image = pygame.image.load('image/ship.bmp')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
        # Place each new ship at the bottom center of the screen
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
    def blitme(self):
        """Draw the ship at the specified location."""
self.screen.blit(self.image, self.rect)
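# Minimal usage sketch (added for illustration; not part of the original module).
# It assumes pygame is installed and that 'image/ship.bmp' exists relative to the
# working directory.
def run_demo():
    pygame.init()
    screen = pygame.display.set_mode((1200, 800))
    ship = Ship(screen)
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        screen.fill((230, 230, 230))  # clear the frame
        ship.blitme()                 # draw the ship at its current rect
        pygame.display.flip()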
|
the-stack_106_16448
|
from math import atan2
from cereal import car
from common.numpy_fast import interp
from common.realtime import DT_DMON
from selfdrive.hardware import TICI
from common.filter_simple import FirstOrderFilter
from common.stat_live import RunningStatFilter
EventName = car.CarEvent.EventName
# ******************************************************************************************
# NOTE: To fork maintainers.
# Disabling or nerfing safety features will get you and your users banned from our servers.
# We recommend that you do not change these numbers from the defaults.
# ******************************************************************************************
class DRIVER_MONITOR_SETTINGS():
def __init__(self, TICI=TICI, DT_DMON=DT_DMON):
self._DT_DMON = DT_DMON
self._AWARENESS_TIME = 35. # passive wheeltouch total timeout
self._AWARENESS_PRE_TIME_TILL_TERMINAL = 12.
self._AWARENESS_PROMPT_TIME_TILL_TERMINAL = 6.
self._DISTRACTED_TIME = 11. # active monitoring total timeout
self._DISTRACTED_PRE_TIME_TILL_TERMINAL = 8.
self._DISTRACTED_PROMPT_TIME_TILL_TERMINAL = 6.
self._FACE_THRESHOLD = 0.5
self._PARTIAL_FACE_THRESHOLD = 0.765 if TICI else 0.43
self._EYE_THRESHOLD = 0.61 if TICI else 0.55
self._SG_THRESHOLD = 0.89 if TICI else 0.86
self._BLINK_THRESHOLD = 0.82 if TICI else 0.588
self._BLINK_THRESHOLD_SLACK = 0.9 if TICI else 0.77
self._BLINK_THRESHOLD_STRICT = self._BLINK_THRESHOLD
self._PITCH_WEIGHT = 1.35 # pitch matters a lot more
self._POSESTD_THRESHOLD = 0.38 if TICI else 0.3
self._METRIC_THRESHOLD = 0.48
self._METRIC_THRESHOLD_SLACK = 0.66
self._METRIC_THRESHOLD_STRICT = self._METRIC_THRESHOLD
self._PITCH_NATURAL_OFFSET = 0.02 # people don't seem to look straight when they drive relaxed, rather a bit up
self._YAW_NATURAL_OFFSET = 0.08 # people don't seem to look straight when they drive relaxed, rather a bit to the right (center of car)
self._HI_STD_FALLBACK_TIME = int(10 / self._DT_DMON) # fall back to wheel touch if model is uncertain for 10s
self._DISTRACTED_FILTER_TS = 0.25 # 0.6Hz
self._POSE_CALIB_MIN_SPEED = 13 # 30 mph
self._POSE_OFFSET_MIN_COUNT = int(60 / self._DT_DMON) # valid data counts before calibration completes, 1min cumulative
self._POSE_OFFSET_MAX_COUNT = int(360 / self._DT_DMON) # stop deweighting new data after 6 min, aka "short term memory"
self._RECOVERY_FACTOR_MAX = 5. # relative to minus step change
self._RECOVERY_FACTOR_MIN = 1.25 # relative to minus step change
self._MAX_TERMINAL_ALERTS = 300 # not allowed to engage after 3 terminal alerts
self._MAX_TERMINAL_DURATION = int(30 / self._DT_DMON) # not allowed to engage after 30s of terminal alerts
# model output refers to center of cropped image, so need to apply the x displacement offset
RESIZED_FOCAL = 320.0
H, W, FULL_W = 320, 160, 426
class DistractedType:
NOT_DISTRACTED = 0
BAD_POSE = 1
BAD_BLINK = 2
def face_orientation_from_net(angles_desc, pos_desc, rpy_calib, is_rhd):
# the output of these angles are in device frame
# so from driver's perspective, pitch is up and yaw is right
pitch_net, yaw_net, roll_net = angles_desc
face_pixel_position = ((pos_desc[0] + .5)*W - W + FULL_W, (pos_desc[1]+.5)*H)
yaw_focal_angle = atan2(face_pixel_position[0] - FULL_W//2, RESIZED_FOCAL)
pitch_focal_angle = atan2(face_pixel_position[1] - H//2, RESIZED_FOCAL)
pitch = pitch_net + pitch_focal_angle
yaw = -yaw_net + yaw_focal_angle
# no calib for roll
pitch -= rpy_calib[1]
yaw -= rpy_calib[2] * (1 - 2 * int(is_rhd)) # lhd -> -=, rhd -> +=
return roll_net, pitch, yaw
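# Worked example of the correction above (illustrative): when the detected face
# centre lands exactly in the middle of the full frame, face_pixel_position equals
# (FULL_W//2, H//2), both atan2 terms are atan2(0, RESIZED_FOCAL) == 0, and the
# network angles pass through unchanged apart from the rpy_calib offsets.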
class DriverPose():
def __init__(self, max_trackable):
self.yaw = 0.
self.pitch = 0.
self.roll = 0.
self.yaw_std = 0.
self.pitch_std = 0.
self.roll_std = 0.
self.pitch_offseter = RunningStatFilter(max_trackable=max_trackable)
self.yaw_offseter = RunningStatFilter(max_trackable=max_trackable)
self.low_std = True
self.cfactor = 1.
class DriverBlink():
def __init__(self):
self.left_blink = 0.
self.right_blink = 0.
self.cfactor = 1.
class DriverStatus():
def __init__(self, rhd=False, settings=DRIVER_MONITOR_SETTINGS()):
# init policy settings
self.settings = settings
# init driver status
self.is_rhd_region = rhd
self.pose = DriverPose(self.settings._POSE_OFFSET_MAX_COUNT)
self.pose_calibrated = False
self.blink = DriverBlink()
self.awareness = 1.
self.awareness_active = 1.
self.awareness_passive = 1.
self.driver_distracted = False
self.driver_distraction_filter = FirstOrderFilter(0., self.settings._DISTRACTED_FILTER_TS, self.settings._DT_DMON)
self.face_detected = False
self.face_partial = False
self.terminal_alert_cnt = 0
self.terminal_time = 0
self.step_change = 0.
self.active_monitoring_mode = True
self.is_model_uncertain = False
self.hi_stds = 0
self.threshold_pre = self.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self.threshold_prompt = self.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self._set_timers(active_monitoring=True)
def _set_timers(self, active_monitoring):
if self.active_monitoring_mode and self.awareness <= self.threshold_prompt:
if active_monitoring:
self.step_change = self.settings._DT_DMON / self.settings._DISTRACTED_TIME
else:
self.step_change = 0.
return # no exploit after orange alert
elif self.awareness <= 0.:
return
if active_monitoring:
# when falling back from passive mode to active mode, reset awareness to avoid false alert
if not self.active_monitoring_mode:
self.awareness_passive = self.awareness
self.awareness = self.awareness_active
self.threshold_pre = self.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self.threshold_prompt = self.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self.step_change = self.settings._DT_DMON / self.settings._DISTRACTED_TIME
self.active_monitoring_mode = True
else:
if self.active_monitoring_mode:
self.awareness_active = self.awareness
self.awareness = self.awareness_passive
self.threshold_pre = self.settings._AWARENESS_PRE_TIME_TILL_TERMINAL / self.settings._AWARENESS_TIME
self.threshold_prompt = self.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL / self.settings._AWARENESS_TIME
self.step_change = self.settings._DT_DMON / self.settings._AWARENESS_TIME
self.active_monitoring_mode = False
def _is_driver_distracted(self, pose, blink):
if not self.pose_calibrated:
pitch_error = pose.pitch - self.settings._PITCH_NATURAL_OFFSET
yaw_error = pose.yaw - self.settings._YAW_NATURAL_OFFSET
else:
pitch_error = pose.pitch - self.pose.pitch_offseter.filtered_stat.mean()
yaw_error = pose.yaw - self.pose.yaw_offseter.filtered_stat.mean()
pitch_error = 0 if pitch_error > 0 else abs(pitch_error) # no positive pitch limit
yaw_error = abs(yaw_error)
if pitch_error*self.settings._PITCH_WEIGHT > self.settings._METRIC_THRESHOLD*pose.cfactor or \
yaw_error > self.settings._METRIC_THRESHOLD*pose.cfactor:
return DistractedType.BAD_POSE
elif (blink.left_blink + blink.right_blink)*0.5 > self.settings._BLINK_THRESHOLD*blink.cfactor:
return DistractedType.BAD_BLINK
else:
return DistractedType.NOT_DISTRACTED
def set_policy(self, model_data):
ep = min(model_data.meta.engagedProb, 0.8) / 0.8
self.pose.cfactor = interp(ep, [0, 0.5, 1],
[self.settings._METRIC_THRESHOLD_STRICT,
self.settings. _METRIC_THRESHOLD,
self.settings._METRIC_THRESHOLD_SLACK]) / self.settings._METRIC_THRESHOLD
self.blink.cfactor = interp(ep, [0, 0.5, 1],
[self.settings._BLINK_THRESHOLD_STRICT,
self.settings._BLINK_THRESHOLD,
self.settings._BLINK_THRESHOLD_SLACK]) / self.settings._BLINK_THRESHOLD
def get_pose(self, driver_state, cal_rpy, car_speed, op_engaged):
if not all(len(x) > 0 for x in [driver_state.faceOrientation, driver_state.facePosition,
driver_state.faceOrientationStd, driver_state.facePositionStd]):
return
self.face_partial = driver_state.partialFace > self.settings._PARTIAL_FACE_THRESHOLD
self.face_detected = driver_state.faceProb > self.settings._FACE_THRESHOLD or self.face_partial
self.pose.roll, self.pose.pitch, self.pose.yaw = face_orientation_from_net(driver_state.faceOrientation, driver_state.facePosition, cal_rpy, self.is_rhd_region)
self.pose.pitch_std = driver_state.faceOrientationStd[0]
self.pose.yaw_std = driver_state.faceOrientationStd[1]
# self.pose.roll_std = driver_state.faceOrientationStd[2]
model_std_max = max(self.pose.pitch_std, self.pose.yaw_std)
self.pose.low_std = model_std_max < self.settings._POSESTD_THRESHOLD and not self.face_partial
self.blink.left_blink = driver_state.leftBlinkProb * (driver_state.leftEyeProb > self.settings._EYE_THRESHOLD) * (driver_state.sunglassesProb < self.settings._SG_THRESHOLD)
self.blink.right_blink = driver_state.rightBlinkProb * (driver_state.rightEyeProb > self.settings._EYE_THRESHOLD) * (driver_state.sunglassesProb < self.settings._SG_THRESHOLD)
self.driver_distracted = self._is_driver_distracted(self.pose, self.blink) > 0 and \
driver_state.faceProb > self.settings._FACE_THRESHOLD and self.pose.low_std
self.driver_distraction_filter.update(self.driver_distracted)
# update offseter
# only update when driver is actively driving the car above a certain speed
if self.face_detected and car_speed > self.settings._POSE_CALIB_MIN_SPEED and self.pose.low_std and (not op_engaged or not self.driver_distracted):
self.pose.pitch_offseter.push_and_update(self.pose.pitch)
self.pose.yaw_offseter.push_and_update(self.pose.yaw)
self.pose_calibrated = self.pose.pitch_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT and \
self.pose.yaw_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT
self.is_model_uncertain = self.hi_stds > self.settings._HI_STD_FALLBACK_TIME
self._set_timers(self.face_detected and not self.is_model_uncertain)
if self.face_detected and not self.pose.low_std and not self.driver_distracted:
self.hi_stds += 1
elif self.face_detected and self.pose.low_std:
self.hi_stds = 0
def update(self, events, driver_engaged, ctrl_active, standstill):
if (driver_engaged and self.awareness > 0) or not ctrl_active:
# reset on driver engagement unless the terminal (red) alert has been reached; always reset when controls are not active
self.awareness = 1.
self.awareness_active = 1.
self.awareness_passive = 1.
return
driver_attentive = self.driver_distraction_filter.x < 0.37
awareness_prev = self.awareness
if (driver_attentive and self.face_detected and self.pose.low_std and self.awareness > 0):
# only restore awareness when paying attention and alert is not red
self.awareness = min(self.awareness + ((self.settings._RECOVERY_FACTOR_MAX-self.settings._RECOVERY_FACTOR_MIN)*(1.-self.awareness)+self.settings._RECOVERY_FACTOR_MIN)*self.step_change, 1.)
if self.awareness == 1.:
self.awareness_passive = min(self.awareness_passive + self.step_change, 1.)
# don't display alert banner when awareness is recovering and has cleared orange
if self.awareness > self.threshold_prompt:
return
standstill_exemption = standstill and self.awareness - self.step_change <= self.threshold_prompt
certainly_distracted = self.driver_distraction_filter.x > 0.63 and self.driver_distracted and self.face_detected
maybe_distracted = self.hi_stds > self.settings._HI_STD_FALLBACK_TIME or not self.face_detected
if certainly_distracted or maybe_distracted:
# should always be counting if distracted unless at standstill and reaching orange
if not standstill_exemption:
self.awareness = max(self.awareness - self.step_change, -0.1)
alert = None
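# Escalation below: once awareness drops under threshold_pre a "pre" alert is raised,
# under threshold_prompt a "prompt" alert, and at zero the terminal alert that requires
# a disengagement to clear.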
if self.awareness <= 0.:
# terminal red alert: disengagement required
alert = EventName.driverDistracted if self.active_monitoring_mode else EventName.driverUnresponsive
self.terminal_time += 1
if awareness_prev > 0.:
self.terminal_alert_cnt += 1
elif self.awareness <= self.threshold_prompt:
# prompt orange alert
alert = EventName.promptDriverDistracted if self.active_monitoring_mode else EventName.promptDriverUnresponsive
elif self.awareness <= self.threshold_pre:
# pre green alert
alert = EventName.preDriverDistracted if self.active_monitoring_mode else EventName.preDriverUnresponsive
if alert is not None:
events.add(alert)
|
the-stack_106_16451
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles converting of formatting."""
import cgi
from . import constants
class FormattingHandler(object):
"""Class that handles the conversion of formatting."""
# Links with these URL schemas are auto-linked by GFM.
_GFM_AUTO_URL_SCHEMAS = ("http://", "https://")
# Images that were inlined automatically by Wiki syntax
# had to have these URL schemas and image extensions.
_IMAGE_URL_SCHEMAS = ("http://", "https://", "ftp://")
_IMAGE_EXTENSIONS = (".png", ".gif", ".jpg", ".jpeg", ".svg")
# Template for linking to a video.
_VIDEO_TEMPLATE = (
"<a href='http://www.youtube.com/watch?feature=player_embedded&v={0}' "
"target='_blank'><img src='http://img.youtube.com/vi/{0}/0.jpg' "
"width='{1}' height={2} /></a>")
# Formatting tags for list-to-HTML conversion.
_HTML_LIST_TAGS = {
"Numeric list": {
"ListTag": "ol",
"ItemTag": "li",
},
"Bulleted list": {
"ListTag": "ul",
"ItemTag": "li",
},
"Blockquote": {
"ListTag": "blockquote",
"ItemTag": None,
},
}
# Formatting tags for formatting-to-HTML conversion.
_HTML_FORMAT_TAGS = {
"Bold": {
"Markdown": "**",
"HTML": "b",
},
"Italic": {
"Markdown": "_",
"HTML": "i",
},
"Strikethrough": {
"Markdown": "~~",
"HTML": "del",
},
}
# How a single indentation is outputted.
_SINGLE_INDENTATION = " " * 2
def __init__(self, warning_method, project, issue_map, symmetric_headers):
"""Create a formatting handler.
Args:
warning_method: A function to call to display a warning message.
project: The name of the Google Code project for the Wiki page.
issue_map: A dictionary of Google Code issues to GitHub issues.
symmetric_headers: True if header denotations are symmetric.
"""
self._warning_method = warning_method
self._project = project
self._issue_map = issue_map
self._symmetric_headers = symmetric_headers
# GFM has a quirk with nested blockquotes where a blank line is needed
# after closing a nested blockquote while continuing into another.
self._last_blockquote_indent = 0
# GFM will not apply formatting if whitespace surrounds the text being
# formatted, but Wiki will. To work around this, we maintain a buffer
# of text to be outputted, and when the tag is closed we can trim the
# buffer before applying formatting. If the trimmed buffer is empty, we
# can omit the formatting altogether to avoid GFM rendering issues.
self._format_buffer = []
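# Illustrative example (hypothetical input): Wiki "*bold text *" would naively become
# "**bold text **", which GFM does not render as bold; buffering lets us trim and emit
# "**bold text**" instead, or nothing at all if only whitespace was wrapped.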
# GitHub won't render formatting within HTML tags. Track if this is the
# case so we can issue a warning and try a work-around.
self._in_html = 0 # Number of tags currently open.
self._in_code_block = False # If we're in a code block in HTML.
self._has_written_text = False # If we've written text since the last tag.
self._list_tags = [] # If writing HTML for lists, the current list tags.
self._table_status = None # Where we are in outputting an HTML table.
# GitHub doesn't support HTML comments, so as a workaround we give
# a bogus and empty <a> tag, which renders as nothing.
self._in_comment = False
def HandleHeaderOpen(self, input_line, output_stream, header_level):
"""Handle the output for opening a header.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
header_level: The header level.
"""
if self._in_html:
tag = u"h{0}".format(header_level)
self.HandleHtmlOpen(input_line, output_stream, tag, {}, False)
else:
self._Write("#" * header_level + " ", output_stream)
def HandleHeaderClose(
self,
input_line,
output_stream,
header_level):
"""Handle the output for closing a header.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
header_level: The header level.
"""
if self._in_html:
tag = u"h{0}".format(header_level)
self.HandleHtmlClose(input_line, output_stream, tag)
else:
if self._symmetric_headers:
self._Write(" " + "#" * header_level, output_stream)
def HandleHRule(self, input_line, output_stream):
"""Handle the output for a horizontal rule.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
if self._in_html:
self.HandleHtmlOpen(input_line, output_stream, "hr", {}, True)
else:
# One newline needed before to separate from text, and not make a header.
self._Write("\n---\n", output_stream)
def HandleCodeBlockOpen(self, input_line, output_stream, specified_language):
"""Handle the output for starting a code block.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
specified_language: Language for the code block, or None.
"""
if self._in_html:
self._PrintHtmlWarning(input_line, "Code")
self.HandleHtmlOpen(input_line, output_stream, "pre", {}, False)
self.HandleHtmlOpen(input_line, output_stream, "code", {}, False)
else:
if not specified_language:
specified_language = ""
self._Write(u"```{0}\n".format(specified_language), output_stream)
self._in_code_block = True
def HandleCodeBlockClose(self, input_line, output_stream):
"""Handle the output for ending a code block.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
self._in_code_block = False
if self._in_html:
self.HandleHtmlClose(input_line, output_stream, "code")
self.HandleHtmlClose(input_line, output_stream, "pre")
else:
self._Write("```", output_stream)
def HandleNumericListOpen(
self,
input_line,
output_stream,
indentation_level):
"""Handle the output for the opening of a numeric list item.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
indentation_level: The indentation level for the item.
"""
if self._in_html:
self._HandleHtmlListOpen(
input_line,
output_stream,
indentation_level,
"Numeric list")
else:
self._Indent(output_stream, indentation_level)
# Just using any number implies a numbered item,
# so we take the easy route.
self._Write("1. ", output_stream)
def HandleBulletListOpen(
self,
input_line,
output_stream,
indentation_level):
"""Handle the output for the opening of a bulleted list item.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
indentation_level: The indentation level for the item.
"""
if self._in_html:
self._HandleHtmlListOpen(
input_line,
output_stream,
indentation_level,
"Bulleted list")
else:
self._Indent(output_stream, indentation_level)
self._Write("* ", output_stream)
def HandleBlockQuoteOpen(
self,
input_line,
output_stream,
indentation_level):
"""Handle the output for the opening of a block quote line.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
indentation_level: The indentation level for the item.
"""
if self._in_html:
self._HandleHtmlListOpen(
input_line,
output_stream,
indentation_level,
"Blockquote")
else:
if self._last_blockquote_indent > indentation_level:
self._Write("\n", output_stream)
self._last_blockquote_indent = indentation_level
# Blockquotes are nested not by indentation but by nesting.
self._Write("> " * indentation_level, output_stream)
def HandleListClose(self, input_line, output_stream):
"""Handle the output for the closing of a list.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
if self._in_html:
self._HandleHtmlListClose(input_line, output_stream)
def HandleParagraphBreak(self, unused_input_line, output_stream):
"""Handle the output for a new paragraph.
Args:
unused_input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
self._Write("\n", output_stream)
def HandleBoldOpen(self, input_line, unused_output_stream):
"""Handle the output for starting bold formatting.
Args:
input_line: Current line number being processed.
unused_output_stream: Output Markdown file.
"""
if self._in_html:
self._PrintHtmlWarning(input_line, "Bold")
# Open up another buffer.
self._format_buffer.append("")
def HandleBoldClose(self, input_line, output_stream):
"""Handle the output for ending bold formatting.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
self._HandleFormatClose(input_line, output_stream, "Bold")
def HandleItalicOpen(self, input_line, unused_output_stream):
"""Handle the output for starting italic formatting.
Args:
input_line: Current line number being processed.
unused_output_stream: Output Markdown file.
"""
if self._in_html:
self._PrintHtmlWarning(input_line, "Italic")
# Open up another buffer.
self._format_buffer.append("")
def HandleItalicClose(self, input_line, output_stream):
"""Handle the output for ending italic formatting.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
self._HandleFormatClose(input_line, output_stream, "Italic")
def HandleStrikethroughOpen(self, input_line, unused_output_stream):
"""Handle the output for starting strikethrough formatting.
Args:
input_line: Current line number being processed.
unused_output_stream: Output Markdown file.
"""
if self._in_html:
self._PrintHtmlWarning(input_line, "Strikethrough")
# Open up another buffer.
self._format_buffer.append("")
def HandleStrikethroughClose(self, input_line, output_stream):
"""Handle the output for ending strikethrough formatting.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
self._HandleFormatClose(input_line, output_stream, "Strikethrough")
def HandleSuperscript(self, unused_input_line, output_stream, text):
"""Handle the output for superscript text.
Args:
unused_input_line: Current line number being processed.
output_stream: Output Markdown file.
text: The text to output.
"""
# Markdown currently has no dedicated markup for superscript.
self._Write(u"<sup>{0}</sup>".format(text), output_stream)
def HandleSubscript(self, unused_input_line, output_stream, text):
"""Handle the output for subscript text.
Args:
unused_input_line: Current line number being processed.
output_stream: Output Markdown file.
text: The text to output.
"""
# Markdown currently has no dedicated markup for subscript.
self._Write(u"<sub>{0}</sub>".format(text), output_stream)
def HandleInlineCode(self, input_line, output_stream, code):
"""Handle the output for a code block.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
code: The code inlined.
"""
if self._in_html:
self.HandleHtmlOpen(input_line, output_stream, "code", {}, False)
self.HandleText(input_line, output_stream, cgi.escape(code))
self.HandleHtmlClose(input_line, output_stream, "code")
else:
# To render backticks within inline code, the surrounding tick count
# must be one greater than the number of consecutive ticks in the code.
# E.g.:
# `this is okay, no ticks in the code`
# `` `one consecutive tick in the code implies two in the delimiter` ``
# ``` `` `and two consecutive ticks in here implies three -> ```
max_consecutive_ticks = 0
consecutive_ticks = 0
for char in code:
if char == "`":
consecutive_ticks += 1
max_consecutive_ticks = max(max_consecutive_ticks, consecutive_ticks)
else:
consecutive_ticks = 0
surrounding_ticks = "`" * (max_consecutive_ticks + 1)
self._Write(u"{0}{1}{0}".format(surrounding_ticks, code), output_stream)
def HandleTableCellBorder(self, input_line, output_stream):
"""Handle the output for a table cell border.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
if self._in_html:
if not self._table_status:
# Starting a new table.
self._PrintHtmlWarning(input_line, "Table")
self.HandleHtmlOpen(input_line, output_stream, "table", {}, False)
self.HandleHtmlOpen(input_line, output_stream, "thead", {}, False)
self.HandleHtmlOpen(input_line, output_stream, "th", {}, False)
self._table_status = "header"
elif self._table_status == "header":
# Header cell. Close the previous cell, open the next one.
self.HandleHtmlClose(input_line, output_stream, "th")
self.HandleHtmlOpen(input_line, output_stream, "th", {}, False)
elif self._table_status == "rowstart":
# First row cell.
self.HandleHtmlOpen(input_line, output_stream, "tr", {}, False)
self.HandleHtmlOpen(input_line, output_stream, "td", {}, False)
self._table_status = "row"
elif self._table_status == "row":
# Row cell. Close the previous cell, open the next one.
self.HandleHtmlClose(input_line, output_stream, "td")
self.HandleHtmlOpen(input_line, output_stream, "td", {}, False)
else:
self._Write("|", output_stream)
def HandleTableRowEnd(self, input_line, output_stream):
"""Handle the output for a table row end.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
if self._in_html:
if self._table_status == "header":
# Closing header. Close the previous cell and header, start the body.
self.HandleHtmlClose(input_line, output_stream, "th")
self.HandleHtmlClose(input_line, output_stream, "thead")
self.HandleHtmlOpen(input_line, output_stream, "tbody", {}, False)
elif self._table_status == "row":
# Closing row. Close the previous cell and row.
self.HandleHtmlClose(input_line, output_stream, "td")
self.HandleHtmlClose(input_line, output_stream, "tr")
self._table_status = "rowstart"
else:
self._Write("|", output_stream)
def HandleTableClose(self, input_line, output_stream):
"""Handle the output for a table end.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
if self._in_html:
# HandleTableRowEnd will have been called by this point.
# All we need to do is close the body and table.
self.HandleHtmlClose(input_line, output_stream, "tbody")
self.HandleHtmlClose(input_line, output_stream, "table")
self._table_status = None
def HandleTableHeader(self, input_line, output_stream, columns):
"""Handle the output for starting a table header.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
columns: Column sizes.
"""
if self._in_html:
return
self.HandleText(input_line, output_stream, "\n")
for column_width in columns:
self.HandleTableCellBorder(input_line, output_stream)
# Wiki tables are left-aligned, which takes one character to specify.
self._Write(u":{0}".format("-" * (column_width - 1)), output_stream)
self.HandleTableCellBorder(input_line, output_stream)
def HandleLink(self, input_line, output_stream, target, description):
"""Handle the output of a link.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
target: The target URL of the link.
description: The description for the target.
"""
# There are six cases to handle in general:
# 1. Image link with image description:
# Link to image, using image from description as content.
# 2. Image link with non-image description:
# Link to image, using description text as content.
# 3. Image link with no description:
# Inline image.
# 4. URL link with image description:
# Link to URL, using image from description as content.
# 5. URL link with non-image description:
# Link to URL, using description text as content.
# 6. URL link with no description:
# Link to URL, using URL as content.
# Only in case 3 is no actual link present.
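# For illustration only (hypothetical URLs), the Markdown emitted by the non-HTML path
# below looks roughly like:
#   case 3: image, no description      -> ![http://example.com/a.png](http://example.com/a.png)
#   case 4: URL with image description -> [![](http://example.com/a.png)](http://example.com)
#   case 5: URL with text description  -> [Docs](http://example.com)
#   case 6: URL only                   -> http://example.com (left to GFM auto-linking)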
is_image = target.endswith(self._IMAGE_EXTENSIONS)
is_image_description = (description and
description.startswith(self._IMAGE_URL_SCHEMAS) and
description.endswith(self._IMAGE_EXTENSIONS))
if self._in_html:
self._PrintHtmlWarning(input_line, "Link")
# Handle inline image case.
if is_image and not description:
self.HandleHtmlOpen(
input_line,
output_stream,
"img",
{"src": target},
True)
else:
# Handle link cases.
self.HandleHtmlOpen(
input_line,
output_stream,
"a",
{"href": target},
False)
if is_image_description:
self.HandleHtmlOpen(
input_line,
output_stream,
"img",
{"src": description},
True)
else:
description = description or target
self._Write(cgi.escape(description), output_stream)
self.HandleHtmlClose(input_line, output_stream, "a")
else:
# If description is None, this means that only the URL was given. We'd
# like to let GFM auto-link it, because it's prettier. However, while Wiki
# syntax would auto-link a variety of URL schemes, GFM only supports http
# and https. In other cases and in the case of images, we explicitly link.
is_autolinkable = target.startswith(self._GFM_AUTO_URL_SCHEMAS)
autolink = (description is None) and is_autolinkable and (not is_image)
if autolink:
self._Write(target, output_stream)
else:
# If the descriptive text looks like an image URL, Wiki syntax would
# make the link description an inlined image. We do this by setting
# the output description to the syntax used to inline an image.
if is_image_description:
description = u"".format(description)
elif description:
description = self._Escape(description)
else:
description = target
is_image_description = is_image
# Prefix ! if linking to an image without a text description.
prefix = "!" if is_image and is_image_description else ""
output = u"{0}[{1}]({2})".format(prefix, description, target)
self._Write(output, output_stream)
def HandleWiki(self, input_line, output_stream, target, text):
"""Handle the output of a wiki link.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
target: The target URL of the link.
text: The description for the target.
"""
# A wiki link is just like a regular link, except under the wiki directory.
# At this point we make the text equal to the original target if unset.
# We do, however, append ".md", assuming the wiki files now use that extension.
self.HandleLink(input_line, output_stream, target + ".md", text or target)
def HandleIssue(self, input_line, output_stream, prefix, issue):
"""Handle the output for an auto-linked issue.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
prefix: The text that came before the issue number.
issue: The issue number.
"""
handled = False
# Preferred handler is to map the Google Code issue to a GitHub issue.
if self._issue_map and issue in self._issue_map:
migrated_issue_url = self._issue_map[issue]
migrated_issue = migrated_issue_url.rsplit("/", 1)[1]
self.HandleLink(
input_line,
output_stream,
migrated_issue_url,
u"{0}{1}".format(prefix, migrated_issue))
handled = True
instructions = ("In the output, it has been linked to the migrated issue "
"on GitHub: {0}. Please verify this issue on GitHub "
"corresponds to the original issue on Google Code. "
.format(migrated_issue))
elif self._issue_map:
instructions = ("However, it was not found in the issue migration map; "
"please verify that this issue has been correctly "
"migrated to GitHub and that the issue mapping is put "
"in the issue migration map file. ")
else:
instructions = ("However, no issue migration map was specified. You "
"can use issue_migration.py to migrate your Google "
"Code issues to GitHub, and supply the resulting issue "
"migration map file to this converter. Your old issues "
"will be auto-linked to your migrated issues. ")
# If we couldn't handle it in the map, try linking to the old issue.
if not handled and self._project:
old_link = ("https://code.google.com/p/{0}/issues/detail?id={1}"
.format(self._project, issue))
self.HandleLink(
input_line,
output_stream,
old_link,
u"{0}{1}".format(prefix, issue))
handled = True
instructions += ("As a placeholder, the text has been modified to "
"link to the original Google Code issue page:\n\t{0}"
.format(old_link))
elif not handled:
instructions += ("Additionally, because no project name was specified "
"the issue could not be linked to the original Google "
"Code issue page.")
# Couldn't map it to GitHub nor could we link to the old issue.
if not handled:
output = u"{0}{1} (on Google Code)".format(prefix, issue)
self._Write(output, output_stream)
handled = True
instructions += ("The auto-link has been removed and the text has been "
"modified from '{0}{1}' to '{2}'."
.format(prefix, issue, output))
self._warning_method(
input_line,
u"Issue {0} was auto-linked. {1}".format(issue, instructions))
def HandleRevision(self, input_line, output_stream, prefix, revision):
"""Handle the output for an auto-linked issue.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
prefix: The text that came before the revision number.
revision: The revision number.
"""
# Google Code only auto-linked revision numbers, not hashes, so
# revision auto-linking cannot be done for the conversion.
if self._project:
old_link = ("https://code.google.com/p/{0}/source/detail?r={1}"
.format(self._project, revision))
self.HandleLink(
input_line,
output_stream,
old_link,
u"{0}{1}".format(prefix, revision))
instructions = ("As a placeholder, the text has been modified to "
"link to the original Google Code source page:\n\t{0}"
.format(old_link))
else:
output = u"{0}{1} (on Google Code)".format(prefix, revision)
self._Write(output, output_stream)
instructions = ("Additionally, because no project name was specified "
"the revision could not be linked to the original "
"Google Code source page. The auto-link has been removed "
"and the text has been modified from '{0}{1}' to '{2}'."
.format(prefix, revision, output))
self._warning_method(
input_line,
"Revision {0} was auto-linked. SVN revision numbers are not sensible "
"in Git; consider updating this link or removing it altogether. {1}"
.format(revision, instructions))
def HandleHtmlOpen(
self,
unused_input_line,
output_stream,
html_tag,
params,
has_end):
"""Handle the output for an opening HTML tag.
Args:
unused_input_line: Current line number being processed.
output_stream: Output Markdown file.
html_tag: The HTML tag name.
params: The parameters for the tag.
has_end: True if the tag was self-closed.
"""
core_params = self._SerializeHtmlParams(params)
core = u"{0}{1}".format(html_tag, core_params)
if has_end:
output = u"<{0} />".format(core)
else:
output = u"<{0}>".format(core)
self._in_html += 1
self._Write(output, output_stream)
self._has_written_text = False
def HandleHtmlClose(self, unused_input_line, output_stream, html_tag):
"""Handle the output for an closing HTML tag.
Args:
unused_input_line: Current line number being processed.
output_stream: Output Markdown file.
html_tag: The HTML tag name.
"""
self._Write(u"</{0}>".format(html_tag), output_stream)
self._in_html -= 1
self._has_written_text = False
def HandleGPlusOpen(self, input_line, output_stream, unused_params):
"""Handle the output for opening a +1 button.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
unused_params: The parameters for the tag.
"""
self._warning_method(
input_line,
"A Google+ +1 button was embedded on this page, but GitHub does not "
"currently support this. Should it become supported in the future, "
"see https://developers.google.com/+/web/+1button/ for more "
"information.\nIt has been removed.")
def HandleGPlusClose(self, unused_input_line, unused_output_stream):
"""Handle the output for closing a +1 button.
Args:
unused_input_line: Current line number being processed.
unused_output_stream: Output Markdown file.
"""
pass
def HandleCommentOpen(self, input_line, output_stream):
"""Handle the output for opening a comment.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
self._warning_method(
input_line,
"A comment was used in the wiki file, but GitHub does not currently "
"support Markdown or HTML comments. As a work-around, the comment will "
"be placed in a bogus and empty <a> tag.")
self._Write("<a href='Hidden comment: ", output_stream)
self._in_comment = True
def HandleCommentClose(self, unused_input_line, output_stream):
"""Handle the output for closing a comment.
Args:
unused_input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
self._in_comment = False
self._Write("'></a>", output_stream)
def HandleVideoOpen(self, input_line, output_stream, video_id, width, height):
"""Handle the output for opening a video.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
video_id: The video ID to play.
width: Width of the resulting widget.
height: Height of the resulting widget.
"""
self._warning_method(
input_line,
"GFM does not support embedding the YouTube player directly. Instead "
"an image link to the video is being used, maintaining sizing options.")
output = self._VIDEO_TEMPLATE.format(video_id, width, height)
self._Write(output, output_stream)
def HandleVideoClose(self, unused_input_line, output_stream):
"""Handle the output for closing a video.
Args:
unused_input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
# Everything was handled on the open side.
pass
def HandleText(self, unused_input_line, output_stream, text):
"""Handle the output of raw text.
Args:
unused_input_line: Current line number being processed.
output_stream: Output Markdown file.
text: The text to output.
"""
self._Write(text, output_stream)
self._has_written_text = True
def HandleEscapedText(self, input_line, output_stream, text):
"""Handle the output of text, which should be escaped for Markdown.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
text: The text to output.
"""
# If we're in HTML, Markdown isn't processed anyway.
if self._in_html:
self.HandleText(input_line, output_stream, text)
else:
self.HandleText(input_line, output_stream, self._Escape(text))
def _PrintHtmlWarning(self, input_line, kind):
"""Warn about HTML translation being performed.
Args:
input_line: Current line number being processed.
kind: The kind of tag being changed.
"""
self._warning_method(
input_line,
"{0} markup was used within HTML tags. Because GitHub does not "
"support this, the tags have been translated to HTML. Please verify "
u"that the formatting is correct.".format(kind))
def _HandleHtmlListOpen(
self,
input_line,
output_stream,
indentation_level,
kind):
"""Handle the output for opening an HTML list.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
indentation_level: The indentation level for the item.
kind: The kind of list being opened.
"""
# Determine if this is a new list, and if a previous list was closed.
if self._list_tags:
top_tag = self._list_tags[-1]
if top_tag["indent"] != indentation_level:
# Opening a new nested list. Indentation level will always be greater,
# because for it to have gone down, the list would have been closed.
new_list = True
closing = False
elif top_tag["kind"] != kind:
# Closed the previous list, started a new one.
new_list = True
closing = True
else:
# Same list, already opened.
new_list = False
closing = False
else:
new_list = True
closing = False
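# For example, a "Bulleted list" item arriving at the same indentation as an open
# "Numeric list" closes the </ol> first and then opens a fresh <ul>, while a deeper
# indentation opens a nested list without closing the current one.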
# If we need to, close the prior list.
if closing:
self._HandleHtmlListClose(input_line, output_stream)
# Grab the tags we'll be using.
list_tag = self._HTML_LIST_TAGS[kind]["ListTag"]
item_tag = self._HTML_LIST_TAGS[kind]["ItemTag"]
# If this is a new list, note it in the stack and open it.
if new_list:
new_tag = {
"indent": indentation_level,
"kind": kind,
}
self._list_tags.append(new_tag)
self._PrintHtmlWarning(input_line, kind)
self.HandleHtmlOpen(input_line, output_stream, list_tag, {}, False)
else:
# Not a new list, close the previously outputted item.
if item_tag:
self.HandleHtmlClose(input_line, output_stream, item_tag)
# Open up a new list item
if item_tag:
self.HandleHtmlOpen(input_line, output_stream, item_tag, {}, False)
def _HandleHtmlListClose(self, input_line, output_stream):
"""Handle the output for closing an HTML list.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
# Fix index error if list_tags is empty.
if len(self._list_tags) == 0:
self._warning_method(input_line, "HtmlListClose without list_tags?")
self._list_tags = [ { "indent": 0, "kind": "Bulleted list" } ]
top_tag = self._list_tags[-1]
kind = top_tag["kind"]
self._list_tags.pop()
# Grab the tags we'll be using.
list_tag = self._HTML_LIST_TAGS[kind]["ListTag"]
item_tag = self._HTML_LIST_TAGS[kind]["ItemTag"]
# Close the previously outputted item and the list.
if item_tag:
self.HandleHtmlClose(input_line, output_stream, item_tag)
self.HandleHtmlClose(input_line, output_stream, list_tag)
def _HandleFormatClose(self, input_line, output_stream, kind):
"""Handle the output of a closing format tag.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
kind: The formatting kind.
"""
if self._format_buffer:
# End redirection.
format_buffer = self._format_buffer[-1]
self._format_buffer.pop()
# Don't do anything if we didn't buffer, or it was only whitespace.
format_buffer = format_buffer.strip()
if not format_buffer:
return
if self._in_html:
tag = self._HTML_FORMAT_TAGS[kind]["HTML"]
self.HandleHtmlOpen(input_line, output_stream, tag, {}, False)
self.HandleText(input_line, output_stream, format_buffer)
self.HandleHtmlClose(input_line, output_stream, tag)
else:
tag = self._HTML_FORMAT_TAGS[kind]["Markdown"]
self._Write(u"{0}{1}{0}".format(tag, format_buffer), output_stream)
else:
self._warning_method(input_line, u"Re-closed '{0}', ignoring.".format(tag))
def _Indent(self, output_stream, indentation_level):
"""Output indentation.
Args:
output_stream: Output Markdown file.
indentation_level: Number of indentations to output.
"""
self._Write(self._SINGLE_INDENTATION * indentation_level, output_stream)
def _Escape(self, text):
"""Escape Wiki text to be suitable in Markdown.
Args:
text: Input Wiki text.
Returns:
Escaped text for Markdown.
"""
text = text.replace("*", r"\*")
text = text.replace("_", r"\_")
# If we find a plugin-like bit of text, escape the angle-brackets.
for plugin_re in [constants.PLUGIN_RE, constants.PLUGIN_END_RE]:
while plugin_re.search(text):
match = plugin_re.search(text)
before_match = text[:match.start()]
after_match = text[match.end():]
escaped_match = match.group(0).replace("<", "&lt;").replace(">", "&gt;")
text = u"{0}{1}{2}".format(before_match, escaped_match, after_match)
# In Markdown, if a newline is preceded by two spaces it breaks the line.
# For Wiki text, this is not the case, so we strip such endings off.
while text.endswith(" \n"):
text = text[:-len(" \n")] + "\n"
return text
def _SerializeHtmlParams(self, params):
"""Serialize parameters for an HTML tag.
Args:
params: The parameters for the tag.
Returns:
Serialized parameters.
"""
core_params = ""
for name, value in params.items():
if "'" not in value:
quote = "'"
else:
quote = "\""
core_params += u" {0}={1}{2}{1}".format(name, quote, value)
return core_params
def _Write(self, text, output_stream):
"""Write text to the output stream, taking into account any redirection.
Args:
text: Input raw text.
output_stream: Output Markdown file.
"""
if not text:
return
if not self._in_comment and self._in_html:
if self._in_code_block:
text = cgi.escape(text)
if self._in_code_block or self._has_written_text:
text = text.replace("\n", "<br>\n")
if self._in_comment:
text = text.replace("'", "\"")
if self._format_buffer:
# Buffering is occurring, add to buffer.
self._format_buffer[-1] += text
else:
# No buffering occurring, just output it.
output_stream.write(text)
|
the-stack_106_16452
|
"""Main command."""
import os
import sys
from pytest_bdd.scripts import main
PATH = os.path.dirname(__file__)
def test_main(monkeypatch, capsys):
"""Test if main commmand shows help when called without the subcommand."""
monkeypatch.setattr(sys, "argv", ["pytest-bdd"])
monkeypatch.setattr(sys, "exit", lambda x: x)
main()
out, err = capsys.readouterr()
assert "usage: pytest-bdd [-h]" in err
assert "pytest-bdd: error:" in err
|