the-stack_106_32038
# model settings
model = dict(
type='CenterNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
add_summay_every_n_step=200,
style='pytorch'),
neck=dict(type='None'),
bbox_head=dict(
type='CTHead',
inplanes=(64, 128, 256, 512),
head_conv=128,
hm_head_conv_num=2,
wh_head_conv_num=2,
deconv_with_bias=False,
num_classes=81,
use_reg_offset=True,
use_smooth_l1=False,
use_giou=True,
use_truncate_gaussia=True,
use_shortcut=True,
shortcut_cfg=(1, 2, 4),
shortcut_attention=(True, True, True),
use_rep_points=False,
norm_cfg=dict(type='BN'),
wh_weight=0.1,
off_weight=1.,
hm_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=3,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=7.02e-5)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[40, 55])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=40)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=100)
# yapf:enable
# runtime settings
total_epochs = 60
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'centernet_r18_nshortcut124a_giou_h2_128c_tru_5x'
load_from = None
resume_from = None
workflow = [('train', 1)]
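# --- Illustrative addition, not part of the original config ---
# mmdetection-style configs such as this one are plain Python modules that a
# framework typically loads (e.g. via mmcv's Config.fromfile); the dicts above
# are just module-level variables.  A minimal self-contained sanity check,
# assuming the file is executed directly:
if __name__ == '__main__':
    print('backbone depth:', model['backbone']['depth'])          # 18
    print('num classes   :', model['bbox_head']['num_classes'])   # 81
    print('total epochs  :', total_epochs)                        # 60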
the-stack_106_32040
# We must redefine it in Py3k if it's not already there
def execfile(file, glob=None, loc=None):
if glob is None:
import sys
glob = sys._getframe().f_back.f_globals
if loc is None:
loc = glob
# It seems that the best way is using tokenize.open(): http://code.activestate.com/lists/python-dev/131251/
# (but tokenize.open() is only available for python 3.2)
import tokenize
if hasattr(tokenize, 'open'):
# version 3.2
stream = tokenize.open(file) # @UndefinedVariable
else:
# version 3.0 or 3.1
detect_encoding = tokenize.detect_encoding(open(file, mode="rb").readline)
stream = open(file, encoding=detect_encoding[0])
try:
contents = stream.read()
finally:
stream.close()
# execute the script (note: it's important to compile first to have the filename set in debug mode)
exec(compile(contents + "\n", file, 'exec'), glob, loc)
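# Illustrative usage sketch (not part of the original helper): execute a small
# throwaway script into an explicit namespace with the execfile() defined above.
if __name__ == '__main__':
    import os
    import tempfile
    fd, path = tempfile.mkstemp(suffix='.py')
    with os.fdopen(fd, 'w') as f:
        f.write('answer = 21 * 2\n')
    namespace = {}
    execfile(path, namespace)
    print(namespace['answer'])  # -> 42
    os.remove(path)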
the-stack_106_32041
import argparse
import logging
import os
from typing import List, Optional, Text, Dict, Union, Any
from rasa.cli import SubParsersAction
import rasa.shared.data
from rasa.shared.exceptions import YamlException
import rasa.shared.utils.io
import rasa.shared.utils.cli
from rasa.cli.arguments import test as arguments
from rasa.core.constants import (
FAILED_STORIES_FILE,
SUCCESSFUL_STORIES_FILE,
STORIES_WITH_WARNINGS_FILE,
)
from rasa.shared.constants import (
CONFIG_SCHEMA_FILE,
DEFAULT_E2E_TESTS_PATH,
DEFAULT_CONFIG_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_DATA_PATH,
DEFAULT_RESULTS_PATH,
DEFAULT_DOMAIN_PATH,
)
import rasa.shared.utils.validation as validation_utils
import rasa.cli.utils
import rasa.utils.common
from rasa.shared.importers.importer import TrainingDataImporter
logger = logging.getLogger(__name__)
def add_subparser(
subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
"""Add all test parsers.
Args:
subparsers: subparser we are going to attach to
parents: Parent parsers, needed to ensure tree structure in argparse
"""
test_parser = subparsers.add_parser(
"test",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Tests Rasa models using your test NLU data and stories.",
)
arguments.set_test_arguments(test_parser)
test_subparsers = test_parser.add_subparsers()
test_core_parser = test_subparsers.add_parser(
"core",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Tests Rasa Core models using your test stories.",
)
arguments.set_test_core_arguments(test_core_parser)
test_nlu_parser = test_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Tests Rasa NLU models using your test NLU data.",
)
arguments.set_test_nlu_arguments(test_nlu_parser)
test_core_parser.set_defaults(func=run_core_test)
test_nlu_parser.set_defaults(func=run_nlu_test)
test_parser.set_defaults(func=test, stories=DEFAULT_E2E_TESTS_PATH)
def _print_core_test_execution_info(args: argparse.Namespace) -> None:
output = args.out or DEFAULT_RESULTS_PATH
if args.successes:
rasa.shared.utils.cli.print_info(
f"Successful stories written to "
f"'{os.path.join(output, SUCCESSFUL_STORIES_FILE)}'"
)
if not args.no_errors:
rasa.shared.utils.cli.print_info(
f"Failed stories written to '{os.path.join(output, FAILED_STORIES_FILE)}'"
)
if not args.no_warnings:
rasa.shared.utils.cli.print_info(
f"Stories with prediction warnings written to "
f"'{os.path.join(output, STORIES_WITH_WARNINGS_FILE)}'"
)
def run_core_test(args: argparse.Namespace) -> None:
"""Run core tests."""
from rasa.model_testing import (
test_core_models_in_directory,
test_core,
test_core_models,
)
stories = rasa.cli.utils.get_validated_path(
args.stories, "stories", DEFAULT_DATA_PATH
)
output = args.out or DEFAULT_RESULTS_PATH
args.errors = not args.no_errors
args.warnings = not args.no_warnings
rasa.shared.utils.io.create_directory(output)
if isinstance(args.model, list) and len(args.model) == 1:
args.model = args.model[0]
if args.model is None:
rasa.shared.utils.cli.print_error(
"No model provided. Please make sure to specify "
"the model to test with '--model'."
)
return
if isinstance(args.model, str):
model_path = rasa.cli.utils.get_validated_path(
args.model, "model", DEFAULT_MODELS_PATH
)
if args.evaluate_model_directory:
test_core_models_in_directory(
args.model, stories, output, use_conversation_test_files=args.e2e
)
else:
test_core(
model=model_path,
stories=stories,
output=output,
additional_arguments=vars(args),
use_conversation_test_files=args.e2e,
)
else:
test_core_models(
args.model, stories, output, use_conversation_test_files=args.e2e
)
_print_core_test_execution_info(args)
async def run_nlu_test_async(
config: Optional[Union[Text, List[Text]]],
data_path: Text,
models_path: Text,
output_dir: Text,
cross_validation: bool,
percentages: List[int],
runs: int,
no_errors: bool,
all_args: Dict[Text, Any],
) -> None:
"""Runs NLU tests.
Args:
all_args: all arguments gathered in a Dict so we can pass it as one argument
to other functions.
config: it refers to the model configuration file. It can be a single file or
a list of multiple files or a folder with multiple config files inside.
data_path: path for the nlu data.
models_path: path to a trained Rasa model.
output_dir: output path for any files created during the evaluation.
cross_validation: indicates if it should test the model using cross validation
or not.
percentages: defines the exclusion percentage of the training data.
runs: number of comparison runs to make.
no_errors: indicates if incorrect predictions should be written to a file
or not.
"""
from rasa.model_testing import (
compare_nlu_models,
perform_nlu_cross_validation,
test_nlu,
)
data_path = rasa.cli.utils.get_validated_path(data_path, "nlu", DEFAULT_DATA_PATH)
test_data_importer = TrainingDataImporter.load_from_dict(
training_data_paths=[data_path], domain_path=DEFAULT_DOMAIN_PATH,
)
nlu_data = test_data_importer.get_nlu_data()
output = output_dir or DEFAULT_RESULTS_PATH
all_args["errors"] = not no_errors
rasa.shared.utils.io.create_directory(output)
if config is not None and len(config) == 1:
config = os.path.abspath(config[0])
if os.path.isdir(config):
config = rasa.shared.utils.io.list_files(config)
if isinstance(config, list):
logger.info(
"Multiple configuration files specified, running nlu comparison mode."
)
config_files = []
for file in config:
try:
validation_utils.validate_yaml_schema(
rasa.shared.utils.io.read_file(file), CONFIG_SCHEMA_FILE,
)
config_files.append(file)
except YamlException:
rasa.shared.utils.io.raise_warning(
f"Ignoring file '{file}' as it is not a valid config file."
)
continue
await compare_nlu_models(
configs=config_files,
test_data=nlu_data,
output=output,
runs=runs,
exclusion_percentages=percentages,
)
elif cross_validation:
logger.info("Test model using cross validation.")
config = rasa.cli.utils.get_validated_path(
config, "config", DEFAULT_CONFIG_PATH
)
config_importer = TrainingDataImporter.load_from_dict(config_path=config)
config_dict = config_importer.get_config()
perform_nlu_cross_validation(config_dict, nlu_data, output, all_args)
else:
model_path = rasa.cli.utils.get_validated_path(
models_path, "model", DEFAULT_MODELS_PATH
)
test_nlu(model_path, data_path, output, all_args)
def run_nlu_test(args: argparse.Namespace) -> None:
"""Runs NLU tests.
Args:
args: the parsed CLI arguments for 'rasa test nlu'.
"""
rasa.utils.common.run_in_loop(
run_nlu_test_async(
args.config,
args.nlu,
args.model,
args.out,
args.cross_validation,
args.percentages,
args.runs,
args.no_errors,
vars(args),
)
)
def test(args: argparse.Namespace) -> None:
"""Run end-to-end tests."""
setattr(args, "e2e", True)
run_core_test(args)
run_nlu_test(args)
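# Illustrative wiring sketch (an assumption, not part of the original module):
# rasa's real CLI builds a top-level parser with shared parent parsers and then
# calls add_subparser() on it, roughly as below.  Requires rasa to be installed.
if __name__ == '__main__':
    cli_parser = argparse.ArgumentParser(prog='rasa')
    cli_subparsers = cli_parser.add_subparsers()
    add_subparser(cli_subparsers, parents=[])
    # e.g. `rasa test --help` would now describe the sub-command added above
    cli_parser.parse_args(['test', '--help'])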
the-stack_106_32042
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Simulates the Arduino communication stream as expected by demo A and C.
"""
__author__ = "Dennis van Gils"
__authoremail__ = "[email protected]"
__url__ = "https://github.com/Dennis-van-Gils/DvG_Arduino_PyQt_multithread_demo"
__date__ = "11-08-2020"
__version__ = "0.0.1"
# pylint: disable=unused-argument
import time
import numpy as np
from PyQt5 import QtCore
class FakeArduino:
def __init__(
self, *args, **kwargs,
):
self.serial_settings = dict()
self.name = "FakeArd"
self.long_name = "FakeArduino"
self.is_alive = True
self.mutex = QtCore.QMutex()
self.wave_freq = 0.3 # [Hz]
self.wave_type = "sine"
def write(self, msg, *args, **kwargs) -> bool:
self.wave_type = msg
return True
def query_ascii_values(self, *args, **kwargs) -> tuple:
t = time.perf_counter()
if self.wave_type == "sine":
wave = np.sin(2 * np.pi * self.wave_freq * t)
elif self.wave_type == "square":
wave = 1 if np.mod(self.wave_freq * t, 1.0) > 0.5 else -1
elif self.wave_type == "sawtooth":
wave = 2 * np.mod(self.wave_freq * t, 1.0) - 1
return (True, (t, wave))
def close(self):
pass
def auto_connect(self, *args, **kwargs) -> bool:
return True
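# Illustrative usage sketch (not part of the original module): poll the fake
# device the same way the demo polls a real Arduino wrapper.
if __name__ == '__main__':
    ard = FakeArduino()
    ard.write('square')
    success, (t, wave) = ard.query_ascii_values()
    print(ard.long_name, 'alive:', ard.is_alive, 't=%.3f' % t, 'wave=%s' % wave)
    ard.close()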
the-stack_106_32044
# Copyright (c) 2016 The Pybind Development Team, All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# You are under no obligation whatsoever to provide any bug fixes, patches, or
# upgrades to the features, functionality or performance of the source code
# ("Enhancements") to anyone; however, if you choose to make your Enhancements
# available either publicly, or directly to the author of this software, without
# imposing a separate written license agreement for such Enhancements, then you
# hereby grant the following license: a non-exclusive, royalty-free perpetual
# license to install, use, modify, prepare derivative works, incorporate into
# other computer software, distribute, and sublicense such enhancements or
# derivative works thereof, in binary and source code form.
#
# This setup.py was taken from https://github.com/pybind/cmake_example
# and modified accordingly. Essentially, we assume the underlying CMake
# project builds as-is with default arguments.
import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.5.0':
raise RuntimeError("CMake >= 3.5.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DIMAGECAPTURE_PYTHON_OUTPUT_DIRECTORY=' + extdir,
'-DIMAGECAPTURE_PYTHON_MODULE_NAME=' + self.distribution.get_name(),
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
setup(
name='imagecapturepython', # must match python module name in your c++ code.
version='0.0.1', # must match CMakeLists.txt
author='Myself',
author_email='[email protected]',
    description='A software package for capturing images from input devices and rendering to video outputs.',
long_description='',
ext_modules=[CMakeExtension('CMakeCatchTemplate')], # must match top-level folder name.
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
)
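# Illustrative usage note (an assumption, not part of the original setup.py):
# with CMake >= 3.5 and a C++ toolchain on PATH, the extension is typically
# built from the project root with either of:
#   pip install .
#   python setup.py build_ext --inplace
# Both routes end up invoking CMakeBuild.build_extension() defined above.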
the-stack_106_32046
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=19
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[0])) # number=16
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[0])) # number=18
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=13
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=14
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=15
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
circuit = circuit.with_noise(cirq.depolarize(p=0.01))
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_noisy537.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
    writefile.close()
the-stack_106_32048
# -*- coding: utf-8 -*-
__author__ = 'mika hämäläinen'
pronouns = {"SG1" : "minä", "SG2" : "sinä", "SG3" : "se", "PL1" : "me", "PL2": "te", "PL3": "ne"}
def pronoun(person, human=True):
    if human and person == "SG3":
        return "hän"
    if human and person == "PL3":
        return "he"
if person in pronouns:
return pronouns[person]
else:
return None
def is_personal_pronoun(p_pronoun):
if p_pronoun in pronouns.values():
return True
else:
return False
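# Illustrative usage sketch (not part of the original module):
if __name__ == '__main__':
    print(pronoun("SG3"))                 # hän  (human referent)
    print(pronoun("SG3", human=False))    # se
    print(pronoun("PL1"))                 # me
    print(is_personal_pronoun("minä"))    # True
    print(is_personal_pronoun("hän"))     # False (not in the base table)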
the-stack_106_32049
#!/usr/bin/python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
from setuptools import setup, find_packages
import os
import sys
"""
setup module for dysmsapi.
Created on 7/3/2015
@author: alex
"""
PACKAGE = "aliyunsdkdysmsapi"
NAME = "aliyun-python-sdk-dysmsapi"
DESCRIPTION = "The dysmsapi module of Aliyun Python sdk."
AUTHOR = "Aliyun"
AUTHOR_EMAIL = "[email protected]"
URL = "http://develop.aliyun.com/sdk/python"
TOPDIR = os.path.dirname(__file__) or "."
VERSION = __import__(PACKAGE).__version__
desc_file = open("README.rst")
try:
LONG_DESCRIPTION = desc_file.read()
finally:
desc_file.close()
requires = []
if sys.version_info < (3, 3):
requires.append("aliyun-python-sdk-core>=2.0.2")
else:
requires.append("aliyun-python-sdk-core-v3>=2.3.5")
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache",
url=URL,
keywords=["aliyun","sdk","dysmsapi"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=requires,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development",
]
)
the-stack_106_32051
"""SCons.Subst
SCons string substitution.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Subst.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import collections
import re
import SCons.Errors
from SCons.Util import is_String, is_Sequence
# Indexed by the SUBST_* constants below.
_strconv = [SCons.Util.to_String_for_subst,
SCons.Util.to_String_for_subst,
SCons.Util.to_String_for_signature]
AllowableExceptions = (IndexError, NameError)
def SetAllowableExceptions(*excepts):
global AllowableExceptions
AllowableExceptions = [_f for _f in excepts if _f]
def raise_exception(exception, target, s):
name = exception.__class__.__name__
msg = "%s `%s' trying to evaluate `%s'" % (name, exception, s)
if target:
raise SCons.Errors.BuildError(target[0], msg)
else:
raise SCons.Errors.UserError(msg)
class Literal(object):
"""A wrapper for a string. If you use this object wrapped
around a string, then it will be interpreted as literal.
When passed to the command interpreter, all special
characters will be escaped."""
def __init__(self, lstr):
self.lstr = lstr
def __str__(self):
return self.lstr
def escape(self, escape_func):
return escape_func(self.lstr)
def for_signature(self):
return self.lstr
def is_literal(self):
return 1
def __eq__(self, other):
if not isinstance(other, Literal):
return False
return self.lstr == other.lstr
def __neq__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.lstr)
class SpecialAttrWrapper(object):
"""This is a wrapper for what we call a 'Node special attribute.'
This is any of the attributes of a Node that we can reference from
Environment variable substitution, such as $TARGET.abspath or
$SOURCES[1].filebase. We implement the same methods as Literal
so we can handle special characters, plus a for_signature method,
such that we can return some canonical string during signature
calculation to avoid unnecessary rebuilds."""
def __init__(self, lstr, for_signature=None):
"""The for_signature parameter, if supplied, will be the
canonical string we return from for_signature(). Else
we will simply return lstr."""
self.lstr = lstr
if for_signature:
self.forsig = for_signature
else:
self.forsig = lstr
def __str__(self):
return self.lstr
def escape(self, escape_func):
return escape_func(self.lstr)
def for_signature(self):
return self.forsig
def is_literal(self):
return 1
def quote_spaces(arg):
"""Generic function for putting double quotes around any string that
has white space in it."""
if ' ' in arg or '\t' in arg:
return '"%s"' % arg
else:
return str(arg)
class CmdStringHolder(collections.UserString):
"""This is a special class used to hold strings generated by
scons_subst() and scons_subst_list(). It defines a special method
escape(). When passed a function with an escape algorithm for a
particular platform, it will return the contained string with the
proper escape sequences inserted.
"""
def __init__(self, cmd, literal=None):
collections.UserString.__init__(self, cmd)
self.literal = literal
def is_literal(self):
return self.literal
def escape(self, escape_func, quote_func=quote_spaces):
"""Escape the string with the supplied function. The
function is expected to take an arbitrary string, then
return it with all special characters escaped and ready
for passing to the command interpreter.
After calling this function, the next call to str() will
return the escaped string.
"""
if self.is_literal():
return escape_func(self.data)
elif ' ' in self.data or '\t' in self.data:
return quote_func(self.data)
else:
return self.data
def escape_list(mylist, escape_func):
"""Escape a list of arguments by running the specified escape_func
on every object in the list that has an escape() method."""
def escape(obj, escape_func=escape_func):
try:
e = obj.escape
except AttributeError:
return obj
else:
return e(escape_func)
return list(map(escape, mylist))
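# Illustrative check (not in the original source): Literal objects get their
# escape() method applied, while plain strings pass through untouched.  Guarded
# so it only runs when this module is executed directly.
if __name__ == '__main__':
    _demo = ['plain word', Literal('lit str')]
    print(escape_list(_demo, quote_spaces))   # ['plain word', '"lit str"']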
class NLWrapper(object):
"""A wrapper class that delays turning a list of sources or targets
into a NodeList until it's needed. The specified function supplied
when the object is initialized is responsible for turning raw nodes
into proxies that implement the special attributes like .abspath,
.source, etc. This way, we avoid creating those proxies just
"in case" someone is going to use $TARGET or the like, and only
go through the trouble if we really have to.
In practice, this might be a wash performance-wise, but it's a little
cleaner conceptually...
"""
def __init__(self, list, func):
self.list = list
self.func = func
def _return_nodelist(self):
return self.nodelist
def _gen_nodelist(self):
mylist = self.list
if mylist is None:
mylist = []
elif not is_Sequence(mylist):
mylist = [mylist]
# The map(self.func) call is what actually turns
# a list into appropriate proxies.
self.nodelist = SCons.Util.NodeList(list(map(self.func, mylist)))
self._create_nodelist = self._return_nodelist
return self.nodelist
_create_nodelist = _gen_nodelist
class Targets_or_Sources(collections.UserList):
"""A class that implements $TARGETS or $SOURCES expansions by in turn
wrapping a NLWrapper. This class handles the different methods used
to access the list, calling the NLWrapper to create proxies on demand.
Note that we subclass collections.UserList purely so that the
is_Sequence() function will identify an object of this class as
a list during variable expansion. We're not really using any
collections.UserList methods in practice.
"""
def __init__(self, nl):
self.nl = nl
def __getattr__(self, attr):
nl = self.nl._create_nodelist()
return getattr(nl, attr)
def __getitem__(self, i):
nl = self.nl._create_nodelist()
return nl[i]
def __getslice__(self, i, j):
nl = self.nl._create_nodelist()
i = max(i, 0); j = max(j, 0)
return nl[i:j]
def __str__(self):
nl = self.nl._create_nodelist()
return str(nl)
def __repr__(self):
nl = self.nl._create_nodelist()
return repr(nl)
class Target_or_Source(object):
"""A class that implements $TARGET or $SOURCE expansions by in turn
wrapping a NLWrapper. This class handles the different methods used
to access an individual proxy Node, calling the NLWrapper to create
a proxy on demand.
"""
def __init__(self, nl):
self.nl = nl
def __getattr__(self, attr):
nl = self.nl._create_nodelist()
try:
nl0 = nl[0]
except IndexError:
# If there is nothing in the list, then we have no attributes to
# pass through, so raise AttributeError for everything.
raise AttributeError("NodeList has no attribute: %s" % attr)
return getattr(nl0, attr)
def __str__(self):
nl = self.nl._create_nodelist()
if nl:
return str(nl[0])
return ''
def __repr__(self):
nl = self.nl._create_nodelist()
if nl:
return repr(nl[0])
return ''
class NullNodeList(SCons.Util.NullSeq):
def __call__(self, *args, **kwargs): return ''
def __str__(self): return ''
NullNodesList = NullNodeList()
def subst_dict(target, source):
"""Create a dictionary for substitution of special
construction variables.
This translates the following special arguments:
target - the target (object or array of objects),
used to generate the TARGET and TARGETS
construction variables
source - the source (object or array of objects),
used to generate the SOURCES and SOURCE
construction variables
"""
dict = {}
if target:
def get_tgt_subst_proxy(thing):
try:
subst_proxy = thing.get_subst_proxy()
except AttributeError:
subst_proxy = thing # probably a string, just return it
return subst_proxy
tnl = NLWrapper(target, get_tgt_subst_proxy)
dict['TARGETS'] = Targets_or_Sources(tnl)
dict['TARGET'] = Target_or_Source(tnl)
# This is a total cheat, but hopefully this dictionary goes
# away soon anyway. We just let these expand to $TARGETS
# because that's "good enough" for the use of ToolSurrogates
# (see test/ToolSurrogate.py) to generate documentation.
dict['CHANGED_TARGETS'] = '$TARGETS'
dict['UNCHANGED_TARGETS'] = '$TARGETS'
else:
dict['TARGETS'] = NullNodesList
dict['TARGET'] = NullNodesList
if source:
def get_src_subst_proxy(node):
try:
rfile = node.rfile
except AttributeError:
pass
else:
node = rfile()
try:
return node.get_subst_proxy()
except AttributeError:
return node # probably a String, just return it
snl = NLWrapper(source, get_src_subst_proxy)
dict['SOURCES'] = Targets_or_Sources(snl)
dict['SOURCE'] = Target_or_Source(snl)
# This is a total cheat, but hopefully this dictionary goes
# away soon anyway. We just let these expand to $TARGETS
# because that's "good enough" for the use of ToolSurrogates
# (see test/ToolSurrogate.py) to generate documentation.
dict['CHANGED_SOURCES'] = '$SOURCES'
dict['UNCHANGED_SOURCES'] = '$SOURCES'
else:
dict['SOURCES'] = NullNodesList
dict['SOURCE'] = NullNodesList
return dict
class StringSubber(object):
"""A class to construct the results of a scons_subst() call.
This binds a specific construction environment, mode, target and
source with two methods (substitute() and expand()) that handle
the expansion.
"""
def __init__(self, env, mode, conv, gvars):
self.env = env
self.mode = mode
self.conv = conv
self.gvars = gvars
def expand(self, s, lvars):
"""Expand a single "token" as necessary, returning an
appropriate string containing the expansion.
This handles expanding different types of things (strings,
lists, callables) appropriately. It calls the wrapper
substitute() method to re-expand things as necessary, so that
the results of expansions of side-by-side strings still get
re-evaluated separately, not smushed together.
"""
if is_String(s):
try:
s0, s1 = s[:2]
except (IndexError, ValueError):
return s
if s0 != '$':
return s
if s1 == '$':
# In this case keep the double $'s which we'll later
# swap for a single dollar sign as we need to retain
# this information to properly avoid matching "$("" when
# the actual text was "$$("" (or "$)"" when "$$)"" )
return '$$'
elif s1 in '()':
return s
else:
key = s[1:]
if key[0] == '{' or '.' in key:
if key[0] == '{':
key = key[1:-1]
# Store for error messages if we fail to expand the
# value
old_s = s
s = None
if key in lvars:
s = lvars[key]
elif key in self.gvars:
s = self.gvars[key]
else:
try:
s = eval(key, self.gvars, lvars)
except KeyboardInterrupt:
raise
except Exception as e:
if e.__class__ in AllowableExceptions:
return ''
raise_exception(e, lvars['TARGETS'], old_s)
if s is None and NameError not in AllowableExceptions:
raise_exception(NameError(key), lvars['TARGETS'], old_s)
elif s is None:
return ''
# Before re-expanding the result, handle
# recursive expansion by copying the local
# variable dictionary and overwriting a null
# string for the value of the variable name
# we just expanded.
#
# This could potentially be optimized by only
# copying lvars when s contains more expansions,
# but lvars is usually supposed to be pretty
# small, and deeply nested variable expansions
# are probably more the exception than the norm,
# so it should be tolerable for now.
lv = lvars.copy()
var = key.split('.')[0]
lv[var] = ''
return self.substitute(s, lv)
elif is_Sequence(s):
def func(l, conv=self.conv, substitute=self.substitute, lvars=lvars):
return conv(substitute(l, lvars))
return list(map(func, s))
elif callable(s):
try:
s = s(target=lvars['TARGETS'],
source=lvars['SOURCES'],
env=self.env,
for_signature=(self.mode != SUBST_CMD))
except TypeError:
# This probably indicates that it's a callable
# object that doesn't match our calling arguments
# (like an Action).
if self.mode == SUBST_RAW:
return s
s = self.conv(s)
return self.substitute(s, lvars)
elif s is None:
return ''
else:
return s
def substitute(self, args, lvars):
"""Substitute expansions in an argument or list of arguments.
This serves as a wrapper for splitting up a string into
separate tokens.
"""
if is_String(args) and not isinstance(args, CmdStringHolder):
args = str(args) # In case it's a UserString.
try:
def sub_match(match):
return self.conv(self.expand(match.group(1), lvars))
result = _dollar_exps.sub(sub_match, args)
except TypeError:
# If the internal conversion routine doesn't return
# strings (it could be overridden to return Nodes, for
# example), then the 1.5.2 re module will throw this
# exception. Back off to a slower, general-purpose
# algorithm that works for all data types.
args = _separate_args.findall(args)
result = []
for a in args:
result.append(self.conv(self.expand(a, lvars)))
if len(result) == 1:
result = result[0]
else:
result = ''.join(map(str, result))
return result
else:
return self.expand(args, lvars)
class ListSubber(collections.UserList):
"""A class to construct the results of a scons_subst_list() call.
Like StringSubber, this class binds a specific construction
environment, mode, target and source with two methods
(substitute() and expand()) that handle the expansion.
In addition, however, this class is used to track the state of
the result(s) we're gathering so we can do the appropriate thing
whenever we have to append another word to the result--start a new
line, start a new word, append to the current word, etc. We do
this by setting the "append" attribute to the right method so
that our wrapper methods only need ever call ListSubber.append(),
and the rest of the object takes care of doing the right thing
internally.
"""
def __init__(self, env, mode, conv, gvars):
collections.UserList.__init__(self, [])
self.env = env
self.mode = mode
self.conv = conv
self.gvars = gvars
if self.mode == SUBST_RAW:
self.add_strip = lambda x: self.append(x)
else:
self.add_strip = lambda x: None
self.in_strip = None
self.next_line()
def expanded(self, s):
"""Determines if the string s requires further expansion.
Due to the implementation of ListSubber expand will call
itself 2 additional times for an already expanded string. This
method is used to determine if a string is already fully
expanded and if so exit the loop early to prevent these
recursive calls.
"""
if not is_String(s) or isinstance(s, CmdStringHolder):
return False
s = str(s) # in case it's a UserString
return _separate_args.findall(s) is None
def expand(self, s, lvars, within_list):
"""Expand a single "token" as necessary, appending the
expansion to the current result.
This handles expanding different types of things (strings,
lists, callables) appropriately. It calls the wrapper
substitute() method to re-expand things as necessary, so that
the results of expansions of side-by-side strings still get
re-evaluated separately, not smushed together.
"""
if is_String(s):
try:
s0, s1 = s[:2]
except (IndexError, ValueError):
self.append(s)
return
if s0 != '$':
self.append(s)
return
if s1 == '$':
self.append('$')
elif s1 == '(':
self.open_strip('$(')
elif s1 == ')':
self.close_strip('$)')
else:
key = s[1:]
if key[0] == '{' or key.find('.') >= 0:
if key[0] == '{':
key = key[1:-1]
# Store for error messages if we fail to expand the
# value
old_s = s
s = None
if key in lvars:
s = lvars[key]
elif key in self.gvars:
s = self.gvars[key]
else:
try:
s = eval(key, self.gvars, lvars)
except KeyboardInterrupt:
raise
except Exception as e:
if e.__class__ in AllowableExceptions:
return
raise_exception(e, lvars['TARGETS'], old_s)
if s is None and NameError not in AllowableExceptions:
raise_exception(NameError(), lvars['TARGETS'], old_s)
elif s is None:
return
# If the string is already full expanded there's no
# need to continue recursion.
if self.expanded(s):
self.append(s)
return
# Before re-expanding the result, handle
# recursive expansion by copying the local
# variable dictionary and overwriting a null
# string for the value of the variable name
# we just expanded.
lv = lvars.copy()
var = key.split('.')[0]
lv[var] = ''
self.substitute(s, lv, 0)
self.this_word()
elif is_Sequence(s):
for a in s:
self.substitute(a, lvars, 1)
self.next_word()
elif callable(s):
try:
s = s(target=lvars['TARGETS'],
source=lvars['SOURCES'],
env=self.env,
for_signature=(self.mode != SUBST_CMD))
except TypeError:
# This probably indicates that it's a callable
# object that doesn't match our calling arguments
# (like an Action).
if self.mode == SUBST_RAW:
self.append(s)
return
s = self.conv(s)
self.substitute(s, lvars, within_list)
elif s is None:
self.this_word()
else:
self.append(s)
def substitute(self, args, lvars, within_list):
"""Substitute expansions in an argument or list of arguments.
This serves as a wrapper for splitting up a string into
separate tokens.
"""
if is_String(args) and not isinstance(args, CmdStringHolder):
args = str(args) # In case it's a UserString.
args = _separate_args.findall(args)
for a in args:
if a[0] in ' \t\n\r\f\v':
if '\n' in a:
self.next_line()
elif within_list:
self.append(a)
else:
self.next_word()
else:
self.expand(a, lvars, within_list)
else:
self.expand(args, lvars, within_list)
def next_line(self):
"""Arrange for the next word to start a new line. This
is like starting a new word, except that we have to append
another line to the result."""
collections.UserList.append(self, [])
self.next_word()
def this_word(self):
"""Arrange for the next word to append to the end of the
current last word in the result."""
self.append = self.add_to_current_word
def next_word(self):
"""Arrange for the next word to start a new word."""
self.append = self.add_new_word
def add_to_current_word(self, x):
"""Append the string x to the end of the current last word
in the result. If that is not possible, then just add
it as a new word. Make sure the entire concatenated string
inherits the object attributes of x (in particular, the
escape function) by wrapping it as CmdStringHolder."""
if not self.in_strip or self.mode != SUBST_SIG:
try:
current_word = self[-1][-1]
except IndexError:
self.add_new_word(x)
else:
# All right, this is a hack and it should probably
# be refactored out of existence in the future.
# The issue is that we want to smoosh words together
# and make one file name that gets escaped if
# we're expanding something like foo$EXTENSION,
# but we don't want to smoosh them together if
# it's something like >$TARGET, because then we'll
# treat the '>' like it's part of the file name.
# So for now, just hard-code looking for the special
# command-line redirection characters...
try:
last_char = str(current_word)[-1]
except IndexError:
last_char = '\0'
if last_char in '<>|':
self.add_new_word(x)
else:
y = current_word + x
# We used to treat a word appended to a literal
# as a literal itself, but this caused problems
# with interpreting quotes around space-separated
# targets on command lines. Removing this makes
# none of the "substantive" end-to-end tests fail,
# so we'll take this out but leave it commented
# for now in case there's a problem not covered
# by the test cases and we need to resurrect this.
#literal1 = self.literal(self[-1][-1])
#literal2 = self.literal(x)
y = self.conv(y)
if is_String(y):
#y = CmdStringHolder(y, literal1 or literal2)
y = CmdStringHolder(y, None)
self[-1][-1] = y
def add_new_word(self, x):
if not self.in_strip or self.mode != SUBST_SIG:
literal = self.literal(x)
x = self.conv(x)
if is_String(x):
x = CmdStringHolder(x, literal)
self[-1].append(x)
self.append = self.add_to_current_word
def literal(self, x):
try:
l = x.is_literal
except AttributeError:
return None
else:
return l()
def open_strip(self, x):
"""Handle the "open strip" $( token."""
self.add_strip(x)
self.in_strip = 1
def close_strip(self, x):
"""Handle the "close strip" $) token."""
self.add_strip(x)
self.in_strip = None
# Constants for the "mode" parameter to scons_subst_list() and
# scons_subst(). SUBST_RAW gives the raw command line. SUBST_CMD
# gives a command line suitable for passing to a shell. SUBST_SIG
# gives a command line appropriate for calculating the signature
# of a command line...if this changes, we should rebuild.
SUBST_CMD = 0
SUBST_RAW = 1
SUBST_SIG = 2
_rm = re.compile(r'\$[()]')
# Note the pattern below only matches $( or $) when there is no
# preceding $. (Thus the (?<!\$))
_rm_split = re.compile(r'(?<!\$)(\$[()])')
# Indexed by the SUBST_* constants above.
_regex_remove = [ _rm, None, _rm_split ]
def _rm_list(list):
return [l for l in list if l not in ('$(', '$)')]
def _remove_list(list):
result = []
depth = 0
for l in list:
if l == '$(':
depth += 1
elif l == '$)':
depth -= 1
if depth < 0:
break
elif depth == 0:
result.append(l)
if depth != 0:
return None
return result
# Indexed by the SUBST_* constants above.
_list_remove = [ _rm_list, None, _remove_list ]
# Regular expressions for splitting strings and handling substitutions,
# for use by the scons_subst() and scons_subst_list() functions:
#
# The first expression compiled matches all of the $-introduced tokens
# that we need to process in some way, and is used for substitutions.
# The expressions it matches are:
#
# "$$"
# "$("
# "$)"
# "$variable" [must begin with alphabetic or underscore]
# "${any stuff}"
#
# The second expression compiled is used for splitting strings into tokens
# to be processed, and it matches all of the tokens listed above, plus
# the following that affect how arguments do or don't get joined together:
#
# " " [white space]
# "non-white-space" [without any dollar signs]
# "$" [single dollar sign]
#
_dollar_exps_str = r'\$[\$\(\)]|\$[_a-zA-Z][\.\w]*|\${[^}]*}'
_dollar_exps = re.compile(r'(%s)' % _dollar_exps_str)
_separate_args = re.compile(r'(%s|\s+|[^\s$]+|\$)' % _dollar_exps_str)
# This regular expression is used to replace strings of multiple white
# space characters in the string result from the scons_subst() function.
_space_sep = re.compile(r'[\t ]+(?![^{]*})')
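# Illustrative tokenization check (not in the original source), showing how the
# two expressions above split a command string into $-tokens, whitespace runs
# and plain words.  Guarded so it only runs when executed directly.
if __name__ == '__main__':
    _sample = 'gcc $CCFLAGS -o $TARGET ${SOURCES[0]}'
    print(_dollar_exps.findall(_sample))
    # ['$CCFLAGS', '$TARGET', '${SOURCES[0]}']
    print(_separate_args.findall(_sample))
    # ['gcc', ' ', '$CCFLAGS', ' ', '-o', ' ', '$TARGET', ' ', '${SOURCES[0]}']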
def scons_subst(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
"""Expand a string or list containing construction variable
substitutions.
This is the work-horse function for substitutions in file names
and the like. The companion scons_subst_list() function (below)
handles separating command lines into lists of arguments, so see
that function if that's what you're looking for.
"""
if (isinstance(strSubst, str) and '$' not in strSubst) or isinstance(strSubst, CmdStringHolder):
return strSubst
if conv is None:
conv = _strconv[mode]
# Doing this every time is a bit of a waste, since the Executor
# has typically already populated the OverrideEnvironment with
# $TARGET/$SOURCE variables. We're keeping this (for now), though,
# because it supports existing behavior that allows us to call
# an Action directly with an arbitrary target+source pair, which
# we use in Tool/tex.py to handle calling $BIBTEX when necessary.
# If we dropped that behavior (or found another way to cover it),
# we could get rid of this call completely and just rely on the
# Executor setting the variables.
if 'TARGET' not in lvars:
d = subst_dict(target, source)
if d:
lvars = lvars.copy()
lvars.update(d)
# We're (most likely) going to eval() things. If Python doesn't
# find a __builtins__ value in the global dictionary used for eval(),
# it copies the current global values for you. Avoid this by
# setting it explicitly and then deleting, so we don't pollute the
# construction environment Dictionary(ies) that are typically used
# for expansion.
gvars['__builtins__'] = __builtins__
ss = StringSubber(env, mode, conv, gvars)
result = ss.substitute(strSubst, lvars)
try:
del gvars['__builtins__']
except KeyError:
pass
res = result
if is_String(result):
# Remove $(-$) pairs and any stuff in between,
# if that's appropriate.
remove = _regex_remove[mode]
if remove:
if mode == SUBST_SIG:
result = _list_remove[mode](remove.split(result))
if result is None:
raise SCons.Errors.UserError("Unbalanced $(/$) in: " + res)
result = ' '.join(result)
else:
result = remove.sub('', result)
if mode != SUBST_RAW:
# Compress strings of white space characters into
# a single space.
result = _space_sep.sub(' ', result).strip()
# Now replace escaped $'s currently "$$"
# This is needed because we now retain $$ instead of
            # replacing them during substitution to avoid
# improperly trying to escape "$$(" as being "$("
result = result.replace('$$','$')
elif is_Sequence(result):
remove = _list_remove[mode]
if remove:
result = remove(result)
if result is None:
raise SCons.Errors.UserError("Unbalanced $(/$) in: " + str(res))
return result
def scons_subst_list(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
"""Substitute construction variables in a string (or list or other
object) and separate the arguments into a command list.
The companion scons_subst() function (above) handles basic
substitutions within strings, so see that function instead
if that's what you're looking for.
"""
if conv is None:
conv = _strconv[mode]
# Doing this every time is a bit of a waste, since the Executor
# has typically already populated the OverrideEnvironment with
# $TARGET/$SOURCE variables. We're keeping this (for now), though,
# because it supports existing behavior that allows us to call
# an Action directly with an arbitrary target+source pair, which
# we use in Tool/tex.py to handle calling $BIBTEX when necessary.
# If we dropped that behavior (or found another way to cover it),
# we could get rid of this call completely and just rely on the
# Executor setting the variables.
if 'TARGET' not in lvars:
d = subst_dict(target, source)
if d:
lvars = lvars.copy()
lvars.update(d)
# We're (most likely) going to eval() things. If Python doesn't
# find a __builtins__ value in the global dictionary used for eval(),
# it copies the current global values for you. Avoid this by
# setting it explicitly and then deleting, so we don't pollute the
# construction environment Dictionary(ies) that are typically used
# for expansion.
gvars['__builtins__'] = __builtins__
ls = ListSubber(env, mode, conv, gvars)
ls.substitute(strSubst, lvars, 0)
try:
del gvars['__builtins__']
except KeyError:
pass
return ls.data
def scons_subst_once(strSubst, env, key):
"""Perform single (non-recursive) substitution of a single
construction variable keyword.
This is used when setting a variable when copying or overriding values
in an Environment. We want to capture (expand) the old value before
we override it, so people can do things like:
env2 = env.Clone(CCFLAGS = '$CCFLAGS -g')
We do this with some straightforward, brute-force code here...
"""
if isinstance(strSubst, str) and strSubst.find('$') < 0:
return strSubst
matchlist = ['$' + key, '${' + key + '}']
val = env.get(key, '')
def sub_match(match, val=val, matchlist=matchlist):
a = match.group(1)
if a in matchlist:
a = val
if is_Sequence(a):
return ' '.join(map(str, a))
else:
return str(a)
if is_Sequence(strSubst):
result = []
for arg in strSubst:
if is_String(arg):
if arg in matchlist:
arg = val
if is_Sequence(arg):
result.extend(arg)
else:
result.append(arg)
else:
result.append(_dollar_exps.sub(sub_match, arg))
else:
result.append(arg)
return result
elif is_String(strSubst):
return _dollar_exps.sub(sub_match, strSubst)
else:
return strSubst
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
the-stack_106_32053
import unittest
import sys
import pexpect
from vncdotool import rfb
class TestLogEvents(object):
def setUp(self):
cmd = 'vncev -rfbport 5999 -rfbwait 1000'
self.server = pexpect.spawn(cmd, timeout=2)
self.server.logfile_read = sys.stdout
cmd = 'vnclog --listen 1842 -s :99 -'
self.recorder = pexpect.spawn(cmd, timeout=2)
self.recorder.logfile_read = sys.stdout
def tearDown(self):
self.server.terminate(force=True)
if self.recorder:
self.recorder.terminate(force=True)
def run_vncdo(self, commands):
cmd = 'vncdo -s localhost::1842 ' + commands
vnc = pexpect.spawn(cmd, timeout=2)
vnc.logfile_read = sys.stdout
retval = vnc.wait()
assert retval == 0, (retval, str(vnc))
def assertKeyDown(self, key):
down = '^.*down:\s+\(%s\)\r' % hex(key)
self.server.expect(down)
def assertKeyUp(self, key):
up = '^.*up:\s+\(%s\)\r' % hex(key)
self.server.expect(up)
def assertMouse(self, x, y, buttonmask):
output = '^.*Ptr: mouse button mask %s at %d,%d' % (hex(buttonmask), x, y)
self.server.expect(output)
def test_key_alpha(self):
self.run_vncdo('key z')
self.assertKeyDown(ord('z'))
self.assertKeyUp(ord('z'))
self.recorder.expect('keydown z')
self.recorder.expect('keyup z')
def test_key_ctrl_a(self):
self.run_vncdo('key ctrl-a')
self.assertKeyDown(rfb.KEY_ControlLeft)
self.assertKeyDown(ord('a'))
self.assertKeyUp(rfb.KEY_ControlLeft)
self.assertKeyUp(ord('a'))
def test_mouse(self):
self.run_vncdo('move 111 222 click 1')
self.assertMouse(111, 222, 1)
self.recorder.expect('move 111 222')
self.recorder.expect('click 1')
the-stack_106_32054
#!/usr/bin/env python
# coding: utf-8
import librosa
import logging
import numpy as np
from scipy.spatial import distance
from scipy import signal
from scipy.ndimage import filters
from msaf.algorithms.interface import SegmenterInterface
import msaf.utils as U
def median_filter(X, M=8):
"""Median filter along the first axis of the feature matrix X."""
for i in range(X.shape[1]):
X[:, i] = filters.median_filter(X[:, i], size=M)
return X
def gaussian_filter(X, M=8, axis=0):
"""Gaussian filter along the first axis of the feature matrix X."""
for i in range(X.shape[axis]):
if axis == 1:
X[:, i] = filters.gaussian_filter(X[:, i], sigma=M / 2.)
elif axis == 0:
X[i, :] = filters.gaussian_filter(X[i, :], sigma=M / 2.)
return X
def compute_gaussian_krnl(M):
"""Creates a gaussian kernel following Serra's paper."""
g = signal.gaussian(M, M / 3., sym=True)
G = np.dot(g.reshape(-1, 1), g.reshape(1, -1))
G[M // 2:, :M // 2] = -G[M // 2:, :M // 2]
    G[:M // 2, M // 2:] = -G[:M // 2, M // 2:]
return G
def compute_ssm(X, metric="seuclidean"):
"""Computes the self-similarity matrix of X."""
D = distance.pdist(X, metric=metric)
D = distance.squareform(D)
D /= float(D.max())
return 1 - D
def compute_nc(X):
"""Computes the novelty curve from the structural features."""
N = X.shape[0]
# nc = np.sum(np.diff(X, axis=0), axis=1) # Difference between SF's
nc = np.zeros(N)
for i in range(N - 1):
nc[i] = distance.euclidean(X[i, :], X[i + 1, :])
# Normalize
nc += np.abs(nc.min())
nc /= float(nc.max())
return nc
def pick_peaks(nc, L=16, offset_denom=0.1):
"""Obtain peaks from a novelty curve using an adaptive threshold."""
offset = nc.mean() * float(offset_denom)
th = filters.median_filter(nc, size=L) + offset
#th = filters.gaussian_filter(nc, sigma=L/2., mode="nearest") + offset
#import pylab as plt
#plt.plot(nc)
#plt.plot(th)
#plt.show()
# th = np.ones(nc.shape[0]) * nc.mean() - 0.08
peaks = []
for i in range(1, nc.shape[0] - 1):
# is it a peak?
if nc[i - 1] < nc[i] and nc[i] > nc[i + 1]:
# is it above the threshold?
if nc[i] > th[i]:
peaks.append(i)
return peaks
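# Illustrative check (not part of the original module): run pick_peaks() on a
# toy novelty curve with three smoothed spikes; the adaptive threshold should
# recover their positions.  Guarded so it only runs when executed directly.
if __name__ == '__main__':
    toy_nc = np.zeros(64)
    toy_nc[[10, 30, 50]] = 1.0
    toy_nc = filters.gaussian_filter(toy_nc, sigma=1.5)
    print(pick_peaks(toy_nc, L=16))   # expected: [10, 30, 50]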
def circular_shift(X):
"""Shifts circularly the X squre matrix in order to get a
time-lag matrix."""
N = X.shape[0]
L = np.zeros(X.shape)
for i in range(N):
L[i, :] = np.asarray([X[(i + j) % N, j] for j in range(N)])
return L
def embedded_space(X, m, tau=1):
"""Time-delay embedding with m dimensions and tau delays."""
N = X.shape[0] - int(np.ceil(m))
Y = np.zeros((N, int(np.ceil(X.shape[1] * m))))
for i in range(N):
# print X[i:i+m,:].flatten().shape, w, X.shape
# print Y[i,:].shape
rem = int((m % 1) * X.shape[1]) # Reminder for float m
Y[i, :] = np.concatenate((X[i:i + int(m), :].flatten(),
X[i + int(m), :rem]))
return Y
class Segmenter(SegmenterInterface):
"""
This script identifies the boundaries of a given track using the Serrà
method:
Serrà, J., Müller, M., Grosche, P., & Arcos, J. L. (2012). Unsupervised
Detection of Music Boundaries by Time Series Structure Features.
In Proc. of the 26th AAAI Conference on Artificial Intelligence
(pp. 1613–1619).Toronto, Canada.
"""
def processFlat(self):
"""Main process.
Returns
-------
est_idxs : np.array(N)
Estimated times for the segment boundaries in frame indeces.
est_labels : np.array(N-1)
Estimated labels for the segments.
"""
# Structural Features params
Mp = self.config["Mp_adaptive"] # Size of the adaptive threshold for
# peak picking
od = self.config["offset_thres"] # Offset coefficient for adaptive
# thresholding
M = self.config["M_gaussian"] # Size of gaussian kernel in beats
m = self.config["m_embedded"] # Number of embedded dimensions
k = self.config["k_nearest"] # k*N-nearest neighbors for the
# recurrence plot
# Preprocess to obtain features, times, and input boundary indeces
F = self._preprocess()
# Normalize
F = U.normalize(F, norm_type=self.config["bound_norm_feats"])
# Check size in case the track is too short
if F.shape[0] > 20:
if self.framesync:
red = 0.1
F_copy = np.copy(F)
F = librosa.util.utils.sync(
F.T, np.linspace(0, F.shape[0], num=F.shape[0] * red),
pad=False).T
# Emedding the feature space (i.e. shingle)
E = embedded_space(F, m)
# plt.imshow(E.T, interpolation="nearest", aspect="auto"); plt.show()
# Recurrence matrix
R = librosa.segment.recurrence_matrix(
E.T,
k=k * int(F.shape[0]),
width=1, # zeros from the diagonal
metric="euclidean",
sym=True).astype(np.float32)
# Circular shift
L = circular_shift(R)
#plt.imshow(L, interpolation="nearest", cmap=plt.get_cmap("binary"))
#plt.show()
# Obtain structural features by filtering the lag matrix
SF = gaussian_filter(L.T, M=M, axis=1)
SF = gaussian_filter(L.T, M=1, axis=0)
# plt.imshow(SF.T, interpolation="nearest", aspect="auto")
#plt.show()
# Compute the novelty curve
nc = compute_nc(SF)
# Find peaks in the novelty curve
est_bounds = pick_peaks(nc, L=Mp, offset_denom=od)
# Re-align embedded space
est_bounds = np.asarray(est_bounds) + int(np.ceil(m / 2.))
if self.framesync:
est_bounds /= red
F = F_copy
else:
est_bounds = []
# Add first and last frames
est_idxs = np.concatenate(([0], est_bounds, [F.shape[0] - 1]))
est_idxs = np.unique(est_idxs)
assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1
# Empty labels
est_labels = np.ones(len(est_idxs) - 1) * - 1
# Post process estimations
est_idxs, est_labels = self._postprocess(est_idxs, est_labels)
# plt.figure(1)
# plt.plot(nc);
# [plt.axvline(p, color="m", ymin=.6) for p in est_bounds]
# [plt.axvline(b, color="b", ymax=.6, ymin=.3) for b in brian_bounds]
# [plt.axvline(b, color="g", ymax=.3) for b in ann_bounds]
# plt.show()
return est_idxs, est_labels
the-stack_106_32055
from . import support
from . import tuner
from .learner import Learner
from .random import Random
from .session import Session
import json
import numpy as np
import os
import threading
class Agent:
def __init__(self, session, semaphore, config):
self.session = session
self.semaphore = semaphore
self.scores = Agent._restore(config.output.path)
self.output_path = config.output.path
self.lock = threading.Lock()
self.done = threading.Lock()
def collect(self, step_count):
with self.done:
return self.scores[step_count]
def submit(self, step_count):
with self.lock:
if step_count in self.scores:
return
self.scores[step_count] = None
self.done.acquire()
worker = threading.Thread(target=self._run, args=(step_count,),
daemon=True)
worker.start()
def _restore(path):
scores = {}
for path in support.scan(path, 'meta-*.json'):
meta = json.loads(open(path).read())
scores[meta['step_count']] = meta['score']
support.log(Agent, 'Score: {}', path)
return scores
def _run(self, step_count):
with self.semaphore:
with self.lock:
last_step_count = 0
for key in self.scores:
if self.scores[key] is None:
continue
if key > last_step_count:
last_step_count = key
assert(last_step_count < step_count)
support.log(self, 'Learning start: {}, stop: {}',
last_step_count, step_count)
self.session.run_training(step_count - last_step_count)
score = np.sum(self.session.run_validation()['MSE'])
Agent._save(self.output_path, step_count, score)
self.session.run_saving()
with self.lock:
self.scores[step_count] = score
support.log(self, 'Learning stop: {}, score: {}',
step_count, score)
self.done.release()
def _save(path, step_count, score):
path = os.path.join(path, 'meta-{}.json'.format(step_count))
with open(path, 'w') as file:
file.write(json.dumps({
'step_count': step_count,
'score': score,
}))
class Explorer:
def __init__(self, input, config):
self.input = input
self.config = config
self.tuner = getattr(tuner, config.tuner.name)
self.tuner = self.tuner(**config.tuner.options)
self.scale = config.max_step_count / self.tuner.resource
self.sampler = Sampler(config.sampler)
self.semaphore = threading.BoundedSemaphore(config.concurrent_count)
self.agents = {}
def configure(self, case, restore=True):
key = support.tokenize(case)
config = self.config.copy()
config.output.restore = restore
config.output.path = os.path.join(config.output.path, key)
for key in case:
_adjust(config, key, case[key])
return config
def run(self):
case, resource, score = self.tuner.run(self._generate, self._assess)
step_count = int(round(self.scale * resource))
support.log(self, 'Best case: {}, step: {}, score: {}',
case, step_count, score)
return (case, step_count)
def _assess(self, resource, cases):
step_count = int(round(self.scale * resource))
support.log(self, 'Assess cases: {}, stop: {}',
len(cases), step_count)
agents = []
for case in cases:
key = support.tokenize(case)
agent = self.agents.get(key)
if agent is None:
config = self.configure(case)
learner = Learner(config.learner.candidate)
session = Session(self.input, learner, config)
agent = Agent(session, self.semaphore, config)
self.agents[key] = agent
agent.submit(step_count)
agents.append(agent)
return [agent.collect(step_count) for agent in agents]
def _generate(self, count):
support.log(self, 'Generate cases: {}', count)
return [self.sampler.get() for _ in range(count)]
class Sampler:
def __init__(self, config):
self.parameters = config
support.log(self, 'Cases: {}', self.case_count)
@property
def case_count(self):
return np.prod([len(self.parameters[n]) for n in self.parameters])
def get(self):
case = {}
for key in sorted(self.parameters.keys()):
chosen = Random.get().randint(len(self.parameters[key]))
case[key] = self.parameters[key][chosen]
return case
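# Example (hypothetical parameter grid): with config = {'layer_count': [1, 2], 'unit_count': [128, 256]},
# get() draws one value per key and returns something like {'layer_count': 2, 'unit_count': 128}.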
def _adjust(config, key, value):
if key == 'dropout_rate':
config.learner.candidate.dropout.options.update({
'input_keep_prob': 1 - value[0],
'output_keep_prob': 1 - value[1],
})
elif key == 'layer_count':
config.learner.candidate.layer_count = value
elif key == 'learning_rate':
config.teacher.trainer.optimizer.options.learning_rate = value
elif key == 'unit_count':
config.learner.candidate.unit_count = value
elif key == 'use_peepholes':
config.learner.candidate.cell.options.use_peepholes = value
else:
assert(False)
|
the-stack_106_32056 | from __future__ import division
from __future__ import print_function
import os.path as osp
import numpy as np
from easydict import EasyDict as edict
__C = edict()
cfg = __C
# Dataset name: flowers, birds
__C.DATASET_NAME = 'coco'
__C.CONFIG_NAME = ''
__C.DATA_DIR = ''
__C.GPU_ID = 0
__C.CUDA = True
__C.WORKERS = 6
__C.RNN_TYPE = 'LSTM' # 'GRU'
__C.B_VALIDATION = False
__C.TREE = edict()
__C.TREE.BRANCH_NUM = 3
__C.TREE.BASE_SIZE = 64
# Training options
__C.TRAIN = edict()
__C.TRAIN.BATCH_SIZE = 64
__C.TRAIN.MAX_EPOCH = 600
__C.TRAIN.SNAPSHOT_INTERVAL = 2000
__C.TRAIN.DISCRIMINATOR_LR = 2e-4
__C.TRAIN.MEMT_LR = 2e-4
__C.TRAIN.GENERATOR_LR = 2e-4
__C.TRAIN.ENCODER_LR = 2e-4
__C.TRAIN.RNN_GRAD_CLIP = 0.25
__C.TRAIN.FLAG = True
__C.TRAIN.NET_E = ''
__C.TRAIN.NET_G = ''
__C.TRAIN.NET_T = ''
__C.TRAIN.B_NET_D = True
__C.TRAIN.SMOOTH = edict()
__C.TRAIN.SMOOTH.GAMMA1 = 5.0
__C.TRAIN.SMOOTH.GAMMA3 = 10.0
__C.TRAIN.SMOOTH.GAMMA2 = 5.0
__C.TRAIN.SMOOTH.LAMBDA = 1.0
# Modal options
__C.GAN = edict()
__C.GAN.DF_DIM = 64
__C.GAN.GF_DIM = 128
__C.GAN.Z_DIM = 100
__C.GAN.CONDITION_DIM = 100
__C.GAN.R_NUM = 2
__C.GAN.B_ATTENTION = True
__C.GAN.B_DCGAN = False
__C.TEXT = edict()
__C.TEXT.CAPTIONS_PER_IMAGE = 10
__C.TEXT.EMBEDDING_DIM = 256
__C.TEXT.WORDS_NUM = 18
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.items():
# a must specify keys that are in b
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.safe_load(f))
_merge_a_into_b(yaml_cfg, __C)
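# Minimal usage sketch (the YAML path is illustrative):
# cfg_from_file('cfg/eval_coco.yml') # overrides matching keys in __C above
# print(cfg.TRAIN.BATCH_SIZE, cfg.TREE.BRANCH_NUM)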
|
the-stack_106_32058 | from tkinter import *
class GUI:
def __init__(self, master):
frame = Frame(master)
frame.pack()
frame.configure(background='white')
self.button1 = Button(frame, bg="white")
self.button1.config(image=photo1)
self.button2 = Button(frame,bg="white")
self.button2.config(image=photo2)
self.button1.grid(row=0,column=0)
self.button2.grid(row=0, column=1)
root= Tk()
photo = PhotoImage(file="C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/Games1.png")
label = Label(root,image=photo)
photoB = PhotoImage(file="C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/Back.png")
photoE = PhotoImage(file="C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/Exit.png")
photo1 = PhotoImage(file="C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/tic-tac-toe.png")
photo2 = PhotoImage(file="C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/Color.png")
back = Button(root, bg="white")
back.config(image=photoB)
quitButton = Button(root, command=root.quit, bg="white")
quitButton.config(image=photoE)
back.pack(side="left",anchor=NW)
quitButton.pack(side="right", anchor=NE)
label.pack(side="top", anchor=N)
b= GUI(root)
root.geometry("1000x920")
root.title("Intelligent Fellow")
root.configure(background='white')
root.mainloop() |
the-stack_106_32060 | # Test script for interaction-based messages using Discord API components
from discord.ext import commands
import discord
import discord_slash.utils.manage_components as utils
from discord_slash.model import ButtonStyle
from discord_slash.cog_ext import cog_component
class Components(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="test", help="test command for discord components")
async def test(self, ctx):
buttons = [
utils.create_button(style=ButtonStyle.green, label="A green button"),
utils.create_button(style=ButtonStyle.blue, label="A blue button")
]
action_row = utils.create_actionrow(*buttons)
await ctx.send("Button test message", components=[action_row])
@cog_component()  # figure out how to make component callbacks work properly in cogs
async def hello(self, ctx):
await ctx.edit_origin(content="Button pressed")
def setup(bot):
bot.add_cog(Components(bot))
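# Usage note (module path and bot wiring are assumptions): this cog is typically loaded with
# bot.load_extension('cogs.components') after the bot has been wrapped with
# discord_slash.SlashCommand(bot), which is what dispatches component callbacks to cogs.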
|
the-stack_106_32062 | from django.shortcuts import render, redirect
from django.urls import reverse
from django.contrib.auth.admin import User
from tool.models import Project as ProjectModel, ProjectMember, GroupAccess
from tool.forms import UserCreateForm, UserEditForm, ProjectCreationForm, ProjectEditForm
from tool.utilies import *
import os
import xlrd, json
from django.conf import settings  # MyIO below relies on settings.STATICFILES_DIRS
import numpy as np
from scipy import optimize
from django.core import serializers
from django.http import JsonResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib import messages
import math
class Funcs:
def count_number_project_status_point(pd, project_status):
for i in range(len(project_status)):
if project_status[i]['AT'] == pd:
return i+1
return len(project_status)
def get_index_of_evaluation(project_status, number_project_status_point, evaluation_point):
for i in range(number_project_status_point):
if project_status[i]['AT'] >= evaluation_point:
return i - 1
break
return number_project_status_point - 1
def init(X, Y , pd, budget, project_status, evaluation_index, number_project_status_point):
for i in range(number_project_status_point):
X[i] = project_status[i]['AT']/(pd*1.0)
if i > evaluation_index:
project_status[i]['EV'] = project_status[i]['PV']
project_status[i]['AC'] = project_status[i]['PV']
Y[i] = project_status[i]['AC']/(budget*1.0)
# for gompertz
def gomperzt_func(parameters,xdata):
a = parameters[0]
b = parameters[1]
y = parameters[2]
return a * np.exp(-np.exp(b - y*xdata))
# for logistic
def logistic_func(parameters,xdata):
a = parameters[0]
b = parameters[1]
y = parameters[2]
return a / (1 + np.exp(b - y*xdata))
# for bass
def bass_func(parameters,xdata):
a = parameters[0]
b = parameters[1]
y = parameters[2]
return a * ( ( 1 - np.exp(-(b + y)*xdata) ) / ( ( 1 + (y / b)*np.exp(-(b + y)*xdata) ) ) )
# for weibull
def weibull_func(parameters,xdata):
a = parameters[0]
b = parameters[1]
y = parameters[2]
return a * (1 - np.exp( -( xdata / y )**b ))
#log-logistic
def log_logistic_func(parameters,xdata):
a = parameters[0]
b = parameters[1]
return ( (xdata/a)**b ) / ( 1 + (xdata/a)**b )
#Compute residuals of y_predicted - y_observed
def residuals(parameters,x_data,y_observed,func):
return func(parameters,x_data) - y_observed
def getES(project_status, evaluation_index):
# example: evaluationPoint = 9 -> current EV is: EV[8]. i = 0..8
EV = project_status[evaluation_index]['EV']
t = [ n for n,item in enumerate(project_status) if item['PV']>EV ][0] - 1
return round(project_status[t]['AT'] + (EV - project_status[t]['PV'])/(project_status[t+1]['PV'] - project_status[t]['PV']), 2)
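# e.g. (made-up numbers): if EV = 120 and the PV curve is 100 at index t and 150 at t + 1,
# then ES = AT[t] + (120 - 100) / (150 - 100) = AT[t] + 0.4.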
def getSPI(project_status, evaluation_index):
return round(project_status[evaluation_index]['EV']/project_status[evaluation_index]['PV'], 2)
def getCPI(project_status, evaluation_index):
return round(project_status[evaluation_index]['EV']/project_status[evaluation_index]['AC'], 2)
def getSPIt(project_status, evaluation_index):
ES = Funcs.getES(project_status, evaluation_index)
return round(ES/project_status[evaluation_index]['AT'], 2)
def getSCI(SPI, CPI):
return round(SPI*CPI, 2)
def getSCIt(SPIt, CPI):
return round(SPIt*CPI, 2)
def getED(project_status, SPI, evaluation_index):
return round(project_status[evaluation_index]['AT']*SPI, 2)
def getSV(project_status, evaluation_index):
return project_status[evaluation_index]['EV'] - project_status[evaluation_index]['PV']
def getTV(BAC, PD, project_status, evaluation_index):
PVrate = BAC/PD
SV = Funcs.getSV(project_status, evaluation_index)
return round(SV/PVrate, 2)
def getEACtPV1(PD, TV):
return PD - TV
def getEACtPV2(PD, SPI):
return round(PD/SPI, 2)
def getEACtPV3(PD, SCI):
return round(PD/SCI, 2)
# return EACt caculated by ED
def getEACtED(PD, project_status, ED, PF, evaluation_index):
return round(project_status[evaluation_index]['AT'] + (max(PD, project_status[evaluation_index]['AT']) - ED)/PF, 2)
# return EACt caculated by ES
def getEACtES(PD, project_status, ES, PF, evaluation_index):
return round(project_status[evaluation_index]['AT'] + (PD - ES)/PF, 2)
def getCI(CPI, w_cpi, SPI, w_spi):
return CPI*w_cpi + SPI*w_spi
def getCIt(CPI, w_cpi, SPIt, w_spit):
return CPI*w_cpi + SPIt*w_spit
# EAC caculated by EVM
def getEAC(project_status, BAC, evaluation_index, PF):
return round(project_status[evaluation_index]['AC'] + (BAC - project_status[evaluation_index]['EV'])/PF, 2)
def getEACGM(project_status, xdata, evaluation_index, growModel, parametersEstimated, BAC, index = 1.0):
if(growModel == 'gompertz'):
restBudget = (Funcs.gomperzt_func(parametersEstimated, index) - Funcs.gomperzt_func(parametersEstimated, xdata[evaluation_index]))*BAC
elif(growModel == 'logistic'):
restBudget = (Funcs.logistic_func(parametersEstimated, index) - Funcs.logistic_func(parametersEstimated, xdata[evaluation_index]))*BAC
elif(growModel == 'bass'):
restBudget = (Funcs.bass_func(parametersEstimated, index) - Funcs.bass_func(parametersEstimated, xdata[evaluation_index]))*BAC
elif(growModel == 'weibull'):
restBudget = (Funcs.weibull_func(parametersEstimated, index) - Funcs.weibull_func(parametersEstimated, xdata[evaluation_index]))*BAC
else:
restBudget = (Funcs.log_logistic_func(parametersEstimated, index) - Funcs.log_logistic_func(parametersEstimated, xdata[evaluation_index]))*BAC
return round(project_status[evaluation_index]['AC'] + restBudget, 2)
def optimizeLeastSquares(growModel, xdata, ydata, method = 'trf'):
x0 = [0.1, 0.2, 0.3]
# max_nfev = 2000 # maxium of function evaluations
# bounds = [(0,0,0), (2,2,2)] # lower adn upper bounds on independent variables.
# ftol = 1e-8 # default tolerance of termination by the change of cost function. dF < ftol*F
if(growModel == 'gompertz'):
OptimizeResult = optimize.least_squares(Funcs.residuals, x0,method = method,
args = ( xdata, ydata,Funcs.gomperzt_func) )
elif(growModel == 'logistic'):
OptimizeResult = optimize.least_squares(Funcs.residuals, x0,method = method,
args = ( xdata, ydata,Funcs.logistic_func) )
elif(growModel == 'bass'):
OptimizeResult = optimize.least_squares(Funcs.residuals, x0,method = method,
args = ( xdata, ydata,Funcs.bass_func) )
elif(growModel == 'log_logistic'):
x0 = [0.1, 0.2]
OptimizeResult = optimize.least_squares(Funcs.residuals, x0,method = method,
args = ( xdata, ydata,Funcs.log_logistic_func) )
else:
OptimizeResult = optimize.least_squares(Funcs.residuals, x0,method = method,
args = ( xdata, ydata,Funcs.weibull_func) )
parametersEstimated = OptimizeResult.x
return parametersEstimated
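# Rough usage sketch (variable names are illustrative): xdata/ydata are the normalised time
# and cost curves built in estimate() below, e.g.
# params = Funcs.optimizeLeastSquares('gompertz', xdata, ydata)
# progress_at_completion = Funcs.gomperzt_func(params, 1.0)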
def estimate(project_id, grow_model, evaluation_point, algorithm="trf"):
project = get_or_none(ProjectModel, pk=project_id)
if project is None:
return {}
pd = project.pd
budget = project.budget
project_status = json.loads(project.status)
number_project_status_point = Funcs.count_number_project_status_point(pd, project_status)
xdata = np.zeros(number_project_status_point)
ydata = np.zeros(number_project_status_point)
evaluation_index = Funcs.get_index_of_evaluation(project_status, number_project_status_point, evaluation_point)
Funcs.init(xdata, ydata, pd, budget, project_status, evaluation_index, number_project_status_point)
ES = Funcs.getES(project_status, evaluation_index)
SPI = Funcs.getSPI(project_status, evaluation_index)
CPI = Funcs.getCPI(project_status, evaluation_index)
SPIt = Funcs.getSPIt(project_status, evaluation_index)
SCI = Funcs.getSCI(SPI, CPI)
SCIt = Funcs.getSCIt(SPIt, CPI)
TV = Funcs.getTV(budget, pd, project_status, evaluation_index)
ED = Funcs.getED(project_status, SPI, evaluation_index)
EACtPV1 = Funcs.getEACtPV1(pd, Funcs.getTV(budget, pd, project_status, evaluation_index))
EACtPV2 = Funcs.getEACtPV2(pd, SPI)
EACtPV3 = Funcs.getEACtPV3(pd, SCI)
EACtED1 = Funcs.getEACtED(pd, project_status, ED, 1, evaluation_index)
EACtED2 = Funcs.getEACtED(pd, project_status, ED, SPI, evaluation_index)
EACtED3 = Funcs.getEACtED(pd, project_status, ED, SCI, evaluation_index)
EACtES1 = Funcs.getEACtES(pd, project_status, ES, 1, evaluation_index)
EACtES2 = Funcs.getEACtES(pd, project_status, ES, SPIt, evaluation_index)
EACtES3 = Funcs.getEACtES(pd, project_status, ES, SCIt, evaluation_index)
EAC1 = Funcs.getEAC(project_status, budget, evaluation_index, 1)
EAC2 = Funcs.getEAC(project_status, budget, evaluation_index, CPI)
EAC3 = Funcs.getEAC(project_status, budget, evaluation_index, SPI)
EAC3_SPI = Funcs.getEAC(project_status, budget, evaluation_index, SPI)
EAC3_SPIt = Funcs.getEAC(project_status, budget, evaluation_index, SPIt)
EAC4_SCI = Funcs.getEAC(project_status, budget, evaluation_index, SCI)
EAC4_SCIt = Funcs.getEAC(project_status, budget, evaluation_index, SCIt)
CI = Funcs.getCI(CPI, 0.8, SPI, 0.2)
CIt = Funcs.getCIt(CPI, 0.8, SPIt, 0.2)
EAC5_CI = Funcs.getEAC(project_status, budget, evaluation_index, CI)
EAC5_CIt = Funcs.getEAC(project_status, budget, evaluation_index, CIt)
parametersEstimated = Funcs.optimizeLeastSquares(grow_model, xdata, ydata, method=algorithm)
EAC_GM1 = Funcs.getEACGM(project_status, xdata, evaluation_index, grow_model, parametersEstimated, budget, 1.0)
EAC_GM2 = Funcs.getEACGM(project_status, xdata, evaluation_index, grow_model, parametersEstimated, budget, 1.0/SPIt)
alpha = round(parametersEstimated[0], 6)
beta = round(parametersEstimated[1], 6)
if(grow_model == 'log_logistic'):
gamma = ''
else:
gamma = round(parametersEstimated[2], 6)
data = {
'alpha': alpha,
'beta': beta,
'gamma': gamma,
'ES': ES,
'SPI': SPI,
'CPI': CPI,
'SPIt': SPIt,
'SCI': SCI,
'SCIt': SCIt,
'TV': TV,
'ED': ED,
'EACtPV1': EACtPV1,
'EACtPV2': EACtPV2,
'EACtPV3': EACtPV3,
'EACtED1': EACtED1,
'EACtED2': EACtED2,
'EACtED3': EACtED3,
'EACtES1': EACtES1,
'EACtES2': EACtES2,
'EACtES3': EACtES3,
'EAC1': EAC1,
'EAC2': EAC2,
'EAC3_SPI': EAC3_SPI,
'EAC3_SPIt': EAC3_SPIt,
'EAC4_SCI': EAC4_SCI,
'EAC4_SCIt': EAC4_SCIt,
'EAC5_CI': EAC5_CI,
'EAC5_CIt': EAC5_CIt,
'EAC_GM1': EAC_GM1,
'EAC_GM2': EAC_GM2
}
return data
def get_pe(project_id, grow_model, evaluation_point):
project = get_or_none(ProjectModel, pk=project_id)
if project is None:
return {}
project_ac = project.get_ac()
estimate_data = Funcs.estimate(project_id, grow_model, evaluation_point)
data = {}
data['pe_EAC1'] = round((estimate_data['EAC1'] - project_ac)*100/project_ac, 2)
data['pe_EAC2'] = round((estimate_data['EAC2'] - project_ac)*100/project_ac, 2)
data['pe_EAC3_SPI'] = round((estimate_data['EAC3_SPI'] - project_ac)*100/project_ac, 2)
data['pe_EAC3_SPIt'] = round((estimate_data['EAC3_SPIt'] - project_ac)*100/project_ac, 2)
data['pe_EAC4_SCI'] = round((estimate_data['EAC4_SCI'] - project_ac)*100/project_ac, 2)
data['pe_EAC4_SCIt'] = round((estimate_data['EAC4_SCIt'] - project_ac)*100/project_ac, 2)
data['pe_EAC5_CI'] = round((estimate_data['EAC5_CI'] - project_ac)*100/project_ac, 2)
data['pe_EAC5_CIt'] = round((estimate_data['EAC5_CIt'] - project_ac)*100/project_ac, 2)
data['pe_EAC_GM1'] = round((estimate_data['EAC_GM1'] - project_ac)*100/project_ac, 2)
data['pe_EAC_GM2'] = round((estimate_data['EAC_GM2'] - project_ac)*100/project_ac, 2)
return data
def get_mape(project_ids, grow_model, evaluation_percent):
projects = ProjectModel.objects.filter(pk__in=project_ids)
data_mape = {}
data_pe = {}
for project in projects:
evaluation_point = math.ceil(project.pd*evaluation_percent)
data_pe['{}'.format(project.id)] = Funcs.get_pe(project.id, grow_model, evaluation_point)
sum_pe = [0,0,0,0,0,0,0,0,0,0]
for project_id in project_ids:
sum_pe[0] += abs(data_pe['{}'.format(project_id)]['pe_EAC1'])
sum_pe[1] += abs(data_pe['{}'.format(project_id)]['pe_EAC2'])
sum_pe[2] += abs(data_pe['{}'.format(project_id)]['pe_EAC3_SPI'])
sum_pe[3] += abs(data_pe['{}'.format(project_id)]['pe_EAC3_SPIt'])
sum_pe[4] += abs(data_pe['{}'.format(project_id)]['pe_EAC4_SCI'])
sum_pe[5] += abs(data_pe['{}'.format(project_id)]['pe_EAC4_SCIt'])
sum_pe[6] += abs(data_pe['{}'.format(project_id)]['pe_EAC5_CI'])
sum_pe[7] += abs(data_pe['{}'.format(project_id)]['pe_EAC5_CIt'])
sum_pe[8] += abs(data_pe['{}'.format(project_id)]['pe_EAC_GM1'])
sum_pe[9] += abs(data_pe['{}'.format(project_id)]['pe_EAC_GM2'])
return {
'mape_EAC1': round(sum_pe[0]/len(project_ids), 2),
'mape_EAC2': round(sum_pe[1]/len(project_ids), 2),
'mape_EAC3_SPI': round(sum_pe[2]/len(project_ids), 2),
'mape_EAC3_SPIt': round(sum_pe[3]/len(project_ids), 2),
'mape_EAC4_SCI': round(sum_pe[4]/len(project_ids), 2),
'mape_EAC4_SCIt': round(sum_pe[5]/len(project_ids), 2),
'mape_EAC5_CI': round(sum_pe[6]/len(project_ids), 2),
'mape_EAC5_CIt': round(sum_pe[7]/len(project_ids), 2),
'mape_EAC_GM1': round(sum_pe[8]/len(project_ids), 2),
'mape_EAC_GM2': round(sum_pe[9]/len(project_ids), 2)
}
class MyIO:
def writeResultToFile(project_name, grow_model, evaluation_time, data):
static_dir = settings.STATICFILES_DIRS[0]
#Creating a folder in static directory
new_pro_dir_path = os.path.join(static_dir,'%s'%(project_name))
new_pro_gro_dir_path = os.path.join(static_dir,'%s/%s'%(project_name, grow_model))
new_pro_gro_eva_dir_path = os.path.join(static_dir, '%s/%s/%s'%(project_name, grow_model, evaluation_time))
result_file_path = os.path.join(static_dir, '%s/%s/%s/data.json'%(project_name, grow_model, evaluation_time))
if not os.path.exists(new_pro_dir_path):
os.makedirs(new_pro_dir_path)
if not os.path.exists(new_pro_gro_dir_path):
os.makedirs(new_pro_gro_dir_path)
if not os.path.exists(new_pro_gro_eva_dir_path):
os.makedirs(new_pro_gro_eva_dir_path)
with open(result_file_path, 'w') as f:
json.dump(data, f)
class EstimateController:
def index(request):
projects = ProjectModel.get_projects_has_access(request.user.id)
return render(request, 'tool/estimate/index.html', {'projects': projects})
def estimate(request):
project_id = int(request.GET.get('project_id'))
grow_model = request.GET.get('grow_model')
evaluation_point = int(request.GET.get('evaluation_point'))
algorithm = request.GET.get('algorithm')
data = Funcs.estimate(project_id, grow_model, evaluation_point, algorithm)
return JsonResponse({'status': 200, 'data': data})
class PeController:
def get_pe(request):
project_id = request.GET.get('project_id')
project = get_or_none(ProjectModel, pk=project_id)
project_ac = project.get_ac()
evaluation_percents = [0.25, 0.5, 0.75]
grow_models = ['gompertz', 'logistic', 'bass', 'weibull']
# grow_models = ['gompertz', 'logistic', 'weibull', 'bass', 'log_logistic']
data = {}
for grow_model in grow_models:
data['{}'.format(grow_model)] = {}
for evaluation_percent in evaluation_percents:
evaluation_point = math.ceil(project.pd*evaluation_percent)
data['{}'.format(grow_model)]['{}'.format(evaluation_percent)] = Funcs.get_pe(project_id, grow_model, evaluation_point)
return JsonResponse({'data': data})
def get_mape(request):
project_ids_str = request.GET.get('project_ids')
project_ids = json.loads(project_ids_str)
evaluation_percents = [0.25, 0.5, 0.75]
grow_models = ['gompertz', 'logistic', 'bass', 'weibull']
# grow_models = ['gompertz', 'logistic', 'weibull', 'bass', 'log_logistic']
data = {}
for grow_model in grow_models:
data['{}'.format(grow_model)] = {}
for evaluation_percent in evaluation_percents:
data['{}'.format(grow_model)]['{}'.format(evaluation_percent)] = Funcs.get_mape(project_ids, grow_model, evaluation_percent)
return JsonResponse({'data': data})
|
the-stack_106_32063 | # coding=utf-8
import pygame
from game import *
class Pool(object):
def __init__(self, x, y, width, height, style='grass'):
self.x, self.y = x, y
self.Vx, self.Vy = 0, 0
self.width, self.height = width, height
# Platform images
self.image = pygame.image.load('environment\\main\\' + style + 'Mid.png').convert_alpha()
self.plainImage = pygame.image.load('environment\\main\\' + style + 'Center.png')
self.leftImage = pygame.image.load('environment\\main\\' + style + 'CliffLeft.png').convert_alpha()
self.rightImage = pygame.image.load('environment\\main\\' + style + 'CliffRight.png').convert_alpha()
self.tileWidth = pygame.Surface.get_width(self.image)
# Water images
self.waterFilled = pygame.image.load('environment\\main\\liquidWater.png').convert_alpha()
self.waterTop = pygame.image.load('environment\\main\\liquidWaterTop_mid.png').convert_alpha()
# Update coordinates
self.width -= self.width % self.tileWidth
self.height -= self.height % self.tileWidth
self.poolStartX = int(self.x + 2 * self.tileWidth)
self.poolEndX = int(self.x + self.width - 2 * self.tileWidth)
# Other control variables
self.tilesOnEitherSide = 2
# -----------------------------------------------------------------------------------------------------------------
def update(self, surface):
self.updateMotion()
self.draw(surface)
# -----------------------------------------------------------------------------------------------------------------
def updateMotion(self):
# Increment position by velocity
self.x += self.Vx
self.y += self.Vy
# -----------------------------------------------------------------------------------------------------------------
def draw(self, surface):
# Draw edge platforms
surface.blit(self.leftImage, (self.x, self.y))
surface.blit(self.rightImage, (self.x + self.width - self.tileWidth, self.y))
# Draw platform tiles on either side of the pool
self.poolStartX = int(self.x + self.tilesOnEitherSide * self.tileWidth)
self.poolEndX = int(self.x + self.width - (1 + self.tilesOnEitherSide) * self.tileWidth)
for x in range(int(self.x) + self.tileWidth, self.poolStartX + self.tileWidth, self.tileWidth):
surface.blit(self.image, (x, self.y))
for x in range(self.poolEndX, int(self.x) + self.width - self.tileWidth, self.tileWidth):
surface.blit(self.image, (x, self.y))
# Draw pool side columns
for y in range(int(self.y + self.tileWidth), int(self.y + self.height), self.tileWidth):
surface.blit(self.plainImage, (self.poolStartX, y))
surface.blit(self.plainImage, (self.poolEndX, y))
# Draw bottom of pool
for x in range(self.poolStartX, self.poolEndX + self.tileWidth, self.tileWidth):
surface.blit(self.plainImage, (x, self.y + self.height))
# Fill with water
for y in range(int(self.y + self.tileWidth), int(self.y + self.height), self.tileWidth):
for x in range(self.poolStartX + self.tileWidth, self.poolEndX, self.tileWidth):
surface.blit(self.waterFilled, (x, y))
for x in range(self.poolStartX + self.tileWidth, self.poolEndX, self.tileWidth):
surface.blit(self.waterTop, (x, self.y))
|
the-stack_106_32064 | """View Module."""
from jinja2 import ChoiceLoader, Environment, PackageLoader, select_autoescape
from jinja2.exceptions import TemplateNotFound
from .exceptions import RequiredContainerBindingNotFound, ViewException
from .response import Responsable
class View(Responsable):
"""View class. Responsible for handling everything involved with views and view environments."""
_splice = "/"
def __init__(self, container):
"""View constructor.
Arguments:
container {masonite.app.App} -- Container object.
"""
self.dictionary = {}
self.composers = {}
self.container = container
# If the cache_for method is declared
self.cache = False
# Cache time of cache_for
self.cache_time = None
# Cache type of cache_for
self.cache_type = None
self.template = None
self.environments = []
self.extension = ".html"
self._jinja_extensions = ["jinja2.ext.loopcontrols"]
self._filters = {}
self._tests = {}
self._shared = {}
def render(self, template, dictionary={}):
"""Get the string contents of the view.
Arguments:
template {string} -- Name of the template you want to render.
Keyword Arguments:
dictionary {dict} -- Data that you want to pass into your view. (default: {{}})
Returns:
self
"""
if not isinstance(dictionary, dict):
raise ViewException(
"Second parameter to render method needs to be a dictionary, {} passed.".format(
type(dictionary).__name__
)
)
self.__load_environment(template)
self.dictionary = {}
self.dictionary.update(dictionary)
self.dictionary.update(self._shared)
# Check if use cache and return template from cache if exists
if (
self.container.has("Cache")
and self.__cached_template_exists()
and not self.__is_expired_cache()
):
return self.__get_cached_template()
# Check if composers are even set for a speed improvement
if self.composers:
self._update_from_composers()
if self._tests:
self.env.tests.update(self._tests)
self.rendered_template = self._render()
return self
def _render(self):
try:
# Try rendering the template with '.html' appended
return self.env.get_template(self.filename).render(self.dictionary)
except TemplateNotFound:
# Try rendering the direct template the user has supplied
return self.env.get_template(self.template).render(self.dictionary)
def _update_from_composers(self):
"""Add data into the view from specified composers."""
# Check if the template is directly specified in the composer
if self.template in self.composers:
self.dictionary.update(self.composers.get(self.template))
# Check if there is just an astericks in the composer
if "*" in self.composers:
self.dictionary.update(self.composers.get("*"))
# We will append onto this string for an easier way to search through wildcard routes
compiled_string = ""
# Check for wildcard view composers
for template in self.template.split(self._splice):
# Append the template onto the compiled_string
compiled_string += template
if self.composers.get("{}*".format(compiled_string)):
self.dictionary.update(self.composers["{}*".format(compiled_string)])
else:
# Add a slash to symbolize going into a deeper directory structure
compiled_string += "/"
def composer(self, composer_name, dictionary):
"""Update composer dictionary.
Arguments:
composer_name {string} -- Key to bind dictionary of data to.
dictionary {dict} -- Dictionary of data to add to controller.
Returns:
self
"""
if isinstance(composer_name, str):
self.composers[composer_name] = dictionary
if isinstance(composer_name, list):
for composer in composer_name:
self.composers[composer] = dictionary
return self
def share(self, dictionary):
"""Share data to all templates.
Arguments:
dictionary {dict} -- Dictionary of key value pairs to add to all views.
Returns:
self
"""
self._shared.update(dictionary)
return self
def cache_for(self, time=None, cache_type=None):
"""Set time and type for cache.
Keyword Arguments:
time {string} -- Time to cache template for (default: {None})
cache_type {string} -- Type of the cache. (default: {None})
Raises:
RequiredContainerBindingNotFound -- Thrown when the Cache key binding is not found in the container.
Returns:
self
"""
if not self.container.has("Cache"):
raise RequiredContainerBindingNotFound(
"The 'Cache' container binding is required to use this method and wasn't found in the container. You may be missing a Service Provider"
)
self.cache = True
self.cache_time = float(time)
self.cache_type = cache_type
if self.__is_expired_cache():
self.__create_cache_template(self.template)
return self
def exists(self, template):
"""Check if a template exists.
Arguments:
template {string} -- Name of the template to check for.
Returns:
bool
"""
self.__load_environment(template)
try:
self.env.get_template(self.filename)
return True
except TemplateNotFound:
return False
def add_environment(self, template_location, loader=PackageLoader):
"""Add an environment to the templates.
Arguments:
template_location {string} -- Directory location to attach the environment to.
Keyword Arguments:
loader {jinja2.Loader} -- Type of Jinja2 loader to use. (default: {jinja2.PackageLoader})
"""
if loader == PackageLoader:
template_location = template_location.split(self._splice)
self.environments.append(
loader(template_location[0], "/".join(template_location[1:]))
)
else:
self.environments.append(loader(template_location))
def filter(self, name, function):
"""Use to add filters to views.
Arguments:
name {string} -- Key to bind the filter to.
function {object} -- Function used for the template filter.
"""
self._filters.update({name: function})
def test(self, key, obj):
self._tests.update({key: obj})
return self
def add_extension(self, extension):
self._jinja_extensions.append(extension)
return self
def __load_environment(self, template):
"""Private method for loading all the environments.
Arguments:
template {string} -- Template to load environment from.
"""
self.template = template
self.filename = (
template.replace(self._splice, "/").replace(".", "/") + self.extension
)
if template.startswith("/"):
# Filter blanks strings from the split
location = list(filter(None, template.split("/")))
self.filename = location[-1] + self.extension
loader = ChoiceLoader(
[PackageLoader(location[0], "/".join(location[1:-1]))]
+ self.environments
)
self.env = Environment(
loader=loader,
autoescape=select_autoescape(["html", "xml"]),
extensions=self._jinja_extensions,
line_statement_prefix="@",
)
else:
loader = ChoiceLoader(
[PackageLoader("resources", "templates")] + self.environments
)
# Set the searchpath since some packages look for this object
# This is sort of a hack for now
loader.searchpath = ""
self.env = Environment(
loader=loader,
autoescape=select_autoescape(["html", "xml"]),
extensions=self._jinja_extensions,
line_statement_prefix="@",
)
self.env.filters.update(self._filters)
def __create_cache_template(self, template):
"""Save in the cache the template.
Arguments:
template {string} -- Creates the cached templates.
"""
self.container.make("Cache").store_for(
template,
self.rendered_template,
self.cache_time,
self.cache_type,
".html",
)
def __cached_template_exists(self):
"""Check if the cache template exists.
Returns:
bool
"""
return self.container.make("Cache").exists(self.template)
def __is_expired_cache(self):
"""Check if cache is expired.
Returns:
bool
"""
# Check if cache_for is set and configurate
if self.cache_time is None or self.cache_type is None and self.cache:
return True
driver_cache = self.container.make("Cache")
# True is expired
return not driver_cache.is_valid(self.template)
def __get_cached_template(self):
"""Return the cached version of the template.
Returns:
self
"""
driver_cache = self.container.make("Cache")
self.rendered_template = driver_cache.get(self.template)
return self
def set_splice(self, splice):
self._splice = splice
return self
def get_response(self):
return self.rendered_template
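# Rough usage sketch (container wiring and names are illustrative):
# view = View(container)
# view.share({'app_name': 'My App'})
# html = view.render('welcome', {'user': user}).rendered_template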
|
the-stack_106_32065 | from __future__ import print_function
import torch
from model import highwayNet
from utils import ngsimDataset,maskedNLL,maskedMSETest,maskedNLLTest
from torch.utils.data import DataLoader
import time
## Network Arguments
args = {}
args['use_cuda'] = True
args['encoder_size'] = 64
args['decoder_size'] = 128
args['in_length'] = 16
args['out_length'] = 25
args['grid_size'] = (13,3)
args['soc_conv_depth'] = 64
args['conv_3x1_depth'] = 16
args['dyn_embedding_size'] = 32
args['input_embedding_size'] = 32
args['num_lat_classes'] = 3
args['num_lon_classes'] = 2
args['use_maneuvers'] = False
args['train_flag'] = False
# Evaluation metric:
metric = 'nll' #or rmse
# Initialize network
net = highwayNet(args)
net.load_state_dict(torch.load('trained_models/cslstm_m.tar'))
if args['use_cuda']:
net = net.cuda()
tsSet = ngsimDataset('data/TestSet.mat')
tsDataloader = DataLoader(tsSet,batch_size=128,shuffle=True,num_workers=8,collate_fn=tsSet.collate_fn)
lossVals = torch.zeros(25).cuda()
counts = torch.zeros(25).cuda()
for i, data in enumerate(tsDataloader):
st_time = time.time()
hist, nbrs, mask, lat_enc, lon_enc, fut, op_mask = data
# Initialize Variables
if args['use_cuda']:
hist = hist.cuda()
nbrs = nbrs.cuda()
mask = mask.cuda()
lat_enc = lat_enc.cuda()
lon_enc = lon_enc.cuda()
fut = fut.cuda()
op_mask = op_mask.cuda()
if metric == 'nll':
# Forward pass
if args['use_maneuvers']:
fut_pred, lat_pred, lon_pred = net(hist, nbrs, mask, lat_enc, lon_enc)
l,c = maskedNLLTest(fut_pred, lat_pred, lon_pred, fut, op_mask)
else:
fut_pred = net(hist, nbrs, mask, lat_enc, lon_enc)
l, c = maskedNLLTest(fut_pred, 0, 0, fut, op_mask,use_maneuvers=False)
else:
# Forward pass
if args['use_maneuvers']:
fut_pred, lat_pred, lon_pred = net(hist, nbrs, mask, lat_enc, lon_enc)
fut_pred_max = torch.zeros_like(fut_pred[0])
for k in range(lat_pred.shape[0]):
lat_man = torch.argmax(lat_pred[k, :]).detach()
lon_man = torch.argmax(lon_pred[k, :]).detach()
indx = lon_man*3 + lat_man
fut_pred_max[:,k,:] = fut_pred[indx][:,k,:]
l, c = maskedMSETest(fut_pred_max, fut, op_mask)
else:
fut_pred = net(hist, nbrs, mask, lat_enc, lon_enc)
l, c = maskedMSETest(fut_pred, fut, op_mask)
lossVals +=l.detach()
counts += c.detach()
if metric == 'nll':
print(lossVals / counts)
else:
print(torch.pow(lossVals / counts,0.5)*0.3048) # Calculate RMSE and convert from feet to meters
|
the-stack_106_32066 | # -*- coding: utf-8 -*-
"""
Wind Setbacks tests
"""
from click.testing import CliRunner
import json
import numpy as np
import os
import pytest
import shutil
import tempfile
import traceback
from rex.utilities.loggers import LOGGERS
from reVX import TESTDATADIR
from reVX.handlers.geotiff import Geotiff
from reVX.wind_setbacks import (StructureWindSetbacks,
RailWindSetbacks)
from reVX.wind_setbacks.wind_setbacks_cli import main
EXCL_H5 = os.path.join(TESTDATADIR, 'setbacks', 'ri_setbacks.h5')
HUB_HEIGHT = 135
ROTOR_DIAMETER = 200
MULTIPLIER = 3
REGS_FPATH = os.path.join(TESTDATADIR, 'setbacks', 'ri_wind_regs_fips.csv')
REGS_GPKG = os.path.join(TESTDATADIR, 'setbacks', 'ri_wind_regs_fips.gpkg')
@pytest.fixture(scope="module")
def runner():
"""
cli runner
"""
return CliRunner()
def test_generic_structure():
"""
Test generic structures setbacks
"""
baseline = os.path.join(TESTDATADIR, 'setbacks',
'generic_structures.tif')
with Geotiff(baseline) as tif:
baseline = tif.values
setbacks = StructureWindSetbacks(EXCL_H5, HUB_HEIGHT, ROTOR_DIAMETER,
regs_fpath=None, multiplier=MULTIPLIER)
structure_path = os.path.join(TESTDATADIR, 'setbacks',
'RhodeIsland.geojson')
test = setbacks.compute_setbacks(structure_path)
assert np.allclose(baseline, test)
@pytest.mark.parametrize('max_workers', [None, 1])
def test_local_structures(max_workers):
"""
Test local structures setbacks
"""
baseline = os.path.join(TESTDATADIR, 'setbacks',
'existing_structures.tif')
with Geotiff(baseline) as tif:
baseline = tif.values
setbacks = StructureWindSetbacks(EXCL_H5, HUB_HEIGHT, ROTOR_DIAMETER,
regs_fpath=REGS_GPKG, multiplier=None)
structure_path = os.path.join(TESTDATADIR, 'setbacks',
'RhodeIsland.geojson')
test = setbacks.compute_setbacks(structure_path, max_workers=max_workers)
assert np.allclose(baseline, test)
def test_generic_railroads():
"""
Test generic rail setbacks
"""
baseline = os.path.join(TESTDATADIR, 'setbacks', 'generic_rails.tif')
with Geotiff(baseline) as tif:
baseline = tif.values
setbacks = RailWindSetbacks(EXCL_H5, HUB_HEIGHT, ROTOR_DIAMETER,
regs_fpath=None, multiplier=MULTIPLIER)
rail_path = os.path.join(TESTDATADIR, 'setbacks', 'RI_Railroads',
'RI_Railroads.shp')
test = setbacks.compute_setbacks(rail_path)
assert np.allclose(baseline, test)
@pytest.mark.parametrize('max_workers', [None, 1])
def test_local_railroads(max_workers):
"""
Test local rail setbacks
"""
baseline = os.path.join(TESTDATADIR, 'setbacks', 'existing_rails.tif')
with Geotiff(baseline) as tif:
baseline = tif.values
setbacks = RailWindSetbacks(EXCL_H5, HUB_HEIGHT, ROTOR_DIAMETER,
regs_fpath=REGS_GPKG, multiplier=None)
rail_path = os.path.join(TESTDATADIR, 'setbacks', 'RI_Railroads',
'RI_Railroads.shp')
test = setbacks.compute_setbacks(rail_path, max_workers=max_workers)
assert np.allclose(baseline, test)
def test_setback_preflight_check():
"""
Test BaseWindSetbacks preflight_checks
"""
with pytest.raises(RuntimeError):
StructureWindSetbacks(EXCL_H5, HUB_HEIGHT, ROTOR_DIAMETER,
regs_fpath=None, multiplier=None)
def test_cli(runner):
"""
Test CLI
"""
structure_dir = os.path.join(TESTDATADIR, 'setbacks')
with tempfile.TemporaryDirectory() as td:
regs_fpath = os.path.basename(REGS_FPATH)
regs_fpath = os.path.join(td, regs_fpath)
shutil.copy(REGS_FPATH, regs_fpath)
config = {
"directories": {
"log_directory": td,
"output_directory": td
},
"execution_control": {
"option": "local"
},
"excl_fpath": EXCL_H5,
"feature_type": "structure",
"features_path": structure_dir,
"hub_height": HUB_HEIGHT,
"log_level": "INFO",
"regs_fpath": regs_fpath,
"replace": True,
"rotor_diameter": ROTOR_DIAMETER
}
config_path = os.path.join(td, 'config.json')
with open(config_path, 'w') as f:
json.dump(config, f)
result = runner.invoke(main, ['from-config',
'-c', config_path])
msg = ('Failed with error {}'
.format(traceback.print_exception(*result.exc_info)))
assert result.exit_code == 0, msg
baseline = os.path.join(TESTDATADIR, 'setbacks',
'generic_structures.tif')
with Geotiff(baseline) as tif:
baseline = tif.values
test = os.path.join(td, 'RhodeIsland.tif')
with Geotiff(test) as tif:
test = tif.values
assert np.allclose(baseline, test)
LOGGERS.clear()
def execute_pytest(capture='all', flags='-rapP'):
"""Execute module as pytest with detailed summary report.
Parameters
----------
capture : str
Log or stdout/stderr capture option. ex: log (only logger),
all (includes stdout/stderr)
flags : str
Which tests to show logs and results for.
"""
fname = os.path.basename(__file__)
pytest.main(['-q', '--show-capture={}'.format(capture), fname, flags])
if __name__ == '__main__':
execute_pytest()
|
the-stack_106_32068 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import numpy as np
from akg.utils import kernel_exec as utils
from test_op import add_a_conv
from test_run.conv_utils import conv_forward_naive
from test_run.conv_utils import random_gaussian
def add_a_conv_run(fmap_shape, filter_shape, pad_, stride_, dilation_,
use_bias=False, bypass_l1=False, dump_data=False, Tile=None, attrs=None):
if Tile is None:
Tile = [0, 0, 0, 0, 0]
mod = add_a_conv.add_a_conv(fmap_shape, filter_shape, pad_, stride_, dilation_,
tile_hh=Tile[0], tile_coco=Tile[1], tile_mm=Tile[2], tile_kk=Tile[3], tile_nn=Tile[4], bypass_l1=bypass_l1,
use_bias=use_bias, block_size=16, conv_dtype='float16')
fmap_data, filter_data, bias_data, expect = gen_data(fmap_shape, filter_shape, pad_[0], stride_[0], dilation_[0], use_bias)
if dump_data:
with open('input.bin', 'wb') as fo:
fo.write(fmap_data.astype(np.float16, copy=False))
with open('filter.bin', 'wb') as fo:
fo.write(filter_data.astype(np.float16, copy=False))
with open('bias.bin', 'wb') as fo:
fo.write(bias_data.astype(np.float16, copy=False))
with open('output.bin', 'wb') as fo:
fo.write(expect.astype(np.float16, copy=False))
out_data = np.full(expect.shape, 0, 'float16')
if use_bias:
input = (fmap_data, filter_data, bias_data)
args = (fmap_data, filter_data, bias_data, out_data)
else:
input = (fmap_data, filter_data)
args = (fmap_data, filter_data, out_data)
out_data = utils.mod_launch(mod, args, expect=expect)
# require abs(out_data - expect) < 5e-3 * abs(expect)
data_len = expect.size
try:
actual = out_data
N, C1, H, W, C0 = out_data.shape
error = 0
count = 0
lastErr = -2
continueErr = 0
maxContinue = -1
maxEnd = 0
partial_debug = 0
for n in range(N):
for c1 in range(C1):
for h in range(H):
for w in range(W):
for c0 in range(C0):
a = actual[n, c1, h, w, c0]
b = expect[n, c1, h, w, c0]
if (abs(a - b) > abs(b) * 5e-03):
if (partial_debug and (a == 0.0)):
continue
error += 1
if lastErr + 1 == count:
continueErr += 1
else:
if continueErr > maxContinue:
maxContinue = continueErr
maxEnd = lastErr
continueErr = 1
lastErr = count
# if error < 35:
# print ("count: %6d expect: %10f actual: %10f %10.2f%%"%(count, b, a, abs((b-a)/b*100)))
count += 1
if continueErr > maxContinue:
maxContinue = continueErr
maxEnd = lastErr
print("error num: %d/%d (%.2f%%)" % (error, count, 100.0 * error / count))
print("longest error range: [%d, %d]" % (maxEnd - maxContinue + 1, maxEnd))
sys.stdout.flush()
if maxContinue >= 16:
os._exit(-1)
else:
assert_res = True
np.testing.assert_allclose(actual, expect, rtol=5e-03, equal_nan=True, verbose=True)
print("\n\n******************** test ok *****************\n\n")
except BaseException as e:
assert_res = False  # comparison failed (or never completed), so report failure to the caller
np.savetxt("actual.txt", out_data.reshape(data_len))
np.savetxt("expect.txt", expect.reshape(data_len))
print(str(e))
return input, out_data, expect, assert_res
def gen_data(fm_shape, w_shape, pad, stride, dilation, bias):
IN, IC, IH, IW = fm_shape
C0 = 16
IC = ((IC + C0 - 1) // C0) * C0
WN, WC, WH, WW = w_shape
WN = ((WN + C0 - 1) // C0) * C0
WC = ((WC + C0 - 1) // C0) * C0
ON = IN
OC = WN
WHD = (WH - 1) * dilation + 1
WWD = (WW - 1) * dilation + 1
OH = (IH + 2 * pad - WHD) // stride + 1
OW = (IW + 2 * pad - WWD) // stride + 1
x = random_gaussian((IN, IC, IH, IW), miu=1, sigma=0.1).astype(np.float16)
w = random_gaussian((WN, WC, WH, WW), miu=0.5, sigma=0.01).astype(np.float16)
x_add = x + 1.0
if bias:
b = np.random.rand(WN).astype(np.float16, copy=False)
else:
b = (np.array(np.zeros(WN))).astype(np.float16, copy=False)
conv_param = {'stride': stride, 'pad': pad, 'dilation': dilation}
out = conv_forward_naive(x_add, w, b, conv_param)
''' transpose to 5D - NC1HWC0 '''
feature = x.reshape(IN, IC // C0, C0, IH, IW).transpose(0, 1, 3, 4, 2).copy()
''' transpose to 5D - C1HWNC0 '''
filter = w.reshape(WN, WC // C0, C0, WH, WW).transpose(1, 3, 4, 0, 2).copy()
''' transpose to 5D - NC1HWC0 '''
output = out.reshape(ON, OC // C0, C0, OH, OW).transpose(0, 1, 3, 4, 2).copy()
return feature, filter, b, output
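# Layout note: the 5D formats split channels C into C1 = C // 16 groups of C0 = 16; the feature
# map and output use NC1HWC0, i.e. (N, C // 16, H, W, 16), while the filter is rearranged to
# C1HWNC0, matching the reshape/transpose calls above.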
|
the-stack_106_32069 | import re
from . import QtCore
class RecursiveSortFilterProxyModel(QtCore.QSortFilterProxyModel):
"""Filters to the regex if any of the children matches allow parent"""
def filterAcceptsRow(self, row, parent):
regex = self.filterRegExp()
if not regex.isEmpty():
pattern = regex.pattern()
model = self.sourceModel()
source_index = model.index(row, self.filterKeyColumn(), parent)
if source_index.isValid():
# Check current index itself
key = model.data(source_index, self.filterRole())
if re.search(pattern, key, re.IGNORECASE):
return True
# Check children
rows = model.rowCount(source_index)
for i in range(rows):
if self.filterAcceptsRow(i, source_index):
return True
# Otherwise filter it
return False
return super(RecursiveSortFilterProxyModel,
self).filterAcceptsRow(row, parent)
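# Rough usage sketch (model construction is assumed): a parent row stays visible when any of
# its children matches the filter.
# proxy = RecursiveSortFilterProxyModel()
# proxy.setSourceModel(source_model)
# proxy.setFilterRegExp(QtCore.QRegExp('camera'))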
|
the-stack_106_32071 | import threading
import time
# global variables
hero_health = 40
orc_health = 7
dragon_health = 20
def thread_orc():
global hero_health
global orc_health
while orc_health > 0 and hero_health > 0:
time.sleep(1.5)
hero_health = hero_health - 1
print("Orc attacked... Hero health: ", hero_health)
def thread_dragon():
global hero_health
global dragon_health
while dragon_health > 0 and hero_health > 0:
time.sleep(2.0)
hero_health = hero_health - 3
print("Dragon attacked... Hero health: ", hero_health)
# making threads for orc and dragon
orc = threading.Thread(target=thread_orc)
dragon = threading.Thread(target=thread_dragon)
# to start the thread
orc.start()
dragon.start()
# main user loop
while hero_health > 0 and orc_health > 0 and dragon_health > 0:
var = input("attack ")
if var == "orc":
orc_health = orc_health - 2
print("Hero attack Orc ... Orc health is ", str(orc_health))
elif var == "dragon":
dragon_health = dragon_health - 2
print("Hero attack dragon ... Dragon health is ", str(dragon_health))
# Wait for threads to finish
orc.join()
dragon.join()
|
the-stack_106_32072 | import warnings
import attr
import cattr
from typing import (
Dict,
Optional,
List,
Any,
DefaultDict,
Mapping,
Tuple,
Union,
ClassVar,
)
from enum import Enum
import collections
import argparse
import abc
import numpy as np
import math
import copy
from mlagents.trainers.cli_utils import StoreConfigFile, DetectDefault, parser
from mlagents.trainers.cli_utils import load_config
from mlagents.trainers.exception import TrainerConfigError, TrainerConfigWarning
from mlagents_envs import logging_util
from mlagents_envs.side_channel.environment_parameters_channel import (
EnvironmentParametersChannel,
)
logger = logging_util.get_logger(__name__)
def check_and_structure(key: str, value: Any, class_type: type) -> Any:
attr_fields_dict = attr.fields_dict(class_type)
if key not in attr_fields_dict:
raise TrainerConfigError(
f"The option {key} was specified in your YAML file for {class_type.__name__}, but is invalid."
)
# Apply cattr structure to the values
return cattr.structure(value, attr_fields_dict[key].type)
def strict_to_cls(d: Mapping, t: type) -> Any:
if not isinstance(d, Mapping):
raise TrainerConfigError(f"Unsupported config {d} for {t.__name__}.")
d_copy: Dict[str, Any] = {}
d_copy.update(d)
for key, val in d_copy.items():
d_copy[key] = check_and_structure(key, val, t)
return t(**d_copy)
def defaultdict_to_dict(d: DefaultDict) -> Dict:
return {key: cattr.unstructure(val) for key, val in d.items()}
def deep_update_dict(d: Dict, update_d: Mapping) -> None:
"""
Similar to dict.update(), but works for nested dicts of dicts as well.
"""
for key, val in update_d.items():
if key in d and isinstance(d[key], Mapping) and isinstance(val, Mapping):
deep_update_dict(d[key], val)
else:
d[key] = val
class SerializationSettings:
convert_to_onnx = True
onnx_opset = 9
@attr.s(auto_attribs=True)
class ExportableSettings:
def as_dict(self):
return cattr.unstructure(self)
class EncoderType(Enum):
MATCH3 = "match3"
SIMPLE = "simple"
NATURE_CNN = "nature_cnn"
RESNET = "resnet"
RESNET18 = "resnet18"
class ScheduleType(Enum):
CONSTANT = "constant"
LINEAR = "linear"
@attr.s(auto_attribs=True)
class NetworkSettings:
@attr.s
class MemorySettings:
sequence_length: int = attr.ib(default=64)
memory_size: int = attr.ib(default=128)
@memory_size.validator
def _check_valid_memory_size(self, attribute, value):
if value <= 0:
raise TrainerConfigError(
"When using a recurrent network, memory size must be greater than 0."
)
elif value % 2 != 0:
raise TrainerConfigError(
"When using a recurrent network, memory size must be divisible by 2."
)
normalize: bool = False
hidden_units: int = 128
num_layers: int = 2
vis_encode_type: EncoderType = EncoderType.SIMPLE
memory: Optional[MemorySettings] = None
@attr.s(auto_attribs=True)
class BehavioralCloningSettings:
demo_path: str
steps: int = 0
strength: float = 1.0
samples_per_update: int = 0
# Setting either of these to None will allow the Optimizer
# to decide these parameters, based on Trainer hyperparams
num_epoch: Optional[int] = None
batch_size: Optional[int] = None
@attr.s(auto_attribs=True)
class HyperparamSettings:
batch_size: int = 1024
buffer_size: int = 10240
learning_rate: float = 3.0e-4
learning_rate_schedule: ScheduleType = ScheduleType.CONSTANT
@attr.s(auto_attribs=True)
class PPOSettings(HyperparamSettings):
beta: float = 5.0e-3
epsilon: float = 0.2
lambd: float = 0.95
num_epoch: int = 3
learning_rate_schedule: ScheduleType = ScheduleType.LINEAR
@attr.s(auto_attribs=True)
class SACSettings(HyperparamSettings):
batch_size: int = 128
buffer_size: int = 50000
buffer_init_steps: int = 0
tau: float = 0.005
steps_per_update: float = 1
save_replay_buffer: bool = False
init_entcoef: float = 1.0
reward_signal_steps_per_update: float = attr.ib()
@reward_signal_steps_per_update.default
def _reward_signal_steps_per_update_default(self):
return self.steps_per_update
# INTRINSIC REWARD SIGNALS #############################################################
class RewardSignalType(Enum):
EXTRINSIC: str = "extrinsic"
GAIL: str = "gail"
CURIOSITY: str = "curiosity"
RND: str = "rnd"
def to_settings(self) -> type:
_mapping = {
RewardSignalType.EXTRINSIC: RewardSignalSettings,
RewardSignalType.GAIL: GAILSettings,
RewardSignalType.CURIOSITY: CuriositySettings,
RewardSignalType.RND: RNDSettings,
}
return _mapping[self]
@attr.s(auto_attribs=True)
class RewardSignalSettings:
gamma: float = 0.99
strength: float = 1.0
@staticmethod
def structure(d: Mapping, t: type) -> Any:
"""
Helper method to structure a Dict of RewardSignalSettings class. Meant to be registered with
cattr.register_structure_hook() and called with cattr.structure(). This is needed to handle
the special Enum selection of RewardSignalSettings classes.
"""
if not isinstance(d, Mapping):
raise TrainerConfigError(f"Unsupported reward signal configuration {d}.")
d_final: Dict[RewardSignalType, RewardSignalSettings] = {}
for key, val in d.items():
enum_key = RewardSignalType(key)
t = enum_key.to_settings()
d_final[enum_key] = strict_to_cls(val, t)
return d_final
@attr.s(auto_attribs=True)
class GAILSettings(RewardSignalSettings):
encoding_size: int = 64
learning_rate: float = 3e-4
use_actions: bool = False
use_vail: bool = False
demo_path: str = attr.ib(kw_only=True)
@attr.s(auto_attribs=True)
class CuriositySettings(RewardSignalSettings):
encoding_size: int = 64
learning_rate: float = 3e-4
@attr.s(auto_attribs=True)
class RNDSettings(RewardSignalSettings):
encoding_size: int = 64
learning_rate: float = 1e-4
# SAMPLERS #############################################################################
class ParameterRandomizationType(Enum):
UNIFORM: str = "uniform"
GAUSSIAN: str = "gaussian"
MULTIRANGEUNIFORM: str = "multirangeuniform"
CONSTANT: str = "constant"
def to_settings(self) -> type:
_mapping = {
ParameterRandomizationType.UNIFORM: UniformSettings,
ParameterRandomizationType.GAUSSIAN: GaussianSettings,
ParameterRandomizationType.MULTIRANGEUNIFORM: MultiRangeUniformSettings,
ParameterRandomizationType.CONSTANT: ConstantSettings
# Constant type is handled if a float is provided instead of a config
}
return _mapping[self]
@attr.s(auto_attribs=True)
class ParameterRandomizationSettings(abc.ABC):
seed: int = parser.get_default("seed")
def __str__(self) -> str:
"""
Helper method to output sampler stats to console.
"""
raise TrainerConfigError(f"__str__ not implemented for type {self.__class__}.")
@staticmethod
def structure(
d: Union[Mapping, float], t: type
) -> "ParameterRandomizationSettings":
"""
Helper method to a ParameterRandomizationSettings class. Meant to be registered with
cattr.register_structure_hook() and called with cattr.structure(). This is needed to handle
the special Enum selection of ParameterRandomizationSettings classes.
"""
if isinstance(d, (float, int)):
return ConstantSettings(value=d)
if not isinstance(d, Mapping):
raise TrainerConfigError(
f"Unsupported parameter randomization configuration {d}."
)
if "sampler_type" not in d:
raise TrainerConfigError(
f"Sampler configuration does not contain sampler_type : {d}."
)
if "sampler_parameters" not in d:
raise TrainerConfigError(
f"Sampler configuration does not contain sampler_parameters : {d}."
)
enum_key = ParameterRandomizationType(d["sampler_type"])
t = enum_key.to_settings()
return strict_to_cls(d["sampler_parameters"], t)
@staticmethod
def unstructure(d: "ParameterRandomizationSettings") -> Mapping:
"""
Helper method to a ParameterRandomizationSettings class. Meant to be registered with
cattr.register_unstructure_hook() and called with cattr.unstructure().
"""
_reversed_mapping = {
UniformSettings: ParameterRandomizationType.UNIFORM,
GaussianSettings: ParameterRandomizationType.GAUSSIAN,
MultiRangeUniformSettings: ParameterRandomizationType.MULTIRANGEUNIFORM,
ConstantSettings: ParameterRandomizationType.CONSTANT,
}
sampler_type: Optional[str] = None
for t, name in _reversed_mapping.items():
if isinstance(d, t):
sampler_type = name.value
sampler_parameters = attr.asdict(d)
return {"sampler_type": sampler_type, "sampler_parameters": sampler_parameters}
@abc.abstractmethod
def apply(self, key: str, env_channel: EnvironmentParametersChannel) -> None:
"""
Helper method to send sampler settings over EnvironmentParametersChannel
Calls the appropriate sampler type set method.
:param key: environment parameter to be sampled
:param env_channel: The EnvironmentParametersChannel to communicate sampler settings to environment
"""
pass
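# Behaviour of the structure hook above (values are illustrative): a bare number such as 0.5
# becomes ConstantSettings(value=0.5), while
# {'sampler_type': 'uniform', 'sampler_parameters': {'min_value': 0.1, 'max_value': 0.9}}
# becomes UniformSettings(min_value=0.1, max_value=0.9).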
@attr.s(auto_attribs=True)
class ConstantSettings(ParameterRandomizationSettings):
value: float = 0.0
def __str__(self) -> str:
"""
Helper method to output sampler stats to console.
"""
return f"Float: value={self.value}"
def apply(self, key: str, env_channel: EnvironmentParametersChannel) -> None:
"""
Helper method to send sampler settings over EnvironmentParametersChannel
Calls the constant sampler type set method.
:param key: environment parameter to be sampled
:param env_channel: The EnvironmentParametersChannel to communicate sampler settings to environment
"""
env_channel.set_float_parameter(key, self.value)
@attr.s(auto_attribs=True)
class UniformSettings(ParameterRandomizationSettings):
min_value: float = attr.ib()
max_value: float = 1.0
def __str__(self) -> str:
"""
Helper method to output sampler stats to console.
"""
return f"Uniform sampler: min={self.min_value}, max={self.max_value}"
@min_value.default
def _min_value_default(self):
return 0.0
@min_value.validator
def _check_min_value(self, attribute, value):
if self.min_value > self.max_value:
raise TrainerConfigError(
"Minimum value is greater than maximum value in uniform sampler."
)
def apply(self, key: str, env_channel: EnvironmentParametersChannel) -> None:
"""
Helper method to send sampler settings over EnvironmentParametersChannel
Calls the uniform sampler type set method.
:param key: environment parameter to be sampled
:param env_channel: The EnvironmentParametersChannel to communicate sampler settings to environment
"""
env_channel.set_uniform_sampler_parameters(
key, self.min_value, self.max_value, self.seed
)
@attr.s(auto_attribs=True)
class GaussianSettings(ParameterRandomizationSettings):
mean: float = 1.0
st_dev: float = 1.0
def __str__(self) -> str:
"""
Helper method to output sampler stats to console.
"""
return f"Gaussian sampler: mean={self.mean}, stddev={self.st_dev}"
def apply(self, key: str, env_channel: EnvironmentParametersChannel) -> None:
"""
Helper method to send sampler settings over EnvironmentParametersChannel
Calls the gaussian sampler type set method.
:param key: environment parameter to be sampled
:param env_channel: The EnvironmentParametersChannel to communicate sampler settings to environment
"""
env_channel.set_gaussian_sampler_parameters(
key, self.mean, self.st_dev, self.seed
)
@attr.s(auto_attribs=True)
class MultiRangeUniformSettings(ParameterRandomizationSettings):
intervals: List[Tuple[float, float]] = attr.ib()
def __str__(self) -> str:
"""
Helper method to output sampler stats to console.
"""
return f"MultiRangeUniform sampler: intervals={self.intervals}"
@intervals.default
def _intervals_default(self):
return [[0.0, 1.0]]
@intervals.validator
def _check_intervals(self, attribute, value):
for interval in self.intervals:
if len(interval) != 2:
raise TrainerConfigError(
f"The sampling interval {interval} must contain exactly two values."
)
min_value, max_value = interval
if min_value > max_value:
raise TrainerConfigError(
f"Minimum value is greater than maximum value in interval {interval}."
)
def apply(self, key: str, env_channel: EnvironmentParametersChannel) -> None:
"""
Helper method to send sampler settings over EnvironmentParametersChannel
Calls the multirangeuniform sampler type set method.
:param key: environment parameter to be sampled
:param env_channel: The EnvironmentParametersChannel to communicate sampler settings to environment
"""
env_channel.set_multirangeuniform_sampler_parameters(
key, self.intervals, self.seed
)
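def _sampler_structure_sketch():
    # Illustrative sketch only (not part of the trainer API): shows how the
    # structure() hook above resolves raw config values into sampler classes.
    # A bare float becomes a ConstantSettings; a dict is dispatched on its
    # "sampler_type" key via ParameterRandomizationType.to_settings().
    constant = ParameterRandomizationSettings.structure(
        0.5, ParameterRandomizationSettings
    )
    uniform = ParameterRandomizationSettings.structure(
        {
            "sampler_type": "uniform",
            "sampler_parameters": {"min_value": 0.1, "max_value": 0.9},
        },
        ParameterRandomizationSettings,
    )
    return constant, uniform  # -> (ConstantSettings, UniformSettings) instances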
# ENVIRONMENT PARAMETERS ###############################################################
@attr.s(auto_attribs=True)
class CompletionCriteriaSettings:
"""
CompletionCriteriaSettings contains the information needed to figure out if the next
lesson must start.
"""
class MeasureType(Enum):
PROGRESS: str = "progress"
REWARD: str = "reward"
behavior: str
measure: MeasureType = attr.ib(default=MeasureType.REWARD)
min_lesson_length: int = 0
signal_smoothing: bool = True
threshold: float = attr.ib(default=0.0)
require_reset: bool = False
@threshold.validator
def _check_threshold_value(self, attribute, value):
"""
Verify that the threshold has a value between 0 and 1 when the measure is
PROGRESS
"""
if self.measure == self.MeasureType.PROGRESS:
if self.threshold > 1.0:
raise TrainerConfigError(
"Threshold for next lesson cannot be greater than 1 when the measure is progress."
)
if self.threshold < 0.0:
raise TrainerConfigError(
"Threshold for next lesson cannot be negative when the measure is progress."
)
def need_increment(
self, progress: float, reward_buffer: List[float], smoothing: float
) -> Tuple[bool, float]:
"""
Given measures, this method returns a boolean indicating if the lesson
needs to change now, and a float corresponding to the new smoothed value.
"""
# Is the min number of episodes reached
if len(reward_buffer) < self.min_lesson_length:
return False, smoothing
if self.measure == CompletionCriteriaSettings.MeasureType.PROGRESS:
if progress > self.threshold:
return True, smoothing
if self.measure == CompletionCriteriaSettings.MeasureType.REWARD:
if len(reward_buffer) < 1:
return False, smoothing
measure = np.mean(reward_buffer)
if math.isnan(measure):
return False, smoothing
if self.signal_smoothing:
measure = 0.25 * smoothing + 0.75 * measure
smoothing = measure
if measure > self.threshold:
return True, smoothing
return False, smoothing
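def _completion_criteria_sketch():
    # Illustrative sketch only (behavior name is arbitrary): demonstrates the
    # reward-based increment logic implemented in need_increment() above. With
    # signal_smoothing enabled, the smoothed measure is
    # 0.25 * previous_smoothing + 0.75 * mean(reward_buffer).
    criteria = CompletionCriteriaSettings(behavior="MyBehavior", threshold=0.8)
    should_increment, smoothing = criteria.need_increment(
        progress=0.1, reward_buffer=[1.0, 1.0, 1.0], smoothing=0.0
    )
    # 0.25 * 0.0 + 0.75 * 1.0 = 0.75, which is below the 0.8 threshold.
    return should_increment, smoothing  # -> (False, 0.75)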
@attr.s(auto_attribs=True)
class Lesson:
"""
Gathers the data of one lesson for one environment parameter including its name,
    the condition that must be fulfilled for the lesson to be completed and a sampler
for the environment parameter. If the completion_criteria is None, then this is
the last lesson in the curriculum.
"""
value: ParameterRandomizationSettings
name: str
completion_criteria: Optional[CompletionCriteriaSettings] = attr.ib(default=None)
@attr.s(auto_attribs=True)
class EnvironmentParameterSettings:
"""
EnvironmentParameterSettings is an ordered list of lessons for one environment
parameter.
"""
curriculum: List[Lesson]
@staticmethod
def _check_lesson_chain(lessons, parameter_name):
"""
Ensures that when using curriculum, all non-terminal lessons have a valid
CompletionCriteria, and that the terminal lesson does not contain a CompletionCriteria.
"""
num_lessons = len(lessons)
for index, lesson in enumerate(lessons):
if index < num_lessons - 1 and lesson.completion_criteria is None:
raise TrainerConfigError(
f"A non-terminal lesson does not have a completion_criteria for {parameter_name}."
)
if index == num_lessons - 1 and lesson.completion_criteria is not None:
warnings.warn(
f"Your final lesson definition contains completion_criteria for {parameter_name}."
f"It will be ignored.",
TrainerConfigWarning,
)
@staticmethod
def structure(d: Mapping, t: type) -> Dict[str, "EnvironmentParameterSettings"]:
"""
        Helper method to structure a Dict of EnvironmentParameterSettings classes. Meant
to be registered with cattr.register_structure_hook() and called with
cattr.structure().
"""
if not isinstance(d, Mapping):
raise TrainerConfigError(
f"Unsupported parameter environment parameter settings {d}."
)
d_final: Dict[str, EnvironmentParameterSettings] = {}
for environment_parameter, environment_parameter_config in d.items():
if (
isinstance(environment_parameter_config, Mapping)
and "curriculum" in environment_parameter_config
):
d_final[environment_parameter] = strict_to_cls(
environment_parameter_config, EnvironmentParameterSettings
)
EnvironmentParameterSettings._check_lesson_chain(
d_final[environment_parameter].curriculum, environment_parameter
)
else:
sampler = ParameterRandomizationSettings.structure(
environment_parameter_config, ParameterRandomizationSettings
)
d_final[environment_parameter] = EnvironmentParameterSettings(
curriculum=[
Lesson(
completion_criteria=None,
value=sampler,
name=environment_parameter,
)
]
)
return d_final
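def _environment_parameter_sketch():
    # Illustrative sketch only: a parameter that is not a curriculum (here a
    # uniform sampler config for a hypothetical "gravity" parameter) is wrapped
    # by structure() above into a single terminal Lesson with no
    # completion_criteria.
    settings = EnvironmentParameterSettings.structure(
        {
            "gravity": {
                "sampler_type": "uniform",
                "sampler_parameters": {"min_value": 7.0, "max_value": 12.0},
            }
        },
        Dict[str, EnvironmentParameterSettings],
    )
    lesson = settings["gravity"].curriculum[0]
    return lesson.name, lesson.completion_criteria  # -> ("gravity", None)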
# TRAINERS #############################################################################
@attr.s(auto_attribs=True)
class SelfPlaySettings:
save_steps: int = 20000
team_change: int = attr.ib()
@team_change.default
def _team_change_default(self):
        # Assign team_change to about 5x save_steps
return self.save_steps * 5
swap_steps: int = 2000
window: int = 10
play_against_latest_model_ratio: float = 0.5
initial_elo: float = 1200.0
class TrainerType(Enum):
PPO: str = "ppo"
SAC: str = "sac"
def to_settings(self) -> type:
_mapping = {TrainerType.PPO: PPOSettings, TrainerType.SAC: SACSettings}
return _mapping[self]
@attr.s(auto_attribs=True)
class TrainerSettings(ExportableSettings):
default_override: ClassVar[Optional["TrainerSettings"]] = None
trainer_type: TrainerType = TrainerType.PPO
hyperparameters: HyperparamSettings = attr.ib()
@hyperparameters.default
def _set_default_hyperparameters(self):
return self.trainer_type.to_settings()()
network_settings: NetworkSettings = attr.ib(factory=NetworkSettings)
reward_signals: Dict[RewardSignalType, RewardSignalSettings] = attr.ib(
factory=lambda: {RewardSignalType.EXTRINSIC: RewardSignalSettings()}
)
init_path: Optional[str] = None
keep_checkpoints: int = 5
checkpoint_interval: int = 500000
max_steps: int = 500000
time_horizon: int = 64
summary_freq: int = 50000
threaded: bool = True
self_play: Optional[SelfPlaySettings] = None
behavioral_cloning: Optional[BehavioralCloningSettings] = None
cattr.register_structure_hook(
Dict[RewardSignalType, RewardSignalSettings], RewardSignalSettings.structure
)
@network_settings.validator
def _check_batch_size_seq_length(self, attribute, value):
if self.network_settings.memory is not None:
if (
self.network_settings.memory.sequence_length
> self.hyperparameters.batch_size
):
raise TrainerConfigError(
"When using memory, sequence length must be less than or equal to batch size. "
)
@staticmethod
def dict_to_defaultdict(d: Dict, t: type) -> DefaultDict:
return TrainerSettings.DefaultTrainerDict(
cattr.structure(d, Dict[str, TrainerSettings])
)
@staticmethod
def structure(d: Mapping, t: type) -> Any:
"""
Helper method to structure a TrainerSettings class. Meant to be registered with
cattr.register_structure_hook() and called with cattr.structure().
"""
if not isinstance(d, Mapping):
raise TrainerConfigError(f"Unsupported config {d} for {t.__name__}.")
d_copy: Dict[str, Any] = {}
        # Check if a default_settings was specified. If so, use it as the default
# rather than an empty dict.
if TrainerSettings.default_override is not None:
d_copy.update(cattr.unstructure(TrainerSettings.default_override))
deep_update_dict(d_copy, d)
if "framework" in d_copy:
logger.warning("Framework option was deprecated but was specified")
d_copy.pop("framework", None)
for key, val in d_copy.items():
if attr.has(type(val)):
# Don't convert already-converted attrs classes.
continue
if key == "hyperparameters":
if "trainer_type" not in d_copy:
raise TrainerConfigError(
"Hyperparameters were specified but no trainer_type was given."
)
else:
d_copy[key] = strict_to_cls(
d_copy[key], TrainerType(d_copy["trainer_type"]).to_settings()
)
elif key == "max_steps":
d_copy[key] = int(float(val))
# In some legacy configs, max steps was specified as a float
else:
d_copy[key] = check_and_structure(key, val, t)
return t(**d_copy)
class DefaultTrainerDict(collections.defaultdict):
def __init__(self, *args):
# Depending on how this is called, args may have the defaultdict
# callable at the start of the list or not. In particular, unpickling
# will pass [TrainerSettings].
if args and args[0] == TrainerSettings:
super().__init__(*args)
else:
super().__init__(TrainerSettings, *args)
def __missing__(self, key: Any) -> "TrainerSettings":
if TrainerSettings.default_override is not None:
return copy.deepcopy(TrainerSettings.default_override)
else:
return TrainerSettings()
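def _trainer_settings_sketch():
    # Illustrative sketch only (assumes no default_settings override is active):
    # legacy float-style max_steps strings are coerced to int by
    # TrainerSettings.structure(), and unknown behavior names looked up in a
    # DefaultTrainerDict fall back to a fresh default TrainerSettings.
    settings = TrainerSettings.structure({"max_steps": "5.0e5"}, TrainerSettings)
    behaviors = TrainerSettings.DefaultTrainerDict()
    fallback = behaviors["UnknownBehavior"]
    return settings.max_steps, type(fallback)  # -> (500000, TrainerSettings)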
# COMMAND LINE #########################################################################
@attr.s(auto_attribs=True)
class CheckpointSettings:
run_id: str = parser.get_default("run_id")
initialize_from: Optional[str] = parser.get_default("initialize_from")
load_model: bool = parser.get_default("load_model")
resume: bool = parser.get_default("resume")
force: bool = parser.get_default("force")
train_model: bool = parser.get_default("train_model")
inference: bool = parser.get_default("inference")
@attr.s(auto_attribs=True)
class EnvironmentSettings:
env_path: Optional[str] = parser.get_default("env_path")
env_args: Optional[List[str]] = parser.get_default("env_args")
base_port: int = parser.get_default("base_port")
num_envs: int = attr.ib(default=parser.get_default("num_envs"))
seed: int = parser.get_default("seed")
@num_envs.validator
def validate_num_envs(self, attribute, value):
if value > 1 and self.env_path is None:
raise ValueError("num_envs must be 1 if env_path is not set.")
@attr.s(auto_attribs=True)
class EngineSettings:
width: int = parser.get_default("width")
height: int = parser.get_default("height")
quality_level: int = parser.get_default("quality_level")
time_scale: float = parser.get_default("time_scale")
target_frame_rate: int = parser.get_default("target_frame_rate")
capture_frame_rate: int = parser.get_default("capture_frame_rate")
no_graphics: bool = parser.get_default("no_graphics")
@attr.s(auto_attribs=True)
class RunOptions(ExportableSettings):
default_settings: Optional[TrainerSettings] = None
behaviors: DefaultDict[str, TrainerSettings] = attr.ib(
factory=TrainerSettings.DefaultTrainerDict
)
env_settings: EnvironmentSettings = attr.ib(factory=EnvironmentSettings)
engine_settings: EngineSettings = attr.ib(factory=EngineSettings)
environment_parameters: Optional[Dict[str, EnvironmentParameterSettings]] = None
checkpoint_settings: CheckpointSettings = attr.ib(factory=CheckpointSettings)
# These are options that are relevant to the run itself, and not the engine or environment.
# They will be left here.
debug: bool = parser.get_default("debug")
# Strict conversion
cattr.register_structure_hook(EnvironmentSettings, strict_to_cls)
cattr.register_structure_hook(EngineSettings, strict_to_cls)
cattr.register_structure_hook(CheckpointSettings, strict_to_cls)
cattr.register_structure_hook(
Dict[str, EnvironmentParameterSettings], EnvironmentParameterSettings.structure
)
cattr.register_structure_hook(Lesson, strict_to_cls)
cattr.register_structure_hook(
ParameterRandomizationSettings, ParameterRandomizationSettings.structure
)
cattr.register_unstructure_hook(
ParameterRandomizationSettings, ParameterRandomizationSettings.unstructure
)
cattr.register_structure_hook(TrainerSettings, TrainerSettings.structure)
cattr.register_structure_hook(
DefaultDict[str, TrainerSettings], TrainerSettings.dict_to_defaultdict
)
cattr.register_unstructure_hook(collections.defaultdict, defaultdict_to_dict)
@staticmethod
def from_argparse(args: argparse.Namespace) -> "RunOptions":
"""
Takes an argparse.Namespace as specified in `parse_command_line`, loads input configuration files
from file paths, and converts to a RunOptions instance.
:param args: collection of command-line parameters passed to mlagents-learn
:return: RunOptions representing the passed in arguments, with trainer config, curriculum and sampler
configs loaded from files.
"""
argparse_args = vars(args)
config_path = StoreConfigFile.trainer_config_path
# Load YAML
configured_dict: Dict[str, Any] = {
"checkpoint_settings": {},
"env_settings": {},
"engine_settings": {},
}
if config_path is not None:
configured_dict.update(load_config(config_path))
# Use the YAML file values for all values not specified in the CLI.
for key in configured_dict.keys():
# Detect bad config options
if key not in attr.fields_dict(RunOptions):
raise TrainerConfigError(
"The option {} was specified in your YAML file, but is invalid.".format(
key
)
)
# Override with CLI args
# Keep deprecated --load working, TODO: remove
argparse_args["resume"] = argparse_args["resume"] or argparse_args["load_model"]
for key, val in argparse_args.items():
if key in DetectDefault.non_default_args:
if key in attr.fields_dict(CheckpointSettings):
configured_dict["checkpoint_settings"][key] = val
elif key in attr.fields_dict(EnvironmentSettings):
configured_dict["env_settings"][key] = val
elif key in attr.fields_dict(EngineSettings):
configured_dict["engine_settings"][key] = val
else: # Base options
configured_dict[key] = val
final_runoptions = RunOptions.from_dict(configured_dict)
return final_runoptions
@staticmethod
def from_dict(options_dict: Dict[str, Any]) -> "RunOptions":
        # If default_settings was specified, set the TrainerSettings class override
if (
"default_settings" in options_dict.keys()
and options_dict["default_settings"] is not None
):
TrainerSettings.default_override = cattr.structure(
options_dict["default_settings"], TrainerSettings
)
return cattr.structure(options_dict, RunOptions)
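def _run_options_sketch():
    # Illustrative sketch only (assumes no default_settings override is active
    # and default cattr structuring of missing sections): a nested options
    # dict, shaped like a loaded YAML config, is structured into RunOptions,
    # with per-behavior sections becoming TrainerSettings entries.
    options = RunOptions.from_dict(
        {
            "behaviors": {"MyBehavior": {"max_steps": 10000}},
            "engine_settings": {"time_scale": 20.0},
        }
    )
    return options.behaviors["MyBehavior"].max_steps  # -> 10000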
|
the-stack_106_32075 | # -*- coding: utf-8 -*-
"""ATA over Ethernet Protocol."""
from __future__ import absolute_import
import struct
from . import dpkt
from .compat import iteritems
class AOE(dpkt.Packet):
"""ATA over Ethernet Protocol.
    See more about AOE at
https://en.wikipedia.org/wiki/ATA_over_Ethernet
Attributes:
__hdr__: Header fields of AOE.
data: Message data.
"""
__hdr__ = (
('ver_fl', 'B', 0x10),
('err', 'B', 0),
('maj', 'H', 0),
('min', 'B', 0),
('cmd', 'B', 0),
('tag', 'I', 0),
)
_cmdsw = {}
@property
def ver(self):
return self.ver_fl >> 4
@ver.setter
def ver(self, ver):
self.ver_fl = (ver << 4) | (self.ver_fl & 0xf)
@property
def fl(self):
return self.ver_fl & 0xf
@fl.setter
def fl(self, fl):
self.ver_fl = (self.ver_fl & 0xf0) | fl
@classmethod
def set_cmd(cls, cmd, pktclass):
cls._cmdsw[cmd] = pktclass
@classmethod
def get_cmd(cls, cmd):
return cls._cmdsw[cmd]
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
try:
self.data = self._cmdsw[self.cmd](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, struct.error, dpkt.UnpackError):
pass
AOE_CMD_ATA = 0
AOE_CMD_CFG = 1
AOE_FLAG_RSP = 1 << 3
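def _aoe_header_sketch():
    # Illustrative sketch only (not part of dpkt's API): builds a response
    # header for an ATA command and reads back the packed version/flags byte
    # exposed through the ver/fl properties above.
    aoe = AOE(cmd=AOE_CMD_ATA, tag=0x1234)
    aoe.fl = AOE_FLAG_RSP      # low nibble of ver_fl marks a response
    assert aoe.ver == 1        # high nibble keeps the default AoE version
    return bytes(aoe)          # 10-byte header followed by empty payload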
def _load_cmds():
prefix = 'AOE_CMD_'
g = globals()
for k, v in iteritems(g):
if k.startswith(prefix):
name = 'aoe' + k[len(prefix):].lower()
try:
mod = __import__(name, g, level=1)
AOE.set_cmd(v, getattr(mod, name.upper()))
except (ImportError, AttributeError):
continue
def _mod_init():
"""Post-initialization called when all dpkt modules are fully loaded"""
if not AOE._cmdsw:
_load_cmds()
def test_creation():
aoe = AOE()
# hdr fields
assert aoe.ver_fl == 0x10
assert aoe.err == 0
assert aoe.maj == 0
assert aoe.min == 0
assert aoe.cmd == 0
assert aoe.tag == 0
assert bytes(aoe) == b'\x10' + b'\x00'*9
def test_properties():
aoe = AOE()
    # property getters
assert aoe.ver == 1
assert aoe.fl == 0
# property setters
aoe.ver = 2
assert aoe.ver == 2
assert aoe.ver_fl == 0x20
aoe.fl = 12
assert aoe.fl == 12
assert aoe.ver_fl == 0x2C
def test_unpack():
from binascii import unhexlify
buf = unhexlify(
'1000000000'
'00' # cmd: AOE_CMD_ATA
'00000000' # tag
)
aoe = AOE(buf)
    # AOE_CMD_ATA specified, but no data supplied
assert aoe.data == b''
buf = unhexlify(
'1000000000'
'00' # cmd: AOE_CMD_ATA
'00000000' # tag
# AOEDATA specification
'030a6b190000000045000028941f0000e30699b4232b2400de8e8442abd100500035e1'
'2920d9000000229bf0e204656b'
)
aoe = AOE(buf)
assert aoe.aoeata == aoe.data
def test_cmds():
import dpkt
assert AOE.get_cmd(AOE_CMD_ATA) == dpkt.aoeata.AOEATA
assert AOE.get_cmd(AOE_CMD_CFG) == dpkt.aoecfg.AOECFG
def test_cmd_loading():
# this test checks that failing to load a module isn't catastrophic
standard_cmds = AOE._cmdsw
# delete the existing code->module mappings
AOE._cmdsw = {}
assert not AOE._cmdsw
# create a new global constant pointing to a module which doesn't exist
globals()['AOE_CMD_FAIL'] = "FAIL"
_mod_init()
# check that the same modules were loaded, ignoring the fail
assert AOE._cmdsw == standard_cmds
|
the-stack_106_32076 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sympy
import mpmath
import math
import re
from itertools import chain
from mathics.core.numbers import get_type, dps, prec, min_prec, machine_precision
from mathics.core.convert import sympy_symbol_prefix, SympyExpression
def fully_qualified_symbol_name(name):
return (isinstance(name, str) and
'`' in name and
not name.startswith('`') and
not name.endswith('`') and
'``' not in name)
def valid_context_name(ctx, allow_initial_backquote=False):
return (isinstance(ctx, str) and
ctx.endswith('`') and
'``' not in ctx and
(allow_initial_backquote or not ctx.startswith('`')))
def ensure_context(name):
assert isinstance(name, str)
assert name != ''
if '`' in name:
# Symbol has a context mark -> it came from the parser
assert fully_qualified_symbol_name(name)
return name
# Symbol came from Python code doing something like
# Expression('Plus', ...) -> use System`
return 'System`' + name
def strip_context(name):
if '`' in name:
return name[name.rindex('`') + 1:]
return name
# system_symbols('A', 'B', ...) -> ['System`A', 'System`B', ...]
def system_symbols(*symbols):
return [ensure_context(s) for s in symbols]
# system_symbols_dict({'SomeSymbol': ...}) -> {'System`SomeSymbol': ...}
def system_symbols_dict(d):
return {ensure_context(k): v for k, v in d.items()}
class BoxError(Exception):
def __init__(self, box, form):
super(BoxError, self).__init__(
'Box %s cannot be formatted as %s' % (box, form))
self.box = box
self.form = form
class ExpressionPointer(object):
def __init__(self, parent, position):
self.parent = parent
self.position = position
def replace(self, new):
if self.position == 0:
self.parent.head = new
else:
self.parent.leaves[self.position - 1] = new
def __str__(self):
return '%s[[%s]]' % (self.parent, self.position)
def from_python(arg):
number_type = get_type(arg)
if isinstance(arg, int) or number_type == 'z':
return Integer(arg)
elif isinstance(arg, float) or number_type == 'f':
return Real(arg)
elif number_type == 'q':
return Rational(arg)
elif isinstance(arg, complex):
return Complex(Real(arg.real), Real(arg.imag))
elif number_type == 'c':
return Complex(arg.real, arg.imag)
elif isinstance(arg, str):
return String(arg)
# if arg[0] == arg[-1] == '"':
# return String(arg[1:-1])
# else:
# return Symbol(arg)
elif isinstance(arg, BaseExpression):
return arg
elif isinstance(arg, list) or isinstance(arg, tuple):
return Expression('List', *[from_python(leaf) for leaf in arg])
else:
raise NotImplementedError
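def _from_python_sketch():
    # Illustrative sketch only: native Python values are wrapped in the
    # corresponding Mathics atoms by from_python() above, and lists become
    # List expressions.
    return (
        from_python(3),         # -> Integer(3)
        from_python(2.5),       # -> Real(2.5)
        from_python('abc'),     # -> String('abc')
        from_python([1, 2]),    # -> Expression('List', Integer(1), Integer(2))
    )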
class KeyComparable(object):
def get_sort_key(self):
        raise NotImplementedError
def __lt__(self, other):
return self.get_sort_key() < other.get_sort_key()
def __gt__(self, other):
return self.get_sort_key() > other.get_sort_key()
def __le__(self, other):
return self.get_sort_key() <= other.get_sort_key()
def __ge__(self, other):
return self.get_sort_key() >= other.get_sort_key()
def __eq__(self, other):
return self.get_sort_key() == other.get_sort_key()
def __ne__(self, other):
return self.get_sort_key() != other.get_sort_key()
class BaseExpression(KeyComparable):
def __new__(cls, *args, **kwargs):
self = object.__new__(cls)
self.options = None
self.pattern_sequence = False
self.unformatted = self
self.last_evaluated = None
return self
def sequences(self):
return None
def flatten_sequence(self):
return self
def flatten_pattern_sequence(self):
return self
def get_attributes(self, definitions):
return set()
def evaluate_next(self, evaluation):
return self.evaluate(evaluation), False
def evaluate(self, evaluation):
evaluation.check_stopped()
return self
def get_atoms(self, include_heads=True):
return []
def get_name(self):
" Returns symbol's name if Symbol instance "
return ''
def is_symbol(self):
return False
def is_machine_precision(self):
return False
def get_lookup_name(self):
" Returns symbol name of leftmost head "
return self.get_name()
def get_head(self):
return None
def get_head_name(self):
return self.get_head().get_name()
def get_leaves(self):
return []
def get_int_value(self):
return None
def get_float_value(self, permit_complex=False):
return None
def get_string_value(self):
return None
def is_atom(self):
return False
def is_true(self):
return False
def is_numeric(self):
# used by NumericQ and expression ordering
return False
def flatten(self, head, pattern_only=False, callback=None):
return self
def __hash__(self):
"""
To allow usage of expression as dictionary keys,
as in Expression.get_pre_choices
"""
raise NotImplementedError
def user_hash(self, update):
# whereas __hash__ is for internal Mathics purposes like using Expressions as dictionary keys and fast
# comparison of elements, user_hash is called for Hash[]. user_hash should strive to give stable results
# across versions, whereas __hash__ must not. user_hash should try to hash all the data available, whereas
# __hash__ might only hash a sample of the data available.
raise NotImplementedError
def same(self, other):
pass
def get_sequence(self):
if self.get_head().get_name() == 'System`Sequence':
return self.leaves
else:
return [self]
def evaluate_leaves(self, evaluation):
return self
def apply_rules(self, rules, evaluation, level=0, options=None):
if options:
l1, l2 = options['levelspec']
if level < l1:
return self, False
elif l2 is not None and level > l2:
return self, False
for rule in rules:
result = rule.apply(self, evaluation, fully=False)
if result is not None:
return result, True
return self, False
def do_format(self, evaluation, form):
formats = system_symbols(
'InputForm', 'OutputForm', 'StandardForm',
'FullForm', 'TraditionalForm', 'TeXForm', 'MathMLForm')
evaluation.inc_recursion_depth()
try:
expr = self
head = self.get_head_name()
include_form = False
if head in formats and len(self.get_leaves()) == 1:
expr = self.leaves[0]
if not (form == 'System`OutputForm' and head == 'System`StandardForm'):
form = head
include_form = True
unformatted = expr
def format_expr(expr):
if not(expr.is_atom()) and not(expr.head.is_atom()):
# expr is of the form f[...][...]
return None
name = expr.get_lookup_name()
formats = evaluation.definitions.get_formats(name, form)
for rule in formats:
result = rule.apply(expr, evaluation)
if result is not None and result != expr:
return result.evaluate(evaluation)
return None
if form != 'System`FullForm':
formatted = format_expr(expr)
if formatted is not None:
result = formatted.do_format(evaluation, form)
if include_form:
result = Expression(form, result)
result.unformatted = unformatted
return result
head = expr.get_head_name()
if head in formats:
expr = expr.do_format(evaluation, form)
elif (head != 'System`NumberForm' and not expr.is_atom() and
head != 'System`Graphics'):
new_leaves = [leaf.do_format(evaluation, form)
for leaf in expr.leaves]
expr = Expression(
expr.head.do_format(evaluation, form), *new_leaves)
if include_form:
expr = Expression(form, expr)
expr.unformatted = unformatted
return expr
finally:
evaluation.dec_recursion_depth()
def format(self, evaluation, form):
expr = self.do_format(evaluation, form)
result = Expression(
'MakeBoxes', expr, Symbol(form)).evaluate(evaluation)
return result
def is_free(self, form, evaluation):
from mathics.builtin.patterns import item_is_free
return item_is_free(self, form, evaluation)
def is_inexact(self):
return self.get_precision() is not None
def get_precision(self):
return None
def get_option_values(self, evaluation, allow_symbols=False,
stop_on_error=True):
options = self
if options.has_form('List', None):
options = options.flatten(Symbol('List'))
values = options.leaves
else:
values = [options]
option_values = {}
for option in values:
symbol_name = option.get_name()
if allow_symbols and symbol_name:
options = evaluation.definitions.get_options(symbol_name)
option_values.update(options)
else:
if not option.has_form(('Rule', 'RuleDelayed'), 2):
if stop_on_error:
return None
else:
continue
name = option.leaves[0].get_name()
if not name and isinstance(option.leaves[0], String):
name = ensure_context(option.leaves[0].get_string_value())
if not name:
if stop_on_error:
return None
else:
continue
option_values[name] = option.leaves[1]
return option_values
def get_rules_list(self):
from mathics.core.rules import Rule
list_expr = self.flatten(Symbol('List'))
list = []
if list_expr.has_form('List', None):
list.extend(list_expr.leaves)
else:
list.append(list_expr)
rules = []
for item in list:
if not item.has_form(('Rule', 'RuleDelayed'), 2):
return None
rule = Rule(item.leaves[0], item.leaves[1])
rules.append(rule)
return rules
def to_sympy(self, **kwargs):
raise NotImplementedError
def to_mpmath(self):
return None
def round_to_float(self, evaluation=None, permit_complex=False):
'''
Try to round to python float. Return None if not possible.
'''
if evaluation is None:
value = self
else:
value = Expression('N', self).evaluate(evaluation)
if isinstance(value, Number):
value = value.round()
return value.get_float_value(permit_complex=permit_complex)
def __abs__(self):
return Expression('Abs', self)
def __pos__(self):
return self
def __neg__(self):
return Expression('Times', self, -1)
def __add__(self, other):
return Expression('Plus', self, other)
def __sub__(self, other):
return Expression('Plus', self, Expression('Times', other, -1))
def __mul__(self, other):
return Expression('Times', self, other)
def __truediv__(self, other):
return Expression('Divide', self, other)
def __floordiv__(self, other):
return Expression('Floor', Expression('Divide', self, other))
def __pow__(self, other):
return Expression('Power', self, other)
class Monomial(object):
"""
An object to sort monomials, used in Expression.get_sort_key and
Symbol.get_sort_key.
"""
def __init__(self, exps_dict):
self.exps = exps_dict
def __lt__(self, other):
return self.__cmp(other) < 0
def __gt__(self, other):
return self.__cmp(other) > 0
def __le__(self, other):
return self.__cmp(other) <= 0
def __ge__(self, other):
return self.__cmp(other) >= 0
def __eq__(self, other):
return self.__cmp(other) == 0
def __ne__(self, other):
return self.__cmp(other) != 0
def __cmp(self, other):
self_exps = self.exps.copy()
other_exps = other.exps.copy()
for var in self.exps:
if var in other.exps:
dec = min(self_exps[var], other_exps[var])
self_exps[var] -= dec
if not self_exps[var]:
del self_exps[var]
other_exps[var] -= dec
if not other_exps[var]:
del other_exps[var]
self_exps = sorted((var, exp) for var, exp in self_exps.items())
other_exps = sorted((var, exp) for var, exp in other_exps.items())
index = 0
self_len = len(self_exps)
other_len = len(other_exps)
while True:
if index >= self_len and index >= other_len:
return 0
if index >= self_len:
return -1 # self < other
if index >= other_len:
return 1 # self > other
self_var, self_exp = self_exps[index]
other_var, other_exp = other_exps[index]
if self_var < other_var:
return -1
if self_var > other_var:
return 1
if self_exp != other_exp:
if index + 1 == self_len or index + 1 == other_len:
# smaller exponents first
if self_exp < other_exp:
return -1
elif self_exp == other_exp:
return 0
else:
return 1
else:
# bigger exponents first
if self_exp < other_exp:
return 1
elif self_exp == other_exp:
return 0
else:
return -1
index += 1
return 0
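def _monomial_ordering_sketch():
    # Illustrative sketch only: Monomial compares products of symbols by their
    # exponent dictionaries, as used in Expression.get_sort_key and
    # Symbol.get_sort_key. Shared powers cancel first, then the comparison
    # falls back to variable names and exponents.
    x_squared = Monomial({'Global`x': 2})
    x_times_y = Monomial({'Global`x': 1, 'Global`y': 1})
    return x_squared < x_times_y  # -> True: after cancelling x, x precedes y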
def _sequences(leaves):
for i, leaf in enumerate(leaves):
if leaf.get_head_name() == 'System`Sequence' or leaf.sequences():
yield i
class Expression(BaseExpression):
def __new__(cls, head, *leaves):
self = super(Expression, cls).__new__(cls)
if isinstance(head, str):
head = Symbol(head)
self.head = head
self.leaves = [from_python(leaf) for leaf in leaves]
self._sequences = None
return self
def sequences(self):
seq = self._sequences
if seq is None:
seq = list(_sequences(self.leaves))
self._sequences = seq
return seq
def _flatten_sequence(self, sequence):
indices = self.sequences()
if not indices:
return self
leaves = self.leaves
flattened = []
extend = flattened.extend
k = 0
for i in indices:
extend(leaves[k:i])
extend(sequence(leaves[i]))
k = i + 1
extend(leaves[k:])
return Expression(self.head, *flattened)
def flatten_sequence(self):
def sequence(leaf):
if leaf.get_head_name() == 'System`Sequence':
return leaf.leaves
else:
return [leaf]
return self._flatten_sequence(sequence)
def flatten_pattern_sequence(self):
def sequence(leaf):
flattened = leaf.flatten_pattern_sequence()
if leaf.get_head_name() == 'System`Sequence' and leaf.pattern_sequence:
return flattened.leaves
else:
return [flattened]
expr = self._flatten_sequence(sequence)
if hasattr(self, 'options'):
expr.options = self.options
return expr
def copy(self):
result = Expression(
self.head.copy(), *[leaf.copy() for leaf in self.leaves])
result._sequences = self._sequences
result.options = self.options
result.original = self
# result.last_evaluated = self.last_evaluated
return result
def shallow_copy(self):
# this is a minimal, shallow copy: head, leaves are shared with
# the original, only the Expression instance is new.
expr = Expression(self.head)
expr.leaves = self.leaves
expr._sequences = self._sequences
expr.options = self.options
expr.last_evaluated = self.last_evaluated
return expr
def set_positions(self, position=None):
self.position = position
self.head.set_positions(ExpressionPointer(self, 0))
for index, leaf in enumerate(self.leaves):
leaf.set_positions(ExpressionPointer(self, index + 1))
def get_head(self):
return self.head
def get_leaves(self):
return self.leaves
def get_lookup_name(self):
return self.head.get_lookup_name()
def has_form(self, heads, *leaf_counts):
"""
leaf_counts:
(,): no leaves allowed
(None,): no constraint on number of leaves
(n, None): leaf count >= n
(n1, n2, ...): leaf count in {n1, n2, ...}
"""
head_name = self.head.get_name()
if isinstance(heads, (tuple, list, set)):
if head_name not in [ensure_context(h) for h in heads]:
return False
else:
if head_name != ensure_context(heads):
return False
if not leaf_counts:
return False
if leaf_counts and leaf_counts[0] is not None:
count = len(self.leaves)
if count not in leaf_counts:
if (len(leaf_counts) == 2 and # noqa
leaf_counts[1] is None and count >= leaf_counts[0]):
return True
else:
return False
return True
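    # Illustrative examples of the leaf_counts convention documented above
    # (comments only):
    #   Expression('Plus', 1, 2).has_form('Plus', 2)        -> True
    #   Expression('Plus', 1, 2).has_form('Plus', 3, None)  -> False (needs >= 3 leaves)
    #   Expression('List').has_form('List', None)           -> True (any leaf count)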
def has_symbol(self, symbol_name):
return self.head.has_symbol(symbol_name) or any(
leaf.has_symbol(symbol_name) for leaf in self.leaves)
def to_sympy(self, **kwargs):
from mathics.builtin import mathics_to_sympy
if 'converted_functions' in kwargs:
functions = kwargs['converted_functions']
if len(self.leaves) > 0 and self.get_head_name() in functions:
sym_args = [leaf.to_sympy() for leaf in self.leaves]
if None in sym_args:
return None
func = sympy.Function(str(
sympy_symbol_prefix + self.get_head_name()))(*sym_args)
return func
lookup_name = self.get_lookup_name()
builtin = mathics_to_sympy.get(lookup_name)
if builtin is not None:
sympy_expr = builtin.to_sympy(self, **kwargs)
if sympy_expr is not None:
return sympy_expr
return SympyExpression(self)
def to_python(self, *args, **kwargs):
"""
Convert the Expression to a Python object:
List[...] -> Python list
DirectedInfinity[1] -> inf
DirectedInfinity[-1] -> -inf
True/False -> True/False
Null -> None
Symbol -> '...'
String -> '"..."'
numbers -> Python number
If kwarg n_evaluation is given, apply N first to the expression.
"""
n_evaluation = kwargs.get('n_evaluation')
if n_evaluation is not None:
value = Expression('N', self).evaluate(n_evaluation)
return value.to_python()
head_name = self.head.get_name()
if head_name == 'System`List':
return [leaf.to_python(*args, **kwargs) for leaf in self.leaves]
if head_name == 'System`DirectedInfinity' and len(self.leaves) == 1:
direction = self.leaves[0].get_int_value()
if direction == 1:
return float('inf')
if direction == -1:
return -float('inf')
return self
def get_sort_key(self, pattern_sort=False):
if pattern_sort:
"""
Pattern sort key structure:
0: 0/2: Atom / Expression
1: pattern: 0 / 11-31 for blanks / 1 for empty Alternatives /
40 for OptionsPattern
2: 0/1: 0 for PatternTest
3: 0/1: 0 for Pattern
4: 0/1: 1 for Optional
5: head / 0 for atoms
6: leaves / 0 for atoms
7: 0/1: 0 for Condition
"""
name = self.head.get_name()
pattern = 0
if name == 'System`Blank':
pattern = 1
elif name == 'System`BlankSequence':
pattern = 2
elif name == 'System`BlankNullSequence':
pattern = 3
if pattern > 0:
if self.leaves:
pattern += 10
else:
pattern += 20
if pattern > 0:
return [2, pattern, 1, 1, 0, self.head.get_sort_key(True),
[leaf.get_sort_key(True) for leaf in self.leaves], 1]
if name == 'System`PatternTest':
if len(self.leaves) != 2:
return [3, 0, 0, 0, 0, self.head, self.leaves, 1]
sub = self.leaves[0].get_sort_key(True)
sub[2] = 0
return sub
elif name == 'System`Condition':
if len(self.leaves) != 2:
return [3, 0, 0, 0, 0, self.head, self.leaves, 1]
sub = self.leaves[0].get_sort_key(True)
sub[7] = 0
return sub
elif name == 'System`Pattern':
if len(self.leaves) != 2:
return [3, 0, 0, 0, 0, self.head, self.leaves, 1]
sub = self.leaves[1].get_sort_key(True)
sub[3] = 0
return sub
elif name == 'System`Optional':
if len(self.leaves) not in (1, 2):
return [3, 0, 0, 0, 0, self.head, self.leaves, 1]
sub = self.leaves[0].get_sort_key(True)
sub[4] = 1
return sub
elif name == 'System`Alternatives':
min_key = [4]
min = None
for leaf in self.leaves:
key = leaf.get_sort_key(True)
if key < min_key:
min = leaf
min_key = key
if min is None:
# empty alternatives -> very restrictive pattern
return [2, 1]
return min_key
elif name == 'System`Verbatim':
if len(self.leaves) != 1:
return [3, 0, 0, 0, 0, self.head, self.leaves, 1]
return self.leaves[0].get_sort_key(True)
elif name == 'System`OptionsPattern':
return [2, 40, 0, 1, 1, 0, self.head, self.leaves, 1]
else:
# Append [4] to leaves so that longer expressions have higher
# precedence
return [
2, 0, 1, 1, 0, self.head.get_sort_key(True),
[leaf.get_sort_key(True) for leaf in self.leaves] + [[4]],
1]
else:
exps = {}
head = self.head.get_name()
if head == 'System`Times':
for leaf in self.leaves:
name = leaf.get_name()
if leaf.has_form('Power', 2):
var = leaf.leaves[0].get_name()
exp = leaf.leaves[1].round_to_float()
if var and exp is not None:
exps[var] = exps.get(var, 0) + exp
elif name:
exps[name] = exps.get(name, 0) + 1
elif self.has_form('Power', 2):
var = self.leaves[0].get_name()
exp = self.leaves[1].round_to_float()
if var and exp is not None:
exps[var] = exps.get(var, 0) + exp
if exps:
return [1 if self.is_numeric() else 2, 2, Monomial(exps), 1,
self.head, self.leaves, 1]
else:
return [1 if self.is_numeric() else 2, 3, self.head,
self.leaves, 1]
def same(self, other):
if id(self) == id(other):
return True
if self.get_head_name() != other.get_head_name():
return False
if not self.head.same(other.get_head()):
return False
if len(self.leaves) != len(other.get_leaves()):
return False
for leaf, other in zip(self.leaves, other.get_leaves()):
if not leaf.same(other):
return False
return True
def flatten(self, head, pattern_only=False, callback=None, level=None):
if level is not None and level <= 0:
return self
sub_level = None if level is None else level - 1
do_flatten = False
for leaf in self.leaves:
if leaf.get_head().same(head) and (not pattern_only or leaf.pattern_sequence):
do_flatten = True
break
if do_flatten:
new_leaves = []
for leaf in self.leaves:
if leaf.get_head().same(head) and (not pattern_only or leaf.pattern_sequence):
new_leaf = leaf.flatten(head, pattern_only, callback, level=sub_level)
if callback is not None:
callback(new_leaf.leaves, leaf)
new_leaves.extend(new_leaf.leaves)
else:
new_leaves.append(leaf)
return Expression(self.head, *new_leaves)
else:
return self
def evaluate(self, evaluation):
from mathics.core.evaluation import ReturnInterrupt
expr = self
reevaluate = True
limit = None
iteration = 1
names = set()
definitions = evaluation.definitions
old_options = evaluation.options
evaluation.inc_recursion_depth()
try:
while reevaluate:
# changed before last evaluated?
if expr.last_evaluated is not None and definitions.last_changed(expr) <= expr.last_evaluated:
break
names.add(expr.get_lookup_name())
if hasattr(expr, 'options') and expr.options:
evaluation.options = expr.options
expr, reevaluate = expr.evaluate_next(evaluation)
if not reevaluate:
break
iteration += 1
if limit is None:
limit = definitions.get_config_value('$IterationLimit')
if limit is None:
limit = 'inf'
if limit != 'inf' and iteration > limit:
evaluation.error('$IterationLimit', 'itlim', limit)
return Symbol('$Aborted')
# "Return gets discarded only if it was called from within the r.h.s.
# of a user-defined rule."
# http://mathematica.stackexchange.com/questions/29353/how-does-return-work
        # Otherwise it propagates up.
#
except ReturnInterrupt as ret:
if names.intersection(definitions.user.keys()):
return ret.expr
else:
raise ret
finally:
evaluation.options = old_options
evaluation.dec_recursion_depth()
return expr
def evaluate_next(self, evaluation):
head = self.head.evaluate(evaluation)
attributes = head.get_attributes(evaluation.definitions)
leaves = self.leaves[:]
def rest_range(indices):
if 'System`HoldAllComplete' not in attributes:
for index in indices:
leaf = leaves[index]
if leaf.has_form('Evaluate', 1):
leaves[index] = leaf.evaluate(evaluation)
def eval_range(indices):
for index in indices:
leaf = leaves[index]
if not leaf.has_form('Unevaluated', 1):
leaves[index] = leaf.evaluate(evaluation)
if 'System`HoldAll' in attributes or 'System`HoldAllComplete' in attributes:
# eval_range(range(0, 0))
rest_range(range(len(leaves)))
elif 'System`HoldFirst' in attributes:
rest_range(range(0, min(1, len(leaves))))
eval_range(range(1, len(leaves)))
elif 'System`HoldRest' in attributes:
eval_range(range(0, min(1, len(leaves))))
rest_range(range(1, len(leaves)))
else:
eval_range(range(len(leaves)))
# rest_range(range(0, 0))
new = Expression(head, *leaves)
if ('System`SequenceHold' not in attributes and # noqa
'System`HoldAllComplete' not in attributes):
new = new.flatten_sequence()
leaves = new.leaves
for leaf in leaves:
leaf.unevaluated = False
if 'System`HoldAllComplete' not in attributes:
dirty_new = False
for index, leaf in enumerate(leaves):
if leaf.has_form('Unevaluated', 1):
leaves[index] = leaf.leaves[0]
leaves[index].unevaluated = True
dirty_new = True
if dirty_new:
new = Expression(head, *leaves)
def flatten_callback(new_leaves, old):
for leaf in new_leaves:
leaf.unevaluated = old.unevaluated
if 'System`Flat' in attributes:
new = new.flatten(new.head, callback=flatten_callback)
if 'System`Orderless' in attributes:
new.sort()
new.last_evaluated = evaluation.definitions.now
if 'System`Listable' in attributes:
done, threaded = new.thread(evaluation)
if done:
if threaded.same(new):
new.last_evaluated = evaluation.definitions.now
return new, False
else:
return threaded, True
def rules():
rules_names = set()
if 'System`HoldAllComplete' not in attributes:
for leaf in leaves:
name = leaf.get_lookup_name()
if len(name) > 0: # only lookup rules if this is a symbol
if name not in rules_names:
rules_names.add(name)
for rule in evaluation.definitions.get_upvalues(name):
yield rule
lookup_name = new.get_lookup_name()
if lookup_name == new.get_head_name():
for rule in evaluation.definitions.get_downvalues(lookup_name):
yield rule
else:
for rule in evaluation.definitions.get_subvalues(lookup_name):
yield rule
for rule in rules():
result = rule.apply(new, evaluation, fully=False)
if result is not None:
if result.same(new):
new.last_evaluated = evaluation.definitions.now
return new, False
else:
return result, True
# Expression did not change, re-apply Unevaluated
for index, leaf in enumerate(new.leaves):
if leaf.unevaluated:
new.leaves[index] = Expression('Unevaluated', leaf)
new.unformatted = self.unformatted
new.last_evaluated = evaluation.definitions.now
return new, False
def evaluate_leaves(self, evaluation):
leaves = [leaf.evaluate(evaluation) for leaf in self.leaves]
head = self.head.evaluate_leaves(evaluation)
return Expression(head, *leaves)
def __str__(self):
return '%s[%s]' % (
self.head, ', '.join([str(leaf) for leaf in self.leaves]))
def __repr__(self):
return '<Expression: %s>' % self
def process_style_box(self, options):
if self.has_form('StyleBox', 1, None):
rules = self.leaves[1:]
for rule in rules:
if rule.has_form('Rule', 2):
name = rule.leaves[0].get_name()
value = rule.leaves[1]
if name == 'System`ShowStringCharacters':
value = value.is_true()
options = options.copy()
options['show_string_characters'] = value
elif name == 'System`ImageSizeMultipliers':
if value.has_form('List', 2):
m1 = value.leaves[0].round_to_float()
m2 = value.leaves[1].round_to_float()
if m1 is not None and m2 is not None:
options = options.copy()
options['image_size_multipliers'] = (m1, m2)
return True, options
else:
return False, options
def boxes_to_text(self, **options):
from mathics.builtin import box_constructs
from mathics.builtin.base import BoxConstructError
is_style, options = self.process_style_box(options)
if is_style:
return self.leaves[0].boxes_to_text(**options)
head = self.head.get_name()
box_construct = box_constructs.get(head)
if box_construct is not None:
try:
return box_construct.boxes_to_text(self.leaves, **options)
except BoxConstructError:
raise BoxError(self, 'text')
if (self.has_form('RowBox', 1) and # nopep8
self.leaves[0].has_form('List', None)):
return ''.join([leaf.boxes_to_text(**options)
for leaf in self.leaves[0].leaves])
elif self.has_form('SuperscriptBox', 2):
return '^'.join([leaf.boxes_to_text(**options)
for leaf in self.leaves])
else:
raise BoxError(self, 'text')
def boxes_to_xml(self, **options):
from mathics.builtin import box_constructs
from mathics.builtin.base import BoxConstructError
is_style, options = self.process_style_box(options)
if is_style:
return self.leaves[0].boxes_to_xml(**options)
head = self.head.get_name()
box_construct = box_constructs.get(head)
if box_construct is not None:
try:
return box_construct.boxes_to_xml(self.leaves, **options)
except BoxConstructError:
# raise # uncomment this to see what is going wrong in
# constructing boxes
raise BoxError(self, 'xml')
name = self.head.get_name()
if (name == 'System`RowBox' and len(self.leaves) == 1 and # nopep8
self.leaves[0].get_head_name() == 'System`List'):
result = []
inside_row = options.get('inside_row')
# inside_list = options.get('inside_list')
options = options.copy()
def is_list_interior(content):
if (content.has_form('List', None) and
all(leaf.get_string_value() == ','
for leaf in content.leaves[1::2])):
return True
return False
is_list_row = False
if (len(self.leaves[0].leaves) == 3 and # nopep8
self.leaves[0].leaves[0].get_string_value() == '{' and
self.leaves[0].leaves[2].get_string_value() == '}' and
self.leaves[0].leaves[1].has_form('RowBox', 1)):
content = self.leaves[0].leaves[1].leaves[0]
if is_list_interior(content):
is_list_row = True
if not inside_row and is_list_interior(self.leaves[0]):
is_list_row = True
if is_list_row:
options['inside_list'] = True
else:
options['inside_row'] = True
for leaf in self.leaves[0].get_leaves():
result.append(leaf.boxes_to_xml(**options))
return '<mrow>%s</mrow>' % ' '.join(result)
else:
options = options.copy()
options['inside_row'] = True
if name == 'System`SuperscriptBox' and len(self.leaves) == 2:
return '<msup>%s %s</msup>' % (
self.leaves[0].boxes_to_xml(**options),
self.leaves[1].boxes_to_xml(**options))
if name == 'System`SubscriptBox' and len(self.leaves) == 2:
return '<msub>%s %s</msub>' % (
self.leaves[0].boxes_to_xml(**options),
self.leaves[1].boxes_to_xml(**options))
if name == 'System`SubsuperscriptBox' and len(self.leaves) == 3:
return '<msubsup>%s %s %s</msubsup>' % (
self.leaves[0].boxes_to_xml(**options),
self.leaves[1].boxes_to_xml(**options),
self.leaves[2].boxes_to_xml(**options))
elif name == 'System`FractionBox' and len(self.leaves) == 2:
return '<mfrac>%s %s</mfrac>' % (
self.leaves[0].boxes_to_xml(**options),
self.leaves[1].boxes_to_xml(**options))
elif name == 'System`SqrtBox' and len(self.leaves) == 1:
return '<msqrt>%s</msqrt>' % (
self.leaves[0].boxes_to_xml(**options))
else:
raise BoxError(self, 'xml')
def boxes_to_tex(self, **options):
from mathics.builtin import box_constructs
from mathics.builtin.base import BoxConstructError
def block(tex, only_subsup=False):
if len(tex) == 1:
return tex
else:
if not only_subsup or '_' in tex or '^' in tex:
return '{%s}' % tex
else:
return tex
is_style, options = self.process_style_box(options)
if is_style:
return self.leaves[0].boxes_to_tex(**options)
head = self.head.get_name()
box_construct = box_constructs.get(head)
if box_construct is not None:
try:
return box_construct.boxes_to_tex(self.leaves, **options)
except BoxConstructError:
raise BoxError(self, 'tex')
name = self.head.get_name()
if (name == 'System`RowBox' and len(self.leaves) == 1 and # nopep8
self.leaves[0].get_head_name() == 'System`List'):
return ''.join([leaf.boxes_to_tex(**options)
for leaf in self.leaves[0].get_leaves()])
elif name == 'System`SuperscriptBox' and len(self.leaves) == 2:
tex1 = self.leaves[0].boxes_to_tex(**options)
sup_string = self.leaves[1].get_string_value()
if sup_string == '\u2032':
return "%s'" % tex1
elif sup_string == '\u2032\u2032':
return "%s''" % tex1
else:
return '%s^%s' % (
block(tex1, True),
block(self.leaves[1].boxes_to_tex(**options)))
elif name == 'System`SubscriptBox' and len(self.leaves) == 2:
return '%s_%s' % (
block(self.leaves[0].boxes_to_tex(**options), True),
block(self.leaves[1].boxes_to_tex(**options)))
elif name == 'System`SubsuperscriptBox' and len(self.leaves) == 3:
return '%s_%s^%s' % (
block(self.leaves[0].boxes_to_tex(**options), True),
block(self.leaves[1].boxes_to_tex(**options)),
block(self.leaves[2].boxes_to_tex(**options)))
elif name == 'System`FractionBox' and len(self.leaves) == 2:
return '\\frac{%s}{%s}' % (
self.leaves[0].boxes_to_tex(**options),
self.leaves[1].boxes_to_tex(**options))
elif name == 'System`SqrtBox' and len(self.leaves) == 1:
return '\\sqrt{%s}' % self.leaves[0].boxes_to_tex(**options)
else:
raise BoxError(self, 'tex')
def default_format(self, evaluation, form):
return '%s[%s]' % (self.head.default_format(evaluation, form),
', '.join([leaf.default_format(evaluation, form)
for leaf in self.leaves]))
def sort(self, pattern=False):
" Sort the leaves according to internal ordering. "
if pattern:
self.leaves.sort(key=lambda e: e.get_sort_key(pattern_sort=True))
else:
self.leaves.sort()
def filter_leaves(self, head_name):
# TODO: should use sorting
head_name = ensure_context(head_name)
return [leaf for leaf in self.leaves
if leaf.get_head_name() == head_name]
def apply_rules(self, rules, evaluation, level=0, options=None):
"""for rule in rules:
result = rule.apply(self, evaluation, fully=False)
if result is not None:
return result"""
# to be able to access it inside inner function
new_applied = [False]
def apply_leaf(leaf):
new, sub_applied = leaf.apply_rules(
rules, evaluation, level + 1, options)
new_applied[0] = new_applied[0] or sub_applied
return new
def descend(expr):
return Expression(expr.head, *[apply_leaf(leaf) for leaf in expr.leaves])
if options is None: # default ReplaceAll mode; replace breadth first
result, applied = super(
Expression, self).apply_rules(rules, evaluation, level, options)
if applied:
return result, True
head, applied = self.head.apply_rules(rules, evaluation, level, options)
new_applied[0] = applied
return descend(Expression(head, *self.leaves)), new_applied[0]
else: # Replace mode; replace depth first
expr = descend(self)
expr, applied = super(
Expression, expr).apply_rules(rules, evaluation, level, options)
new_applied[0] = new_applied[0] or applied
if not applied and options['heads']:
# heads in Replace are treated at the level of the arguments, i.e. level + 1
head, applied = expr.head.apply_rules(rules, evaluation, level + 1, options)
new_applied[0] = new_applied[0] or applied
expr = Expression(head, *expr.leaves)
return expr, new_applied[0]
def replace_vars(self, vars, options=None,
in_scoping=True, in_function=True):
from mathics.builtin.scoping import get_scoping_vars
if not in_scoping:
if (self.head.get_name() in ('System`Module', 'System`Block', 'System`With') and
len(self.leaves) > 0): # nopep8
scoping_vars = set(name for name, new_def in get_scoping_vars(self.leaves[0]))
"""for var in new_vars:
if var in scoping_vars:
del new_vars[var]"""
vars = {var: value for var, value in vars.items()
if var not in scoping_vars}
leaves = self.leaves
if in_function:
if (self.head.get_name() == 'System`Function' and
len(self.leaves) > 1 and
(self.leaves[0].has_form('List', None) or
self.leaves[0].get_name())):
if self.leaves[0].get_name():
func_params = [self.leaves[0].get_name()]
else:
func_params = [leaf.get_name()
for leaf in self.leaves[0].leaves]
if '' not in func_params:
body = self.leaves[1]
replacement = {name: Symbol(name + '$') for name in func_params}
func_params = [Symbol(name + '$') for name in func_params]
body = body.replace_vars(replacement, options, in_scoping)
leaves = [Expression('List', *func_params), body] + \
self.leaves[2:]
if not vars: # might just be a symbol set via Set[] we looked up here
return self.shallow_copy()
return Expression(
self.head.replace_vars(
vars, options=options, in_scoping=in_scoping),
*[leaf.replace_vars(vars, options=options, in_scoping=in_scoping)
for leaf in leaves])
def replace_slots(self, slots, evaluation):
if self.head.get_name() == 'System`Slot':
if len(self.leaves) != 1:
evaluation.message_args('Slot', len(self.leaves), 1)
else:
slot = self.leaves[0].get_int_value()
if slot is None or slot < 0:
evaluation.message('Function', 'slot', self.leaves[0])
elif slot > len(slots) - 1:
evaluation.message('Function', 'slotn', slot)
else:
return slots[int(slot)]
elif self.head.get_name() == 'System`SlotSequence':
if len(self.leaves) != 1:
evaluation.message_args('SlotSequence', len(self.leaves), 1)
else:
slot = self.leaves[0].get_int_value()
if slot is None or slot < 1:
evaluation.error('Function', 'slot', self.leaves[0])
return Expression('Sequence', *slots[slot:])
elif (self.head.get_name() == 'System`Function' and
len(self.leaves) == 1):
# do not replace Slots in nested Functions
return self
return Expression(self.head.replace_slots(slots, evaluation),
*[leaf.replace_slots(slots, evaluation)
for leaf in self.leaves])
def thread(self, evaluation, head=None):
if head is None:
head = Symbol('List')
items = []
dim = None
for leaf in self.leaves:
if leaf.get_head().same(head):
if dim is None:
dim = len(leaf.leaves)
items = [(items + [leaf]) for leaf in leaf.leaves]
elif len(leaf.leaves) != dim:
evaluation.message('Thread', 'tdlen')
return True, self
else:
for index in range(dim):
items[index].append(leaf.leaves[index])
else:
if dim is None:
items.append(leaf)
else:
for item in items:
item.append(leaf)
if dim is None:
return False, self
else:
leaves = [Expression(self.head, *item) for item in items]
return True, Expression(head, *leaves)
def is_numeric(self):
return (self.head.get_name() in system_symbols(
'Sqrt', 'Times', 'Plus', 'Subtract', 'Minus', 'Power', 'Abs',
'Divide', 'Sin') and
all(leaf.is_numeric() for leaf in self.leaves))
# TODO: complete list of numeric functions, or access NumericFunction
# attribute
def numerify(self, evaluation):
_prec = None
for leaf in self.leaves:
if leaf.is_inexact():
leaf_prec = leaf.get_precision()
if _prec is None or leaf_prec < _prec:
_prec = leaf_prec
if _prec is not None:
new_leaves = self.leaves[:]
for index in range(len(self.leaves)):
leaf = self.leaves[index]
# Don't "numerify" numbers: they should be numerified
# automatically by the processing function,
# and we don't want to lose exactness in e.g. 1.0+I.
if not isinstance(leaf, Number):
n_expr = Expression('N', leaf, Integer(dps(_prec)))
n_result = n_expr.evaluate(evaluation)
if isinstance(n_result, Number):
new_leaves[index] = n_result
return Expression(self.head, *new_leaves)
else:
return self
def get_atoms(self, include_heads=True):
if include_heads:
atoms = self.head.get_atoms()
else:
atoms = []
for leaf in self.leaves:
atoms.extend(leaf.get_atoms())
return atoms
def __hash__(self):
return hash(('Expression', self.head) + tuple(self.leaves))
def user_hash(self, update):
update(("%s>%d>" % (self.get_head_name(), len(self.leaves))).encode('utf8'))
for leaf in self.leaves:
leaf.user_hash(update)
def __getnewargs__(self):
return (self.head, self.leaves)
class Atom(BaseExpression):
def is_atom(self):
return True
def has_form(self, heads, *leaf_counts):
if leaf_counts:
return False
name = self.get_atom_name()
if isinstance(heads, tuple):
return name in heads
else:
return heads == name
def has_symbol(self, symbol_name):
return False
def get_head(self):
return Symbol(self.get_atom_name())
def get_atom_name(self):
return self.__class__.__name__
def __repr__(self):
return '<%s: %s>' % (self.get_atom_name(), self)
def replace_vars(self, vars, options=None, in_scoping=True):
return self
def replace_slots(self, slots, evaluation):
return self
def numerify(self, evaluation):
return self
def copy(self):
result = self.do_copy()
result.original = self
return result
def set_positions(self, position=None):
self.position = position
def get_sort_key(self, pattern_sort=False):
if pattern_sort:
return [0, 0, 1, 1, 0, 0, 0, 1]
else:
raise NotImplementedError
def get_atoms(self, include_heads=True):
return [self]
def atom_to_boxes(self, f, evaluation):
raise NotImplementedError
class Symbol(Atom):
def __new__(cls, name, sympy_dummy=None):
self = super(Symbol, cls).__new__(cls)
self.name = ensure_context(name)
self.sympy_dummy = sympy_dummy
return self
def __str__(self):
return self.name
def do_copy(self):
return Symbol(self.name)
def boxes_to_text(self, **options):
return str(self.name)
def atom_to_boxes(self, f, evaluation):
return String(evaluation.definitions.shorten_name(self.name))
def to_sympy(self, **kwargs):
from mathics.builtin import mathics_to_sympy
if self.sympy_dummy is not None:
return self.sympy_dummy
builtin = mathics_to_sympy.get(self.name)
if (builtin is None or not builtin.sympy_name or # nopep8
not builtin.is_constant()):
return sympy.Symbol(sympy_symbol_prefix + self.name)
else:
return builtin.to_sympy(self)
def to_python(self, *args, **kwargs):
if self.name == 'System`True':
return True
if self.name == 'System`False':
return False
if self.name == 'System`Null':
return None
n_evaluation = kwargs.get('n_evaluation')
if n_evaluation is not None:
value = Expression('N', self).evaluate(n_evaluation)
return value.to_python()
# return name as string (Strings are returned with quotes)
return self.name
def default_format(self, evaluation, form):
return self.name
def get_attributes(self, definitions):
return definitions.get_attributes(self.name)
def get_name(self):
return self.name
def is_symbol(self):
return True
def get_sort_key(self, pattern_sort=False):
if pattern_sort:
return super(Symbol, self).get_sort_key(True)
else:
return [1 if self.is_numeric() else 2,
2, Monomial({self.name: 1}), 0, self.name, 1]
def same(self, other):
return isinstance(other, Symbol) and self.name == other.name
def replace_vars(self, vars, options={}, in_scoping=True):
assert all(fully_qualified_symbol_name(v) for v in vars)
var = vars.get(self.name, None)
if var is None:
return self
else:
return var
def has_symbol(self, symbol_name):
return self.name == ensure_context(symbol_name)
def evaluate(self, evaluation):
rules = evaluation.definitions.get_ownvalues(self.name)
for rule in rules:
result = rule.apply(self, evaluation, fully=True)
if result is not None and not result.same(self):
return result.evaluate(evaluation)
return self
def is_true(self):
return self.name == 'System`True'
def is_numeric(self):
return self.name in system_symbols(
'Pi', 'E', 'EulerGamma', 'GoldenRatio',
'MachinePrecision', 'Catalan')
def __hash__(self):
return hash(('Symbol', self.name)) # to distinguish from String
def user_hash(self, update):
update(b'System`Symbol>' + self.name.encode('utf8'))
def __getnewargs__(self):
return (self.name, self.sympy_dummy)
class Number(Atom):
def __str__(self):
return str(self.value)
@staticmethod
def from_mpmath(value, prec=None):
'Converts mpf or mpc to Number.'
if isinstance(value, mpmath.mpf):
if prec is None:
return MachineReal(float(value))
else:
# HACK: use str here to prevent loss of precision
return PrecisionReal(sympy.Float(str(value), prec))
elif isinstance(value, mpmath.mpc):
real = Number.from_mpmath(value.real, prec)
imag = Number.from_mpmath(value.imag, prec)
return Complex(real, imag)
else:
raise TypeError(type(value))
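# Note: from_mpmath chooses the subclass from ``prec`` -- for example,
# Number.from_mpmath(mpmath.mpf(1.5)) gives a MachineReal, while passing a
# prec value gives a PrecisionReal, and mpmath.mpc inputs are split into a
# Complex built from two such reals.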
def is_numeric(self):
return True
def _ExponentFunction(value):
n = value.get_int_value()
if -5 <= n <= 5:
return Symbol('Null')
else:
return value
def _NumberFormat(man, base, exp, options):
if exp.get_string_value():
if options['_Form'] in ('System`InputForm', 'System`OutputForm', 'System`FullForm'):
return Expression('RowBox', Expression('List', man, String('*^'), exp))
else:
return Expression('RowBox', Expression('List', man, String(options['NumberMultiplier']),
Expression('SuperscriptBox', base, exp)))
else:
return man
_number_form_options = {
'DigitBlock': [0, 0],
'ExponentFunction': _ExponentFunction,
'ExponentStep': 1,
'NumberFormat': _NumberFormat,
'NumberPadding': ['', '0'],
'NumberPoint': '.',
'NumberSigns': ['-', ''],
'SignPadding': False,
'NumberMultiplier': '\u00d7',
}
class Integer(Number):
def __new__(cls, value):
n = int(value)
self = super(Integer, cls).__new__(cls)
self.value = n
return self
def boxes_to_text(self, **options):
return str(self.value)
def boxes_to_xml(self, **options):
return self.make_boxes('MathMLForm').boxes_to_xml(**options)
def boxes_to_tex(self, **options):
return str(self.value)
def make_boxes(self, form):
return String(str(self.value))
def atom_to_boxes(self, f, evaluation):
return self.make_boxes(f.get_name())
def default_format(self, evaluation, form):
return str(self.value)
def to_sympy(self, **kwargs):
return sympy.Integer(self.value)
def to_mpmath(self):
return mpmath.mpf(self.value)
def to_python(self, *args, **kwargs):
return self.value
def round(self, d=None):
if d is None:
return MachineReal(float(self.value))
else:
return PrecisionReal(sympy.Float(self.value, d))
def get_int_value(self):
return self.value
def same(self, other):
return isinstance(other, Integer) and self.value == other.value
def evaluate(self, evaluation):
evaluation.check_stopped()
return self
def get_sort_key(self, pattern_sort=False):
if pattern_sort:
return super(Integer, self).get_sort_key(True)
else:
return [0, 0, self.value, 0, 1]
def do_copy(self):
return Integer(self.value)
def __hash__(self):
return hash(('Integer', self.value))
def user_hash(self, update):
update(b'System`Integer>' + str(self.value).encode('utf8'))
def __getnewargs__(self):
return (self.value,)
def __neg__(self):
return Integer(-self.value)
@property
def is_zero(self):
return self.value == 0
class Rational(Number):
def __new__(cls, numerator, denominator=1):
self = super(Rational, cls).__new__(cls)
self.value = sympy.Rational(numerator, denominator)
return self
def atom_to_boxes(self, f, evaluation):
return self.format(evaluation, f.get_name())
def to_sympy(self, **kwargs):
return self.value
def to_mpmath(self):
return mpmath.mpf(self.value)
def to_python(self, *args, **kwargs):
return float(self.value)
def round(self, d=None):
if d is None:
return MachineReal(float(self.value))
else:
return PrecisionReal(self.value.n(d))
def same(self, other):
return isinstance(other, Rational) and self.value == other.value
def numerator(self):
return Integer(self.value.as_numer_denom()[0])
def denominator(self):
return Integer(self.value.as_numer_denom()[1])
def do_format(self, evaluation, form):
assert fully_qualified_symbol_name(form)
if form == 'System`FullForm':
return Expression(
Expression('HoldForm', Symbol('Rational')), self.numerator(),
self.denominator()).do_format(evaluation, form)
else:
numerator = self.numerator()
minus = numerator.value < 0
if minus:
numerator = Integer(-numerator.value)
result = Expression('Divide', numerator, self.denominator())
if minus:
result = Expression('Minus', result)
result = Expression('HoldForm', result)
return result.do_format(evaluation, form)
def default_format(self, evaluation, form):
return 'Rational[%s, %s]' % self.value.as_numer_denom()
def evaluate(self, evaluation):
evaluation.check_stopped()
return self
def get_sort_key(self, pattern_sort=False):
if pattern_sort:
return super(Rational, self).get_sort_key(True)
else:
# HACK: otherwise "Bus error" when comparing 1==1.
return [0, 0, sympy.Float(self.value), 0, 1]
def do_copy(self):
return Rational(self.value)
def __hash__(self):
return hash(("Rational", self.value))
def user_hash(self, update):
update(b'System`Rational>' + ('%s>%s' % self.value.as_numer_denom()).encode('utf8'))
def __getnewargs__(self):
return (self.numerator().get_int_value(), self.denominator().get_int_value())
def __neg__(self):
return Rational(-self.numerator().get_int_value(), self.denominator().get_int_value())
@property
def is_zero(self):
return self.value == 0
class Real(Number):
def __new__(cls, value, p=None):
if isinstance(value, str):
value = str(value)
if p is None:
digits = (''.join(re.findall('[0-9]+', value))).lstrip('0')
if digits == '': # Handle weird Mathematica zero case
p = max(prec(len(value.replace('0.', ''))),
machine_precision)
else:
p = prec(len(digits.zfill(dps(machine_precision))))
elif isinstance(value, sympy.Float):
if p is None:
p = value._prec + 1
elif isinstance(value,
(Integer, sympy.Number, mpmath.mpf, float, int)):
if p is not None and p > machine_precision:
value = str(value)
else:
raise TypeError('Unknown number type: %s (type %s)' % (
value, type(value)))
# return either machine precision or arbitrary precision real
if p is None or p == machine_precision:
return MachineReal.__new__(MachineReal, value)
else:
return PrecisionReal.__new__(PrecisionReal, value)
def boxes_to_text(self, **options):
return self.make_boxes('System`OutputForm').boxes_to_text(**options)
def boxes_to_xml(self, **options):
return self.make_boxes('System`MathMLForm').boxes_to_xml(**options)
def boxes_to_tex(self, **options):
return self.make_boxes('System`TeXForm').boxes_to_tex(**options)
def atom_to_boxes(self, f, evaluation):
return self.make_boxes(f.get_name())
def evaluate(self, evaluation):
evaluation.check_stopped()
return self
def get_sort_key(self, pattern_sort=False):
if pattern_sort:
return super(Real, self).get_sort_key(True)
return [0, 0, self.value, 0, 1]
def __eq__(self, other):
if isinstance(other, Real):
# MMA Docs: "Approximate numbers that differ in their last seven
# binary digits are considered equal"
_prec = min_prec(self, other)
with mpmath.workprec(_prec):
rel_eps = 0.5 ** (_prec - 7)
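# e.g. at machine precision (53 bits) rel_eps is 2 ** -46, so values
# agreeing in all but their last seven binary digits compare equal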
return mpmath.almosteq(self.to_mpmath(), other.to_mpmath(), abs_eps=0, rel_eps=rel_eps)
else:
return self.get_sort_key() == other.get_sort_key()
def __ne__(self, other):
# Real is a total order
return not (self == other)
def __hash__(self):
# ignore last 7 binary digits when hashing
_prec = self.get_precision()
return hash(("Real", self.to_sympy().n(dps(_prec))))
def user_hash(self, update):
# ignore last 7 binary digits when hashing
_prec = self.get_precision()
update(b'System`Real>' + str(self.to_sympy().n(dps(_prec))).encode('utf8'))
def get_atom_name(self):
return 'Real'
class MachineReal(Real):
'''
Machine precision real number.
Stored internally as a python float.
'''
def __new__(cls, value):
self = Number.__new__(cls)
self.value = float(value)
if math.isinf(self.value) or math.isnan(self.value):
raise OverflowError
return self
def to_python(self, *args, **kwargs):
return self.value
def to_sympy(self):
return sympy.Float(self.value)
def to_mpmath(self):
return mpmath.mpf(self.value)
def round(self, d=None):
return self
def same(self, other):
if isinstance(other, MachineReal):
return self.value == other.value
elif isinstance(other, PrecisionReal):
return self.to_sympy() == other.value
return False
def is_machine_precision(self):
return True
def get_precision(self):
return machine_precision
def get_float_value(self, permit_complex=False):
return self.value
def make_boxes(self, form):
from mathics.builtin.inout import number_form
_number_form_options['_Form'] = form # passed to _NumberFormat
if form in ('System`InputForm', 'System`FullForm'):
n = None
else:
n = 6
return number_form(self, n, None, None, _number_form_options)
def __getnewargs__(self):
return (self.value,)
def do_copy(self):
return MachineReal(self.value)
def __neg__(self):
return MachineReal(-self.value)
@property
def is_zero(self):
return self.value == 0.0
class PrecisionReal(Real):
'''
Arbitrary precision real number.
Stored internally as a sympy.Float.
'''
def __new__(cls, value):
self = Number.__new__(cls)
self.value = sympy.Float(value)
return self
def to_python(self, *args, **kwargs):
return float(self.value)
def to_sympy(self):
return self.value
def to_mpmath(self):
return mpmath.mpf(self.value)
def round(self, d=None):
if d is None:
return MachineReal(float(self.value))
else:
d = min(dps(self.get_precision()), d)
return PrecisionReal(self.value.n(d))
def same(self, other):
if isinstance(other, PrecisionReal):
return self.value == other.value
elif isinstance(other, MachineReal):
return self.value == other.to_sympy()
return False
def get_precision(self):
return self.value._prec + 1
def make_boxes(self, form):
from mathics.builtin.inout import number_form
_number_form_options['_Form'] = form # passed to _NumberFormat
return number_form(self, dps(self.get_precision()), None, None, _number_form_options)
def __getnewargs__(self):
return (self.value,)
def do_copy(self):
return PrecisionReal(self.value)
def __neg__(self):
return PrecisionReal(-self.value)
@property
def is_zero(self):
return self.value == 0.0
class Complex(Number):
'''
Complex wraps two real-valued Numbers.
'''
def __new__(cls, real, imag):
self = super(Complex, cls).__new__(cls)
if isinstance(real, Complex) or not isinstance(real, Number):
raise ValueError("Argument 'real' must be a real number.")
if isinstance(imag, Complex) or not isinstance(imag, Number):
raise ValueError("Argument 'imag' must be a real number.")
if imag.same(Integer(0)):
return real
if isinstance(real, MachineReal) and not isinstance(imag, MachineReal):
imag = imag.round()
if isinstance(imag, MachineReal) and not isinstance(real, MachineReal):
real = real.round()
self.real = real
self.imag = imag
return self
def atom_to_boxes(self, f, evaluation):
return self.format(evaluation, f.get_name())
def __str__(self):
return str(self.to_sympy())
def to_sympy(self, **kwargs):
return self.real.to_sympy() + sympy.I * self.imag.to_sympy()
def to_python(self, *args, **kwargs):
return complex(self.real.to_python(), self.imag.to_python())
def to_mpmath(self):
return mpmath.mpc(self.real.to_mpmath(), self.imag.to_mpmath())
def do_format(self, evaluation, form):
if form == 'System`FullForm':
return Expression(Expression('HoldForm', Symbol('Complex')),
self.real, self.imag).do_format(evaluation, form)
result = []
if self.is_machine_precision() or not self.real.is_zero:
result.append(self.real)
if self.imag.same(Integer(1)):
result.append(Symbol('I'))
else:
result.append(Expression('Times', self.imag, Symbol('I')))
if len(result) == 1:
result = result[0]
else:
result = Expression('Plus', *result)
return Expression('HoldForm', result).do_format(evaluation, form)
def default_format(self, evaluation, form):
return 'Complex[%s, %s]' % (self.real.default_format(evaluation, form),
self.imag.default_format(evaluation, form))
def get_sort_key(self, pattern_sort=False):
if pattern_sort:
return super(Complex, self).get_sort_key(True)
else:
return [0, 0, self.real.get_sort_key()[2],
self.imag.get_sort_key()[2], 1]
def same(self, other):
return (isinstance(other, Complex) and self.real == other.real and
self.imag == other.imag)
def evaluate(self, evaluation):
evaluation.check_stopped()
return self
def round(self, d=None):
real = self.real.round(d)
imag = self.imag.round(d)
return Complex(real, imag)
def is_machine_precision(self):
if self.real.is_machine_precision() or self.imag.is_machine_precision():
return True
return False
def get_float_value(self, permit_complex=False):
if permit_complex:
real = self.real.get_float_value()
imag = self.imag.get_float_value()
if real is not None and imag is not None:
return complex(real, imag)
else:
return None
def get_precision(self):
real_prec = self.real.get_precision()
imag_prec = self.imag.get_precision()
if imag_prec is None or real_prec is None:
return None
return min(real_prec, imag_prec)
def do_copy(self):
return Complex(self.real.do_copy(), self.imag.do_copy())
def __hash__(self):
return hash(('Complex', self.real, self.imag))
def user_hash(self, update):
update(b'System`Complex>')
update(self.real)
update(self.imag)
def __eq__(self, other):
if isinstance(other, Complex):
return self.real == other.real and self.imag == other.imag
else:
return self.get_sort_key() == other.get_sort_key()
def __getnewargs__(self):
return (self.real, self.imag)
def __neg__(self):
return Complex(-self.real, -self.imag)
@property
def is_zero(self):
return self.real.is_zero and self.imag.is_zero
def encode_mathml(text):
text = text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
text = text.replace('"', '&quot;').replace(' ', '&nbsp;')
return text.replace('\n', '<mspace linebreak="newline" />')
TEX_REPLACE = {
'{': r'\{',
'}': r'\}',
'_': r'\_',
'$': r'\$',
'%': r'\%',
'#': r'\#',
'&': r'\&',
'\\': r'\backslash{}',
'^': r'{}^{\wedge}',
'~': r'\sim{}',
'|': r'\vert{}',
}
TEX_TEXT_REPLACE = TEX_REPLACE.copy()
TEX_TEXT_REPLACE.update({
'<': r'$<$',
'>': r'$>$',
'~': r'$\sim$',
'|': r'$\vert$',
'\\': r'$\backslash$',
'^': r'${}^{\wedge}$',
})
TEX_REPLACE_RE = re.compile('([' + ''.join(
[re.escape(c) for c in TEX_REPLACE]) + '])')
def encode_tex(text, in_text=False):
def replace(match):
c = match.group(1)
repl = TEX_TEXT_REPLACE if in_text else TEX_REPLACE
# return TEX_REPLACE[c]
return repl.get(c, c)
text = TEX_REPLACE_RE.sub(replace, text)
text = text.replace('\n', '\\newline\n')
return text
extra_operators = set((',', '(', ')', '[', ']', '{', '}',
'\u301a', '\u301b', '\u00d7', '\u2032',
'\u2032\u2032', ' ', '\u2062', '\u222b', '\u2146'))
class String(Atom):
def __new__(cls, value):
self = super(String, cls).__new__(cls)
self.value = str(value)
return self
def __str__(self):
return '"%s"' % self.value
def boxes_to_text(self, show_string_characters=False, **options):
value = self.value
if (not show_string_characters and # nopep8
value.startswith('"') and value.endswith('"')):
value = value[1:-1]
return value
def boxes_to_xml(self, show_string_characters=False, **options):
from mathics.core.parser import is_symbol_name
from mathics.builtin import builtins
operators = set()
for name, builtin in builtins.items():
operator = builtin.get_operator_display()
if operator is not None:
operators.add(operator)
text = self.value
def render(format, string):
return format % encode_mathml(string)
if text.startswith('"') and text.endswith('"'):
if show_string_characters:
return render('<ms>%s</ms>', text[1:-1])
else:
return render('<mtext>%s</mtext>', text[1:-1])
elif text and ('0' <= text[0] <= '9' or text[0] == '.'):
return render('<mn>%s</mn>', text)
else:
if text in operators or text in extra_operators:
if text == '\u2146':
return render(
'<mo form="prefix" lspace="0.2em" rspace="0">%s</mo>', text)
if text == '\u2062':
return render(
'<mo form="prefix" lspace="0" rspace="0.2em">%s</mo>', text)
return render('<mo>%s</mo>', text)
elif is_symbol_name(text):
return render('<mi>%s</mi>', text)
else:
return render('<mtext>%s</mtext>', text)
def boxes_to_tex(self, show_string_characters=False, **options):
from mathics.builtin import builtins
operators = set()
for name, builtin in builtins.items():
operator = builtin.get_operator_display()
if operator is not None:
operators.add(operator)
text = self.value
def render(format, string, in_text=False):
return format % encode_tex(string, in_text)
if text.startswith('"') and text.endswith('"'):
if show_string_characters:
return render(r'\text{"%s"}', text[1:-1], in_text=True)
else:
return render(r'\text{%s}', text[1:-1], in_text=True)
elif text and text[0] in '0123456789-.':
return render('%s', text)
else:
if text == '\u2032':
return "'"
elif text == '\u2032\u2032':
return "''"
elif text == '\u2062':
return ' '
elif text == '\u221e':
return r'\infty '
elif text == '\u00d7':
return r'\times '
elif text in ('(', '[', '{'):
return render(r'\left%s', text)
elif text in (')', ']', '}'):
return render(r'\right%s', text)
elif text == '\u301a':
return r'\left[\left['
elif text == '\u301b':
return r'\right]\right]'
elif text == ',' or text == ', ':
return text
elif text == '\u222b':
return r'\int'
elif text == '\u2146':
return r'\, d'
elif text == '\u2211':
return r'\sum'
elif text == '\u220f':
return r'\prod'
elif len(text) > 1:
return render(r'\text{%s}', text, in_text=True)
else:
return render('%s', text)
def atom_to_boxes(self, f, evaluation):
return String('"' + str(self.value) + '"')
def do_copy(self):
return String(self.value)
def default_format(self, evaluation, form):
value = self.value.replace('\\', '\\\\').replace('"', '\\"')
return '"%s"' % value
def get_sort_key(self, pattern_sort=False):
if pattern_sort:
return super(String, self).get_sort_key(True)
else:
return [0, 1, self.value, 0, 1]
def same(self, other):
return isinstance(other, String) and self.value == other.value
def get_string_value(self):
return self.value
def to_sympy(self, **kwargs):
return None
def to_python(self, *args, **kwargs):
return '"%s"' % self.value # add quotes to distinguish from Symbols
def __hash__(self):
return hash(("String", self.value))
def user_hash(self, update):
# hashing a String is the one case where the user gets the untampered
# hash value of the string's text. this corresponds to MMA behavior.
update(self.value.encode('utf8'))
def __getnewargs__(self):
return (self.value,)
def get_default_value(name, evaluation, k=None, n=None):
pos = []
if k is not None:
pos.append(k)
if n is not None:
pos.append(n)
for pos_len in reversed(list(range(len(pos) + 1))):
# Try patterns from specific to general
defaultexpr = Expression('Default', Symbol(name),
*[Integer(index) for index in pos[:pos_len]])
result = evaluation.definitions.get_value(
name, 'System`DefaultValues', defaultexpr, evaluation)
if result is not None:
if result.same(defaultexpr):
result = result.evaluate(evaluation)
return result
return None
def print_parenthesizes(precedence, outer_precedence=None,
parenthesize_when_equal=False):
return (outer_precedence is not None and (
outer_precedence > precedence or (
outer_precedence == precedence and parenthesize_when_equal)))
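# Illustrative values (the precedences below are placeholders, not constants
# from this module): a lower-precedence subexpression inside a
# higher-precedence parent needs brackets, so print_parenthesizes(310, 400)
# is True, print_parenthesizes(400, 310) is False and
# print_parenthesizes(400, 400, True) is True.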
|
the-stack_106_32077 | # @TODO instead of using threading, replace with aiohttp!
from http.server import BaseHTTPRequestHandler, HTTPServer
import threading
import json
import logging
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)
def parse_jcu_command(path):
duration = None
# the logic is slightly weird. The main command is a 'structure' in dot notation,
# but there can be more than one "nested" value:
# > axis.t.displacement=-57,duration=0
# {"axis":{"t":{"displacement":-32,"duration":0}}}
# this is a generic way to parse, but in any case we'll need to interpret the commands at some point
"""
for some commands (`button`) the normal pattern:
a.b.c=3,d=4 => a: b: c
is instead:
a.b,c=3,d=4
in that case, _command should include the first additional term.
"""
# print("parsing {}".format(path))
commands = {} # list of all commands
_commands = commands # pointer
_path = path.split('.')
_v = _path[-1].split(',', 1)
if '=' not in _v[0]:
_command = _path[:-1] + [_v[0]]
# I have no examples yet... but there could be no "values". Might need to protect this.
_values = _v[1]
else:
_command = _path[:-1]
_values = _path[-1]
values = dict(v.split('=') for v in _values.split(','))
while _command:
k = _command.pop(0)
_commands[k] = {}
_commands = _commands[k]
for k, v in values.items():
try:
# check if this is a number...
v = float(v)
except:
pass
_commands[k] = v
if k == 'duration': # special case - do we care?
duration = v
return commands
def test_parse_jcu_command():
for (request, response) in [
# "jcu_config", {"config":{"timescale":-3,"magnitudescale":0,"keepalive":2000,"commandrepeat":500,"cameraudn":M-Series,"powerondelay":1000,"shutdowndelay":2000,"backlightdelay":1000,"deadbandatrest_idx":25,"deadbandatrest_idy":25,"deadbandatrest_idz":25, "deadbandatrest_idt":25,"deadbandtranslation_idx":0,"deadbandtranslation_idy":0,"deadbandtranslation_idz":0,"deadbandtranslation_idt":0}}),
("connect.jcuudn=uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6",
{"connect": {"jcuudn": "uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6"}}),
("keepalive.jcuudn=uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6",
{"keepalive": {"jcuudn": "uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6"}}),
("axis.t.displacement=-32,duration=0",
{"axis": {"t": {"displacement": -32, "duration": 0}}}),
("button.F,state=DOWN,duration=0,jcuudn=uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6",
{"button": {"F": {"state": "DOWN", "duration": 0}}}),
("button.F,state=UP,duration=210,jcuudn=uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6",
{"button": {"F": {"state": "UP", "duration": 210}}}),
# the way things are combined is just... crazy....
("button.F,state=DOWN,duration=2000,jcuudn=uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6,F,state=UP,duration=2010,jcuudn=uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6", {})
]:
r = parse_jcu_command(request)
# This test will fail if jcuudn is returned for `button` messages... though that's because it's a crappy api :-(
assert r == response, "parsing error: {} != {}".format(r, response)
class UPNPHTTPServerHandler(BaseHTTPRequestHandler):
"""
A HTTP handler that serves the UPnP XML descriptions and responds to the JCU commands.
JCU
===
known requests (and responses)
/bi-cgi?connect.jcuudn=uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6
{"connect":{"jcuudn":"uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6"}}\r\n
/bi-cgi?jcu_config
{"config":{"timescale":-3,"magnitudescale":0,"keepalive":2000,"commandrepeat":500,"cameraudn":M-Series,"powerondelay":1000,"shutdowndelay":2000,"backlightdelay":1000,"deadbandatrest_idx":25,"deadbandatrest_idy":25,"deadbandatrest_idz":25, "deadbandatrest_idt":25,"deadbandtranslation_idx":0,"deadbandtranslation_idy":0,"deadbandtranslation_idz":0,"deadbandtranslation_idt":0}}
/bi-cgi?keepalive.jcuudn=uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6
{"keepalive":{"jcuudn":"uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6"}}\r\n
/bi-cgi?axis.t.displacement=-57,duration=0
{"axis":{"t":{"displacement":-32,"duration":0}}}\r\n
/bi-cgi?button.F,state=DOWN,duration=0,jcuudn=uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6
{"button":{"F":{"state":"DOWN","duration":0}}}
/bi-cgi?button.F,state=UP,duration=210,jcuudn=uuid:JCU-1_0-1A22-EF16-11DD-84A7-00405F40A3D6
{"button":{"F":{"state":"UP","duration":210}}}
# in some cases the jcuudn is sent again... but only in some cases! and it's not replied.
# also this command is not conform, as it should be:
button.F,state -> button.F.state
"""
# Handler for the GET requests
def do_GET(self):
logger.info("Got a `GET` for %s from %s", self.path, self.client_address)
if self.path.startswith('/bi-cgi?'):
return self.parse_jcu_command(self.path.replace('/bi-cgi?', ''))
if self.path == '/description.xml': # this should be parametric... check server.description_url!
return self.return_description()
# I could centralize sending the response here
logger.warning("Unknown request %s", self.path)
def respond(self, message, content_type='text/html'):
try:
self.send_response(200)
self.send_header('Content-type', content_type)
self.end_headers()
self.wfile.write(message)
except Exception as e:
logger.error('Could not send out a response to the request `%s`: %s', self.path, e)
def return_description(self):
logger.debug("sending description: %s", self.server.description)
self.respond(self.server.description.encode(), 'application/xml')
return
def parse_jcu_command(self, path):
"""
process jcu_commands
:return:
"""
# bit of repetition with bi-cgi. Instead, each response could check if it can handle the path.
# Or we pass the "command" part to this method
if path == 'jcu_config':
# this is a bit of an exception so we could take it out of this method
# here we should return some config:
logger.info("received an SSDP XML config request (%s)", self.path)
response = json.dumps(self.camera_config())
else:
commands = parse_jcu_command(path)
# if k == 'duration': # special case - do we care?
response = json.dumps(commands)
self.respond(response.encode())
def camera_config(self):
# m87
config = {
"config": {
"timescale": -3, "magnitudescale": 0, "keepalive": 2000, "commandrepeat": 500,
"cameraudn": "M-Series", "powerondelay": 1000, "shutdowndelay": 2000,
"backlightdelay": 1000, "deadbandatrest_idx": 25, "deadbandatrest_idy": 25,
"deadbandatrest_idz": 25, "deadbandatrest_idt": 25, "deadbandtranslation_idx": 0,
"deadbandtranslation_idy": 0, "deadbandtranslation_idz": 0,
"deadbandtranslation_idt": 0
}
}
return config
class UPNPHTTPServerBase(HTTPServer):
"""
A simple HTTP server that knows the information about a UPnP device.
"""
def __init__(self, server_address, request_handler_class, description):
HTTPServer.__init__(self, server_address, request_handler_class)
self.port = server_address[1]
self.description = description
class UPNPHTTPServer(threading.Thread):
"""
A thread that runs UPNPHTTPServerBase.
"""
def __init__(self, server_address, description):
"""
:param server_address: (ip_address, port)
:param description: (text to be sent back on the description_url
"""
threading.Thread.__init__(self, daemon=True)
self.server = UPNPHTTPServerBase(server_address, UPNPHTTPServerHandler, description)
@property
def address(self):
return self.server.server_address
@property
def baseurl(self):
return 'http://{}:{}'.format(*self.address)
@property
def description_url(self):
return '/'.join([self.baseurl, 'description.xml'])
def run(self):
self.server.serve_forever()
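# Minimal usage sketch; the address, port and XML below are placeholders
# chosen for illustration, and this helper is not called anywhere.
def _example_start_server():
    description_xml = "<root><device><friendlyName>Example</friendlyName></device></root>"
    server = UPNPHTTPServer(("0.0.0.0", 8080), description_xml)
    server.start()  # daemon thread serving /description.xml and /bi-cgi?... requests
    logger.info("UPnP description served at %s", server.description_url)
    return server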
|
the-stack_106_32078 | def setup_fs(s3, key="", secret="", endpoint="", cert="", passwords={}):
"""Given a boolean specifying whether to use local disk or S3, setup filesystem
Syntax examples: AWS (http://s3.us-east-2.amazonaws.com), MinIO (http://192.168.0.1:9000)
The cert input is relevant if you're using MinIO with TLS enabled, for specifying the path to the certficiate.
The block_size is set to accomodate files up to 55 MB in size. If your log files are larger, adjust this value accordingly
"""
if s3:
import s3fs
block_size = 55 * 1024 * 1024
if "amazonaws" in endpoint:
fs = s3fs.S3FileSystem(key=key, secret=secret, default_block_size=block_size)
elif cert != "":
fs = s3fs.S3FileSystem(
key=key, secret=secret, client_kwargs={"endpoint_url": endpoint, "verify": cert}, default_block_size=block_size
)
else:
fs = s3fs.S3FileSystem(
key=key, secret=secret, client_kwargs={"endpoint_url": endpoint}, default_block_size=block_size
)
else:
from pathlib import Path
import canedge_browser
base_path = Path(__file__).parent
fs = canedge_browser.LocalFileSystem(base_path=base_path, passwords=passwords)
return fs
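# Minimal usage sketch (the endpoint, key and secret below are placeholders):
# fs = setup_fs(s3=False)  # local disk via canedge_browser
# fs = setup_fs(s3=True, key="<key>", secret="<secret>", endpoint="http://192.168.0.1:9000")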
# -----------------------------------------------
def load_dbc_files(dbc_paths):
"""Given a list of DBC file paths, create a list of conversion rule databases
"""
import can_decoder
from pathlib import Path
db_list = []
for dbc in dbc_paths:
db = can_decoder.load_dbc(Path(__file__).parent / dbc)
db_list.append(db)
return db_list
# -----------------------------------------------
def list_log_files(fs, devices, start_times, verbose=True, passwords={}):
"""Given a list of device paths, list log files from specified filesystem.
Data is loaded based on the list of start datetimes
"""
import canedge_browser, mdf_iter
log_files = []
if len(start_times):
for idx, device in enumerate(devices):
start = start_times[idx]
log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)
log_files.extend(log_files_device)
if verbose:
print(f"Found {len(log_files)} log files\n")
return log_files
def restructure_data(df_phys, res, full_col_names=False, pgn_names=False):
import pandas as pd
from J1939_PGN import J1939_PGN
df_phys_join = pd.DataFrame({"TimeStamp": []})
if not df_phys.empty:
for message, df_phys_message in df_phys.groupby("CAN ID"):
for signal, data in df_phys_message.groupby("Signal"):
pgn = J1939_PGN(int(message)).pgn
if full_col_names == True and pgn_names == False:
col_name = str(hex(int(message))).upper()[2:] + "." + signal
elif full_col_names == True and pgn_names == True:
col_name = str(hex(int(message))).upper()[2:] + "." + str(pgn) + "." + signal
elif full_col_names == False and pgn_names == True:
col_name = str(pgn) + "." + signal
else:
col_name = signal
df_phys_join = pd.merge_ordered(
df_phys_join,
data["Physical Value"].rename(col_name).resample(res).pad().dropna(),
on="TimeStamp",
fill_method="none",
).set_index("TimeStamp")
return df_phys_join
def add_custom_sig(df_phys, signal1, signal2, function, new_signal):
"""Helper function for calculating a new signal based on two signals and a function.
Returns a dataframe with the new signal name and physical values
"""
import pandas as pd
try:
s1 = df_phys[df_phys["Signal"] == signal1]["Physical Value"].rename(signal1)
s2 = df_phys[df_phys["Signal"] == signal2]["Physical Value"].rename(signal2)
df_new_sig = pd.merge_ordered(s1, s2, on="TimeStamp", fill_method="ffill",).set_index("TimeStamp")
df_new_sig = df_new_sig.apply(lambda x: function(x[0], x[1]), axis=1).dropna().rename("Physical Value").to_frame()
df_new_sig["Signal"] = new_signal
df_phys = df_phys.append(df_new_sig)
except:
print(f"Warning: Custom signal {new_signal} not created\n")
return df_phys
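# Example call with hypothetical signal names, deriving a ratio signal:
# df_phys = add_custom_sig(df_phys, "WheelSpeedFL", "WheelSpeedFR",
#                          lambda x, y: x / y if y else None, "WheelSpeedRatio")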
# -----------------------------------------------
class ProcessData:
def __init__(self, fs, db_list, signals=[], days_offset=None, verbose=True):
self.db_list = db_list
self.signals = signals
self.fs = fs
self.days_offset = days_offset
self.verbose = verbose
return
def extract_phys(self, df_raw):
"""Given df of raw data and list of decoding databases, create new def with
physical values (no duplicate signals and optionally filtered/rebaselined)
"""
import can_decoder
import pandas as pd
df_phys = pd.DataFrame()
for db in self.db_list:
df_decoder = can_decoder.DataFrameDecoder(db)
df_phys_temp = pd.DataFrame()
for length, group in df_raw.groupby("DataLength"):
df_phys_group = df_decoder.decode_frame(group)
df_phys_temp = df_phys_temp.append(df_phys_group)
df_phys = df_phys.append(df_phys_temp.sort_index())
# remove duplicates in case multiple DBC files contain identical signals
df_phys["datetime"] = df_phys.index
df_phys = df_phys.drop_duplicates(keep="first")
df_phys = df_phys.drop("datetime", axis=1)
# optionally filter and rebaseline the data
df_phys = self.filter_signals(df_phys)
if not df_phys.empty and type(self.days_offset) == int:
df_phys = self.rebaseline_data(df_phys)
return df_phys
def rebaseline_data(self, df_phys):
"""Given a df of physical values, this offsets the timestamp
to be equal to today, minus a given number of days.
"""
from datetime import datetime, timezone
import pandas as pd
delta_days = (datetime.now(timezone.utc) - df_phys.index.min()).days - self.days_offset
df_phys.index = df_phys.index + pd.Timedelta(delta_days, "day")
return df_phys
def filter_signals(self, df_phys):
"""Given a df of physical values, return only signals matched by filter
"""
if not df_phys.empty and len(self.signals):
df_phys = df_phys[df_phys["Signal"].isin(self.signals)]
return df_phys
def get_raw_data(self, log_file, lin=False, passwords={}):
"""Extract a df of raw data and device ID from log file.
Optionally include LIN bus data by setting lin=True
"""
import mdf_iter
with self.fs.open(log_file, "rb") as handle:
mdf_file = mdf_iter.MdfFile(handle, passwords=passwords)
device_id = self.get_device_id(mdf_file)
if lin:
df_raw_lin = mdf_file.get_data_frame_lin()
df_raw_lin["IDE"] = 0
df_raw_can = mdf_file.get_data_frame()
df_raw = df_raw_can.append(df_raw_lin)
else:
df_raw = mdf_file.get_data_frame()
return df_raw, device_id
def get_device_id(self, mdf_file):
return mdf_file.get_metadata()["HDComment.Device Information.serial number"]["value_raw"]
def print_log_summary(self, device_id, log_file, df_phys):
"""Print summary information for each log file
"""
if self.verbose:
print(
"\n---------------",
f"\nDevice: {device_id} | Log file: {log_file.split(device_id)[-1]} [Extracted {len(df_phys)} decoded frames]\nPeriod: {df_phys.index.min()} - {df_phys.index.max()}\n",
)
# -----------------------------------------------
class MultiFrameDecoder:
"""BETA class for handling transport protocol data. For each response ID, identify
sequences of subsequent frames and combine the relevant parts of the data payloads
into a single payload with the response ID as the ID. The original raw dataframe is
then cleansed of the original response ID sequence frames. Instead, the new concatenated
frames are inserted. Further, the class supports DBC decoding of the resulting modified raw data
:param tp_type: the class supports UDS ("uds"), NMEA 2000 Fast Packets ("nmea") and J1939 ("j1939")
:param df_raw: dataframe of raw CAN data from the mdf_iter module
SINGLE_FRAME_MASK: mask used in matching single frames
FIRST_FRAME_MASK: mask used in matching first frames
CONSEQ_FRAME_MASK: mask used in matching consecutive frames
SINGLE_FRAME: frame type reflecting a single frame response
FIRST_FRAME: frame type reflecting the first frame in a multi frame response
CONSEQ_FRAME: frame type reflecting a consecutive frame in a multi frame response
ff_payload_start: the combined payload will start at this byte in the FIRST_FRAME
bam_pgn: this is used in J1939 and marks the initial BAM message ID in DEC
res_id_list_hex: TP 'response CAN IDs' to process. For nmea/j1939, these are provided by default
"""
def __init__(self, tp_type=""):
frame_struct_uds = {
"SINGLE_FRAME_MASK": 0xF0,
"FIRST_FRAME_MASK": 0xF0,
"CONSEQ_FRAME_MASK": 0xF0,
"SINGLE_FRAME": 0x00,
"FIRST_FRAME": 0x10,
"CONSEQ_FRAME": 0x20,
"ff_payload_start": 2,
"bam_pgn": -1,
"res_id_list_hex": ["0x7E0", "0x7E9", "0x7EA", "0x7EB", "0x7EC", "0x7ED", "0x7EE", "0x7EF", "0x7EA", "0x7BB"],
}
frame_struct_j1939 = {
"SINGLE_FRAME_MASK": 0xFF,
"FIRST_FRAME_MASK": 0xFF,
"CONSEQ_FRAME_MASK": 0x00,
"SINGLE_FRAME": 0xFF,
"FIRST_FRAME": 0x20,
"CONSEQ_FRAME": 0x00,
"ff_payload_start": 8,
"bam_pgn": int("0xEC00", 16),
"res_id_list_hex": ["0xEB00"],
}
frame_struct_nmea = {
"SINGLE_FRAME_MASK": 0xFF,
"FIRST_FRAME_MASK": 0x0F,
"CONSEQ_FRAME_MASK": 0x00,
"SINGLE_FRAME": 0xFF,
"FIRST_FRAME": 0x00,
"CONSEQ_FRAME": 0x00,
"ff_payload_start": 2,
"bam_pgn": -1,
"res_id_list_hex": [
"0xfed8",
"0x1f007",
"0x1f008",
"0x1f009",
"0x1f014",
"0x1f016",
"0x1f101",
"0x1f105",
"0x1f201",
"0x1f208",
"0x1f209",
"0x1f20a",
"0x1f20c",
"0x1f20f",
"0x1f210",
"0x1f212",
"0x1f513",
"0x1f805",
"0x1f80e",
"0x1f80f",
"0x1f810",
"0x1f811",
"0x1f814",
"0x1f815",
"0x1f904",
"0x1f905",
"0x1fa04",
"0x1fb02",
"0x1fb03",
"0x1fb04",
"0x1fb05",
"0x1fb11",
"0x1fb12",
"0x1fd10",
"0x1fe07",
"0x1fe12",
"0x1ff14",
"0x1ff15",
],
}
if tp_type == "uds":
self.frame_struct = frame_struct_uds
elif tp_type == "j1939":
self.frame_struct = frame_struct_j1939
elif tp_type == "nmea":
self.frame_struct = frame_struct_nmea
else:
self.frame_struct = {}
self.tp_type = tp_type
return
def calculate_pgn(self, frame_id):
pgn = (frame_id & 0x03FFFF00) >> 8
pgn_f = (pgn & 0xFF00) >> 8
pgn_s = pgn & 0x00FF
if pgn_f < 240:
pgn &= 0xFFFFFF00
return pgn
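# Worked example: ID 0x18EBFF00 -> masked field 0xEBFF; the PDU format byte
# 0xEB (235) is below 240, so the destination byte is cleared and the PGN
# becomes 0xEB00 (the J1939 TP.DT entry in res_id_list_hex). For an ID like
# 0x18FEF100 the PDU format byte 0xFE (254) is >= 240, so the PGN stays 0xFEF1.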
def construct_new_tp_frame(self, base_frame, payload_concatenated, can_id):
new_frame = base_frame
new_frame.at["DataBytes"] = payload_concatenated
new_frame.at["DLC"] = 0
new_frame.at["DataLength"] = len(payload_concatenated)
if can_id:
new_frame.at["ID"] = can_id
return new_frame
def combine_tp_frames(self, df_raw):
import pandas as pd
bam_pgn = self.frame_struct["bam_pgn"]
res_id_list = [int(res_id, 16) for res_id in self.frame_struct["res_id_list_hex"]]
df_list_combined = []
# use PGN matching for J1939 and NMEA and update res_id_list to relevant entries
if self.tp_type == "nmea" or self.tp_type == "j1939":
res_id_list_incl_bam = res_id_list
res_id_list_incl_bam.append(bam_pgn)
df_raw_match = df_raw["ID"].apply(self.calculate_pgn).isin(res_id_list_incl_bam)
res_id_list = df_raw["ID"][df_raw_match].apply(self.calculate_pgn).drop_duplicates().values.tolist()
df_raw_tp = df_raw[df_raw_match]
df_raw_excl_tp = df_raw[~df_raw_match]
else:
df_raw_match = df_raw["ID"].isin(res_id_list)
res_id_list = df_raw["ID"][df_raw_match].drop_duplicates().values.tolist()
df_raw_tp = df_raw[df_raw_match]
df_raw_excl_tp = df_raw[~df_raw["ID"].isin(res_id_list)]
if len(df_raw.index) - len(df_raw_tp.index) - len(df_raw_excl_tp.index):
print("Warning - total rows does not equal sum of rows incl/excl transport protocol frames")
df_list_combined.append(df_raw_excl_tp)
for res_id in res_id_list:
# filter raw data for response ID and extract a 'base frame'
if self.tp_type == "nmea" or self.tp_type == "j1939":
df_raw_res_id = df_raw_tp[df_raw_tp["ID"].apply(self.calculate_pgn).isin([res_id, bam_pgn])]
else:
df_raw_res_id = df_raw_tp[df_raw_tp["ID"].isin([res_id])]
if df_raw_res_id.empty:
continue
for channel, df_channel in df_raw_res_id.groupby("BusChannel"):
# if J1939, we can't group by CAN ID (as we need both bam_pgn and response)
if self.tp_type == "j1939":
group = "DataLength"
else:
group = "ID"
for identifier, df_raw_filter in df_channel.groupby(group):
base_frame = df_raw_filter.iloc[0]
frame_list = []
frame_timestamp_list = []
payload_concatenated = []
ff_length = 0xFFF
can_id = None
conseq_frame_prev = None
# iterate through rows in filtered dataframe
for index, row in df_raw_filter.iterrows():
first_byte = row["DataBytes"][0]
# check if first frame (either for UDS/NMEA or J1939 case)
if self.tp_type == "j1939" and bam_pgn == self.calculate_pgn(row["ID"]):
first_frame_test = True
elif (first_byte & self.frame_struct["FIRST_FRAME_MASK"]) == self.frame_struct["FIRST_FRAME"]:
first_frame_test = True
else:
first_frame_test = False
# if single frame, save frame directly (excl. 1st byte)
if self.tp_type != "nmea" and (
first_byte & self.frame_struct["SINGLE_FRAME_MASK"] == self.frame_struct["SINGLE_FRAME"]
):
new_frame = self.construct_new_tp_frame(base_frame, row["DataBytes"], row["ID"])
frame_list.append(new_frame.values.tolist())
frame_timestamp_list.append(index)
# if first frame, save info from prior multi frame response sequence,
# then initialize a new sequence incl. the first frame payload
elif first_frame_test:
# create a new frame using information from previous iterations
if len(payload_concatenated) >= ff_length:
new_frame = self.construct_new_tp_frame(base_frame, payload_concatenated, can_id)
frame_list.append(new_frame.values.tolist())
frame_timestamp_list.append(frame_timestamp)
# reset and start on next frame
payload_concatenated = []
conseq_frame_prev = None
frame_timestamp = index
# for J1939, extract PGN and convert to 29 bit CAN ID for use in baseframe
if self.tp_type == "j1939":
pgn_hex = "".join("{:02x}".format(x) for x in reversed(row["DataBytes"][5:8]))
pgn = int(pgn_hex, 16)
can_id = (6 << 26) | (pgn << 8) | 254
ff_length = (row["DataBytes"][0] & 0x0F) << 8 | row["DataBytes"][1]
for byte in row["DataBytes"][self.frame_struct["ff_payload_start"] :]:
payload_concatenated.append(byte)
# if consecutive frame, extend payload with payload excl. 1st byte
elif first_byte & self.frame_struct["CONSEQ_FRAME_MASK"] == self.frame_struct["CONSEQ_FRAME"]:
if (conseq_frame_prev == None) or ((first_byte - conseq_frame_prev) == 1):
conseq_frame_prev = first_byte
for byte in row["DataBytes"][1:]:
payload_concatenated.append(byte)
df_raw_res_id_new = pd.DataFrame(frame_list, columns=base_frame.index, index=frame_timestamp_list)
df_list_combined.append(df_raw_res_id_new)
df_raw_combined = pd.concat(df_list_combined)
df_raw_combined.index.name = "TimeStamp"
df_raw_combined = df_raw_combined.sort_index()
return df_raw_combined
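# End-to-end sketch of how the helpers above fit together. The device path,
# DBC file name and start time are hypothetical placeholders, and this
# function is never called by the module itself.
def _example_pipeline():
    from datetime import datetime, timezone
    fs = setup_fs(s3=False)
    db_list = load_dbc_files(["j1939.dbc"])  # assumed DBC file next to this script
    devices = ["LOG/958D2219"]               # assumed device path on the filesystem
    start_times = [datetime(2021, 1, 1, tzinfo=timezone.utc)]
    log_files = list_log_files(fs, devices, start_times)
    proc = ProcessData(fs, db_list, signals=[])
    tp = MultiFrameDecoder("j1939")
    results = []
    for log_file in log_files:
        df_raw, device_id = proc.get_raw_data(log_file)
        df_raw = tp.combine_tp_frames(df_raw)  # reassemble J1939 transport frames
        df_phys = proc.extract_phys(df_raw)    # decode to physical values via the DBCs
        proc.print_log_summary(device_id, log_file, df_phys)
        results.append(restructure_data(df_phys, res="1S"))  # one column per signal
    return results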
|
the-stack_106_32079 | """
pyaud.plugins
=============
Main module used for public API.
"""
import functools as _functools
import importlib as _importlib
import os as _os
import sys as _sys
from abc import ABC as _ABC
from abc import abstractmethod as _abstractmethod
from pathlib import Path as _Path
from subprocess import CalledProcessError as _CalledProcessError
from typing import Any as _Any
from typing import Callable as _Callable
from typing import Dict as _Dict
from typing import List as _List
from typing import Optional as _Optional
from typing import Type as _Type
from typing import Union as _Union
from . import exceptions as _exceptions
from ._environ import DEFAULT_PLUGINS as _DEFAULT_PLUGINS
from ._environ import NAME as _NAME
from ._environ import SITE_PLUGINS as _SITE_PLUGINS
from ._environ import TempEnvVar as _TempEnvVar
from ._objects import MutableMapping as _MutableMapping
from ._utils import HashCap as _HashCap
from ._utils import Subprocess as _Subprocess
from ._utils import colors as _colors
from ._utils import files as _files
_plugin_paths: _List[_Path] = [_DEFAULT_PLUGINS, _SITE_PLUGINS]
def _check_command(func: _Callable[..., int]) -> _Callable[..., None]:
"""Run the routine common with all functions in this package.
:param func: Function to decorate.
:return: Wrapped function.
"""
@_functools.wraps(func)
def _wrapper(*args, **kwargs: bool) -> None:
if not _files.reduce():
print("No files found")
else:
returncode = func(*args, **kwargs)
if returncode:
_colors.red.bold.print(
f"Failed: returned non-zero exit status {returncode}",
file=_sys.stderr,
)
else:
_colors.green.bold.print(
f"Success: no issues found in {len(_files)} source files"
)
return _wrapper
class _SubprocessFactory( # pylint: disable=too-many-ancestors
_MutableMapping
):
"""Instantiate collection of ``Subprocess`` objects."""
def __init__(self, args: _List[str]):
super().__init__()
for arg in args:
self[arg] = _Subprocess(arg)
class Plugin(_ABC): # pylint: disable=too-few-public-methods
"""Base class of all plugins.
Raises ``TypeError`` if registered directly.
Contains the name attribute assigned upon registration.
Subprocesses are stored in the ``subprocess`` dict object
:param name: Name assigned to plugin via ``@register`` decorator.
"""
def __init__(self, name: str) -> None:
self.name = name
self.subprocess = _SubprocessFactory(self.exe)
@staticmethod
def audit_error() -> _exceptions.AuditError:
"""Raise if checks have failed.
:return: AuditError instantiated with error message.
"""
return _exceptions.AuditError(" ".join(_sys.argv))
@property
def env(self) -> _Dict[str, str]:
"""Return environment which will remain active for run.
:return: Dict containing any number of str keys and
corresponding str values.
"""
return {}
@property
def exe(self) -> _List[str]:
"""List of executables to add to ``subprocess`` dict.
:return: List of str object to assign to subprocesses
"""
return []
def __call__(self, *args: _Any, **kwargs: bool) -> _Any:
"""Enables calling of all plugin instances."""
class Audit(Plugin):
"""Blueprint for writing audit-only plugins.
Audit will be called from here.
Run within context of defined environment variables.
If no environment variables are defined nothing will change.
:raises CalledProcessError: Will always be raised if something
fails that is not to do with the
audit condition.
Will be excepted and reraised as
``AuditError`` if the audit fails.
:raises AuditError: Raised from ``CalledProcessError`` if
audit fails.
:return: If any error has not been raised for any
reason int object must be returned, from
subprocess or written, to notify call
whether process has succeeded or failed.
No value will actually return from
__call__ as it will be passed to the
decorator.
"""
@_abstractmethod
def audit(self, *args: _Any, **kwargs: bool) -> int:
"""All audit logic to be written within this method.
:param args: Args that can be passed from other plugins.
:param kwargs: Boolean flags for subprocesses.
:return: If any error has not been raised for any reason
int object must be returned, from subprocess or
written, to notify call whether process has
succeeded or failed.
"""
@_check_command
def __call__(self, *args: _Any, **kwargs: bool) -> int:
with _TempEnvVar(_os.environ, **self.env):
try:
return self.audit(*args, **kwargs)
except _CalledProcessError as err:
raise self.audit_error() from err
class Fix(Audit):
"""Blueprint for writing audit and fix plugins.
Audit will be called from here.
Called within context of defined environment variables.
If no environment variables are defined nothing will change.
If audit fails and the ``-f/--fix`` flag is passed to the
commandline the ``fix`` method will be called within the
``CalledProcessError`` try-except block.
If ``-f/--fix`` is not passed and the audit fails, the user is running
the audit only and an ``AuditError`` will be raised.
:raises CalledProcessError: Will always be raised if something
fails that is not to do with the
audit condition.
Will be excepted and reraised as
``AuditError`` if the audit fails and
``-f/--fix`` is not passed to the
commandline.
:raises AuditError: Raised from ``CalledProcessError``
if audit fails and ``-f/--fix`` flag
if not passed to the commandline.
:return: If any error has not been raised for any
reason int object must be returned, from
subprocess or written, to notify __call__
whether process has succeeded or failed.
No value will actually return from
__call__ as it will be passed to the
decorator.
"""
@_abstractmethod
def audit(self, *args: _Any, **kwargs: bool) -> int:
"""All audit logic to be written within this method.
:param args: Args that can be passed from other plugins.
:param kwargs: Boolean flags for subprocesses.
:return: If any error has not been raised for any reason
int object must be returned, from subprocess or
written, to notify __call__ whether process has
succeeded or failed.
If non-zero exist if returned and ``-f/--fix``
has been passed to the commandline run the
``fix`` method, otherwise raise ``AuditError``.
"""
@_abstractmethod
def fix(self, *args: _Any, **kwargs: bool) -> int:
"""Run if audit fails but only if running a fix.
:param args: Args that can be passed from other plugins.
:param kwargs: Boolean flags for subprocesses.
:return: If any error has not been raised for any reason
int object must be returned, from subprocess or
written, to notify __call__ whether process has
succeeded or failed.
"""
@_check_command
def __call__(self, *args: _Any, **kwargs: bool) -> _Any:
with _TempEnvVar(_os.environ, **self.env):
try:
return self.audit(*args, **kwargs)
except _CalledProcessError as err:
if kwargs.get("fix", False):
return self.fix(**kwargs)
raise self.audit_error() from err
class Action(Plugin): # pylint: disable=too-few-public-methods
"""Blueprint for writing generic plugins.
Called within context of defined environment variables.
If no environment variables are defined nothing will change.
:raises CalledProcessError: Will always be raised if something
fails that is not to do with the
action condition.
Will be excepted and reraised as
``AuditError`` if the action fails.
:raises AuditError: Raised from ``CalledProcessError``
if action fails.
:return: Any value and type can be returned.
"""
@_abstractmethod
def action(self, *args: _Any, **kwargs: bool) -> _Any:
"""All logic to be written within this method.
:param args: Args that can be passed from other plugins.
:param kwargs: Boolean flags for subprocesses.
:return: Any value and type can be returned.
"""
def __call__(self, *args: _Any, **kwargs: bool) -> _Any:
with _TempEnvVar(_os.environ, **self.env):
try:
return self.action(*args, **kwargs)
except _CalledProcessError as err:
raise self.audit_error() from err
class Parametrize(Plugin): # pylint: disable=too-few-public-methods
"""Define a list of strings to call multiple plugins.
:raises CalledProcessError: Will always be raised if something
fails that is not to do with the
called plugin's condition.
Will be excepted and reraised as
``AuditError`` if the called plugin
fails and the called plugin does not
specify a ``fix`` method or the
``-f/--fix`` flag is not passed to the
commandline.
:raises AuditError: Raised from ``CalledProcessError``
if called plugin fails and no ``fix``
method is specified or the ``-f/--fix``
flag is not passed to the commandline.
"""
@_abstractmethod
def plugins(self) -> _List[str]:
"""List of plugin names to run.
:return: List of plugin names, as defined in ``@register``.
"""
def __call__(self, *args: _Any, **kwargs: bool) -> None:
for name in self.plugins():
_colors.cyan.bold.print(f"\n{_NAME} {name}")
_plugins[name](*args, **kwargs)
class Write(Plugin):
"""Blueprint for writing file manipulation processes.
Announce:
- If the file did not exist and a file has been created
- If the file did exist and the file has not been changed
- If the file did exist and the file has been changed
"""
def required(self) -> _Optional[_Path]:
"""Pre-requisite for working on file (if there is one).
:return: Path object, otherwise None.
"""
@property
@_abstractmethod
def path(self) -> _Path:
"""Path to file, absolute or relative, that will be worked on.
:return: Returned value needs to be a Path object.
"""
def write(self, *args: _Any, **kwargs: bool) -> _Any:
"""All write logic to be written within this method.
:param args: Args that can be passed from other plugins.
:param kwargs: Boolean flags for subprocesses.
"""
def __call__(self, *args: _Any, **kwargs: bool) -> None:
if (
self.required() is None # type: ignore
or self.required().exists() # type: ignore
):
path = _Path(self.path)
print(f"Updating ``{path}``")
with _HashCap(path) as cap:
self.write(*args, **kwargs)
if cap.new:
print(f"created ``{path.name}``")
elif cap.compare:
print(f"``{path.name}`` is already up to date")
else:
print(f"updated ``{path.name}``")
class FixFile(Plugin):
"""Blueprint for writing audit and fix plugins for individual files.
All logic can act on each file that would be passed from __call__.
Called within context of defined environment variables.
If no environment variables are defined nothing will change.
Condition for failure needs to be defined, as the file argument
passed from outer loop will not return an exit status.
If audit fails and the ``-f/--fix`` flag is passed to the
commandline the ``fix`` method will be called within the
``CalledProcessError`` try-except block.
If ``-f/--fix`` is not passed and the audit fails, the user is running
the audit only and an ``AuditError`` will be raised.
:raises CalledProcessError: Will always be raised if something
fails that is not to do with the
audit condition.
Will be excepted and reraised as
``AuditError`` if the audit fails and
``-f/--fix`` is not passed to the
commandline.
:raises AuditError: Raised from ``CalledProcessError``
if audit fails and ``-f/--fix`` flag
if not passed to the commandline.
:return: Only 0 exit-status can be returned. If
process fails error will be raised.
"""
@_abstractmethod
def fail_condition(self) -> _Optional[bool]:
"""Condition to trigger non-subprocess failure."""
@_abstractmethod
def audit(self, file: _Path, **kwargs: bool) -> None:
"""All logic written within this method for each file's audit.
:param file: Individual file.
:param kwargs: Boolean flags for subprocesses.
"""
@_abstractmethod
def fix(self, file: _Path, **kwargs: bool) -> None:
"""All logic written within this method for each file's fix.
:param file: Individual file.
:param kwargs: Boolean flags for subprocesses.
"""
@_check_command
def __call__(self, *args, **kwargs: bool) -> _Any:
files = [p for p in _files if p.is_file()]
for file in files:
self.audit(file, **kwargs)
fail = self.fail_condition()
if fail is not None and fail:
if kwargs.get("fix", False):
self.fix(file, **kwargs)
else:
raise self.audit_error()
# if no error raised return 0 to decorator
return 0
# array of plugins
PLUGINS = [Audit, Fix, Action, Parametrize, Write, FixFile]
# array of plugin names
PLUGIN_NAMES = [t.__name__ for t in PLUGINS]
# array of plugin types before instantiation
PluginType = _Union[
_Type[Audit],
_Type[Fix],
_Type[Action],
_Type[Parametrize],
_Type[Write],
_Type[FixFile],
]
# array of plugin types after instantiation
PluginInstance = _Union[Audit, Fix, Action, Parametrize, Write, FixFile]
class _Plugins(_MutableMapping): # pylint: disable=too-many-ancestors
"""Holds registered plugins.
Instantiate plugin on running __setitem__.
:raise NameConflictError: If name of registered plugin is not
unique.
:raise TypeError: If non plugin type registered.
"""
def __setitem__(self, name: str, plugin: PluginType) -> None:
# only unique names to be set in `plugins` object
# if name is not unique raise `NameConflictError`
if name in self:
raise _exceptions.NameConflictError(plugin.__name__, name)
if (
not hasattr(plugin, "__bases__")
or plugin.__bases__[0].__name__ not in PLUGIN_NAMES
):
raise TypeError(
"can only register one of the following: "
+ ", ".join(PLUGIN_NAMES)
)
super().__setitem__(name, plugin(name))
_plugins = _Plugins()
def register(name: str) -> _Callable[..., PluginType]:
"""Register subclassed plugin to collection.
:param name: Name to register plugin as.
:return: Return registered plugin to call.
"""
def _register(plugin: PluginType):
_plugins[name] = plugin
return plugin
return _register
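# Example, kept as a comment so that importing this module registers nothing;
# a plugin module on one of the plugin paths might contain:
#
#     @register("hello")
#     class Hello(Action):
#         """Hypothetical plugin that only prints a message."""
#
#         def action(self, *args, **kwargs):
#             print("hello from a registered plugin")
#             return 0
#
# after which ``registered()`` lists "hello" and ``get("hello")()`` runs it.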
def mapping() -> _Dict[str, PluginInstance]:
"""Get dict of named keys and their corresponding plugin values.
:return: Mapping of plugins and their unique names.
"""
return dict(_plugins)
def registered() -> _List[str]:
"""Get list of registered plugins.
:return: List of registered plugins.
"""
return sorted(list(_plugins))
def get(name: str) -> PluginInstance:
"""Get plugins by name.
:param name: Unique name of plugin.
:return: Callable plugin instance.
"""
return _plugins[name]
def load() -> None:
"""Import all registered plugins from provided plugin paths."""
for plugin_path in _plugin_paths:
_sys.path.append(str(plugin_path.parent))
if plugin_path.is_dir():
for path in plugin_path.iterdir():
if (
not path.name.startswith("_")
and not path.name.startswith(".")
and path.name.endswith(".py")
):
_importlib.import_module(
f"{plugin_path.name}.{path.name.replace('.py', '')}"
)
|
the-stack_106_32080 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.monitoring_v3.proto import metric_service_pb2_grpc
class MetricServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.monitoring.v3 MetricService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/monitoring',
'https://www.googleapis.com/auth/monitoring.read',
'https://www.googleapis.com/auth/monitoring.write',
)
def __init__(self,
channel=None,
credentials=None,
address='monitoring.googleapis.com:443'):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments are mutually '
'exclusive.', )
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
)
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
'metric_service_stub':
metric_service_pb2_grpc.MetricServiceStub(channel),
}
@classmethod
def create_channel(cls,
address='monitoring.googleapis.com:443',
credentials=None):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=cls._OAUTH_SCOPES,
options={
'grpc.max_send_message_length': -1,
'grpc.max_receive_message_length': -1,
}.items(),
)
@property
def list_monitored_resource_descriptors(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs[
'metric_service_stub'].ListMonitoredResourceDescriptors
@property
def get_monitored_resource_descriptor(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets a single monitored resource descriptor. This method does not require a Stackdriver account.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs[
'metric_service_stub'].GetMonitoredResourceDescriptor
@property
def list_metric_descriptors(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists metric descriptors that match a filter. This method does not require a Stackdriver account.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['metric_service_stub'].ListMetricDescriptors
@property
def get_metric_descriptor(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets a single metric descriptor. This method does not require a Stackdriver account.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['metric_service_stub'].GetMetricDescriptor
@property
def create_metric_descriptor(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates a new metric descriptor. User-created metric descriptors define
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`__.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['metric_service_stub'].CreateMetricDescriptor
@property
def delete_metric_descriptor(self):
"""Return the gRPC stub for {$apiMethod.name}.
Deletes a metric descriptor. Only user-created `custom
metrics <https://cloud.google.com/monitoring/custom-metrics>`__ can be
deleted.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['metric_service_stub'].DeleteMetricDescriptor
@property
def list_time_series(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists time series that match a filter. This method does not require a Stackdriver account.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['metric_service_stub'].ListTimeSeries
@property
def create_time_series(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates or adds data to one or more time series.
The response is empty if all time series in the request were written.
If any time series could not be written, a corresponding failure message is
included in the error response.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['metric_service_stub'].CreateTimeSeries
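# Illustrative usage sketch (added for clarity, not part of the original file).
# The project name and request object below are assumptions:
#
#   from google.cloud.monitoring_v3.proto import metric_service_pb2
#   transport = MetricServiceGrpcTransport()
#   request = metric_service_pb2.ListMetricDescriptorsRequest(
#       name="projects/my-project")
#   descriptors = transport.list_metric_descriptors(request)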
|
the-stack_106_32082 | from .restful_model_collection import RestfulModelCollection
from .errors import FileUploadError
from six import StringIO
import base64
import json
class NylasAPIObject(dict):
attrs = []
# The Nylas API holds most objects for an account directly under '/',
# but some of them are under '/a' (mostly the account-management
# and billing code). api_root is a tiny metaprogramming hack to let
# us use the same code for both.
api_root = 'n'
def __init__(self, cls, api):
self.id = None
self.cls = cls
self.api = api
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
__getattr__ = dict.get
@classmethod
def create(cls, api, **kwargs):
object_type = kwargs.get('object')
if (object_type and object_type != cls.__name__.lower() and
object_type != 'account'):
# We were given a specific object type and we're trying to
# instantiate something different; abort. (Relevant for folders
# and labels API.)
# We need a special case for accounts because the /accounts API
# is different between the open source and hosted API.
return
obj = cls(api)
obj.cls = cls
for attr in cls.attrs:
# Support attributes we want to override with properties where
# the property names overlap with the JSON names (e.g. folders)
attr_name = attr
if attr_name.startswith('_'):
attr = attr_name[1:]
if attr in kwargs:
obj[attr_name] = kwargs[attr]
if 'id' not in kwargs:
obj['id'] = None
return obj
def as_json(self):
dct = {}
for attr in self.cls.attrs:
if hasattr(self, attr):
dct[attr] = getattr(self, attr)
return dct
def child_collection(self, cls, **filters):
return RestfulModelCollection(cls, self.api, **filters)
def save(self, **kwargs):
if self.id:
new_obj = self.api._update_resource(self.cls, self.id,
self.as_json(), **kwargs)
else:
new_obj = self.api._create_resource(self.cls,
self.as_json(), **kwargs)
for attr in self.cls.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
def update(self):
new_obj = self.api._update_resource(self.cls,
self.id, self.as_json())
for attr in self.cls.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
class Message(NylasAPIObject):
attrs = ["bcc", "body", "cc", "date", "events", "files", "from", "id",
"account_id", "object", "snippet", "starred", "subject",
"thread_id", "to", "unread", "starred", "_folder", "_labels"]
collection_name = 'messages'
def __init__(self, api):
NylasAPIObject.__init__(self, Message, api)
@property
def attachments(self):
return self.child_collection(File, message_id=self.id)
@property
def folder(self):
# Instantiate a Folder object from the API response
if self._folder:
return Folder.create(self.api, **self._folder)
@property
def labels(self):
if self._labels:
return [Label.create(self.api, **l)
for l in self._labels]
else:
return []
def update_folder(self, folder_id):
update = {'folder': folder_id}
new_obj = self.api._update_resource(self.cls, self.id, update)
for attr in self.cls.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
return self.folder
def update_labels(self, label_ids=[]):
update = {'labels': label_ids}
new_obj = self.api._update_resource(self.cls, self.id, update)
for attr in self.cls.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
return self.labels
def add_labels(self, label_ids=[]):
labels = [l.id for l in self.labels]
labels = list(set(labels).union(set(label_ids)))
return self.update_labels(labels)
def add_label(self, label_id):
return self.add_labels([label_id])
def remove_labels(self, label_ids=[]):
labels = [l.id for l in self.labels]
labels = list(set(labels) - set(label_ids))
return self.update_labels(labels)
def remove_label(self, label_id):
return self.remove_labels([label_id])
def mark_as_seen(self):
self.mark_as_read()
def mark_as_read(self):
update = {'unread': False}
self.api._update_resource(self.cls, self.id, update)
self.unread = False
def mark_as_unread(self):
update = {'unread': True}
self.api._update_resource(self.cls, self.id, update)
self.unread = True
def star(self):
update = {'starred': True}
self.api._update_resource(self.cls, self.id, update)
self.starred = True
def unstar(self):
update = {'starred': False}
self.api._update_resource(self.cls, self.id, update)
self.starred = False
@property
def raw(self):
headers = {"Accept": "message/rfc822"}
data = self.api._get_resource_data(Message, self.id, headers=headers)
return data
class Folder(NylasAPIObject):
attrs = ["id", "display_name", "name", "object", "account_id"]
collection_name = "folders"
def __init__(self, api):
NylasAPIObject.__init__(self, Folder, api)
@property
def threads(self):
        return self.child_collection(Thread, **{'in': self.id})
@property
def messages(self):
        return self.child_collection(Message, **{'in': self.id})
class Label(NylasAPIObject):
attrs = ["id", "display_name", "name", "object", "account_id"]
collection_name = "labels"
def __init__(self, api):
NylasAPIObject.__init__(self, Label, api)
@property
def threads(self):
        return self.child_collection(Thread, **{'in': self.id})
@property
def messages(self):
        return self.child_collection(Message, **{'in': self.id})
class Thread(NylasAPIObject):
attrs = ["draft_ids", "id", "message_ids", "account_id", "object",
"participants", "snippet", "subject", "subject_date",
"last_message_timestamp", "first_message_timestamp",
"unread", "starred", "version", "_folders", "_labels",
"received_recent_date"]
collection_name = 'threads'
def __init__(self, api):
NylasAPIObject.__init__(self, Thread, api)
@property
def messages(self):
return self.child_collection(Message, thread_id=self.id)
@property
def drafts(self):
return self.child_collection(Draft, thread_id=self.id)
@property
def folders(self):
if self._folders:
return [Folder.create(self.api, **f)
for f in self._folders]
else:
return []
@property
def labels(self):
if self._labels:
return [Label.create(self.api, **l)
for l in self._labels]
else:
return []
def update_folder(self, folder_id):
update = {'folder': folder_id}
new_obj = self.api._update_resource(self.cls, self.id, update)
for attr in self.cls.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
        return self.folders
def update_labels(self, label_ids=[]):
update = {'labels': label_ids}
new_obj = self.api._update_resource(self.cls, self.id, update)
for attr in self.cls.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
return self.labels
def add_labels(self, label_ids=[]):
labels = [l.id for l in self.labels]
labels = list(set(labels).union(set(label_ids)))
return self.update_labels(labels)
def add_label(self, label_id):
return self.add_labels([label_id])
def remove_labels(self, label_ids=[]):
labels = [l.id for l in self.labels]
labels = list(set(labels) - set(label_ids))
return self.update_labels(labels)
def remove_label(self, label_id):
return self.remove_labels([label_id])
def mark_as_seen(self):
self.mark_as_read()
def mark_as_read(self):
update = {'unread': False}
self.api._update_resource(self.cls, self.id, update)
self.unread = False
def mark_as_unread(self):
update = {'unread': True}
self.api._update_resource(self.cls, self.id, update)
self.unread = True
def star(self):
update = {'starred': True}
self.api._update_resource(self.cls, self.id, update)
self.starred = True
def unstar(self):
update = {'starred': False}
self.api._update_resource(self.cls, self.id, update)
self.starred = False
def create_reply(self):
d = self.drafts.create()
d.thread_id = self.id
d.subject = self.subject
return d
# This is a dummy class that allows us to use the create_resource function
# and pass in a 'Send' object that will translate into a 'send' endpoint.
class Send(Message):
collection_name = 'send'
def __init__(self, api):
NylasAPIObject.__init__(self, Send, api)
class Draft(Message):
attrs = ["bcc", "cc", "body", "date", "files", "from", "id",
"account_id", "object", "subject", "thread_id", "to",
"unread", "version", "file_ids", "reply_to_message_id",
"reply_to", "starred", "snippet"]
collection_name = 'drafts'
def __init__(self, api, thread_id=None):
Message.__init__(self, api)
        NylasAPIObject.__init__(self, Draft, api)
self.file_ids = []
def attach(self, file):
if not file.id:
file.save()
self.file_ids.append(file.id)
def detach(self, file):
if file.id in self.file_ids:
self.file_ids.remove(file.id)
def send(self):
if not self.id:
data = self.as_json()
else:
data = {'draft_id': self.id}
if hasattr(self, 'version'):
data['version'] = self.version
msg = self.api._create_resource(Send, data)
if msg:
return msg
def delete(self):
if self.id and self.version:
data = {'version': self.version}
self.api._delete_resource(self.cls, self.id, data=data)
class File(NylasAPIObject):
attrs = ["content_type", "filename", "id", "content_id",
"account_id", "object", "size", "message_ids", ]
collection_name = 'files'
def save(self):
if hasattr(self, 'stream') and self.stream is not None:
data = {self.filename: self.stream}
elif hasattr(self, 'data') and self.data is not None:
data = {self.filename: StringIO(self.data)}
else:
raise FileUploadError(message=("File object not properly "
"formatted, must provide "
"either a stream or data."))
new_obj = self.api._create_resources(File, data)
new_obj = new_obj[0]
for attr in self.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
def download(self):
if not self.id:
raise FileUploadError(message=("Can't download a file that "
"hasn't been uploaded."))
return self.api._get_resource_data(File, self.id,
extra='download')
def __init__(self, api):
NylasAPIObject.__init__(self, File, api)
class Contact(NylasAPIObject):
attrs = ["id", "account_id", "name", "email", "object"]
collection_name = 'contacts'
def __init__(self, api):
NylasAPIObject.__init__(self, Contact, api)
class Calendar(NylasAPIObject):
attrs = ["id", "account_id", "name", "description", "read_only", "object"]
collection_name = 'calendars'
def __init__(self, api):
NylasAPIObject.__init__(self, Calendar, api)
@property
def events(self):
return self.child_collection(Event, calendar_id=self.id)
class Event(NylasAPIObject):
attrs = ["id", "account_id", "title", "description", "location",
"read_only", "when", "busy", "participants", "calendar_id",
"recurrence", "status", "master_event_id", "owner",
"original_start_time", "object"]
collection_name = 'events'
def __init__(self, api):
NylasAPIObject.__init__(self, Event, api)
def as_json(self):
dct = NylasAPIObject.as_json(self)
# Filter some parameters we got from the API
if dct.get('when'):
# Currently, the event (self) and the dict (dct) share the same
# reference to the `'when'` dict. We need to clone the dict so
# that when we remove the object key, the original event's
# `'when'` reference is unmodified.
dct['when'] = dct['when'].copy()
dct['when'].pop('object', None)
return dct
class Namespace(NylasAPIObject):
attrs = ["account", "email_address", "id", "account_id", "object",
"provider", "name", "organization_unit"]
collection_name = 'n'
def __init__(self, api):
NylasAPIObject.__init__(self, Namespace, api)
def child_collection(self, cls, **filters):
return RestfulModelCollection(cls, self.api, self.id, **filters)
class Account(NylasAPIObject):
api_root = 'a'
attrs = ["account_id", "trial", "trial_expires", "sync_state",
"billing_state", "account_id"]
collection_name = 'accounts'
def __init__(self, api):
NylasAPIObject.__init__(self, Account, api)
def as_json(self):
dct = NylasAPIObject.as_json(self)
return dct
def upgrade(self):
self.api._call_resource_method(self, self.account_id,
'upgrade', None)
def downgrade(self):
self.api._call_resource_method(self, self.account_id,
'downgrade', None)
class APIAccount(NylasAPIObject):
attrs = ["email_address", "id", "account_id", "object",
"provider", "name", "organization_unit"]
collection_name = 'accounts'
def __init__(self, api):
NylasAPIObject.__init__(self, APIAccount, api)
def as_json(self):
dct = NylasAPIObject.as_json(self)
return dct
class SingletonAccount(APIAccount):
# This is an APIAccount that lives under /account.
collection_name = 'account'
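# Illustrative usage sketch (added for clarity, not part of the original file).
# The ``api`` object below is an assumption: any client exposing the internal
# _create_resource/_update_resource helpers these models rely on.
#
#   draft = Draft.create(api, to=[{"email": "someone@example.com"}],
#                        subject="Hello", body="<p>Hi!</p>")
#   attachment = File.create(api, filename="notes.txt")
#   attachment.data = "file contents"
#   draft.attach(attachment)
#   draft.send()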
|
the-stack_106_32083 | import msgpack
import mmap
import re
import sys
file_path = sys.argv[1]
out_file_path = sys.argv[2]
num_rows = int(sys.argv[3])
discrete_query_col_index = int(sys.argv[4])
num_query_col_index = int(sys.argv[5])
compression_method = sys.argv[6]
compression_level = sys.argv[7]
if compression_method == "bz2":
import bz2 as cmpr
elif compression_method == "gz":
import gzip as cmpr
elif compression_method == "lzma":
import lzma as cmpr
elif compression_method == "snappy":
import snappy as cmpr
else:
print("No matching compression method")
sys.exit(1)
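# File layout assumed by this script (inferred from the reads below):
#   <file>          one individually compressed line per row; fixed-width columns once decompressed
#   <file>.rowdict  msgpack map of row index -> starting byte offset of that row
#   <file>.ll       length of one uncompressed line (read but unused below)
#   <file>.mccl     width of a single column-coordinate entry in the .cc file
#   <file>.cc       fixed-width "start,length" coordinates for every column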
def find_col_coords(col_indices):
for col_index in col_indices:
start_pos = col_index * max_column_coord_length
next_start_pos = start_pos + max_column_coord_length
yield [int(x) for x in cc_map_file[start_pos:next_start_pos].rstrip().split(b",")]
def parse_row(row_index):
row_start = row_start_dict[row_index]
    if row_index == num_rows - 1:
compressed_line = data_map_file[row_start:len(data_map_file)]
else:
row_end = row_start_dict[row_index + 1]
compressed_line = data_map_file[row_start:row_end]
return cmpr.decompress(compressed_line)
def parse_row_values(row_index, col_coords):
line = parse_row(row_index)
for coords in col_coords:
yield line[coords[0]:coords[0] + coords[1]].rstrip()
def query_cols(row_indices):
matching_row_indices = []
discrete_coords = list(find_col_coords([discrete_query_col_index]))[0]
num_coords = list(find_col_coords([num_query_col_index]))[0]
for row_index in row_indices:
discrete_value = parse_row(row_index)[discrete_coords[0]:discrete_coords[0] + discrete_coords[1]].rstrip()
num_value = float(parse_row(row_index)[num_coords[0]:num_coords[0] + num_coords[1]].rstrip())
if (discrete_value.startswith(b"A") or discrete_value.endswith(b"Z")) and num_value >= 0.1:
matching_row_indices.append(row_index)
return matching_row_indices
with open(file_path + ".rowdict", 'rb') as rowdict_file:
row_start_dict = msgpack.unpackb(rowdict_file.read(), raw=False)
with open(file_path + ".ll", 'rb') as ll_file:
line_length = int(ll_file.read().rstrip())
with open(file_path + ".mccl", 'rb') as mccl_file:
max_column_coord_length = int(mccl_file.read().rstrip())
with open(file_path + ".cc", 'rb') as cc_file:
cc_map_file = mmap.mmap(cc_file.fileno(), 0, prot=mmap.PROT_READ)
with open(file_path, 'rb') as my_file:
data_map_file = mmap.mmap(my_file.fileno(), 0, prot=mmap.PROT_READ)
with open(out_file_path, 'wb') as out_file:
num_cols = int(len(cc_map_file) / max_column_coord_length)
out_col_indices = range(0, num_cols, 100)
out_col_coords = list(find_col_coords(out_col_indices))
# Header line
out_file.write(b"\t".join(parse_row_values(0, out_col_coords)).rstrip() + b"\n")
matching_row_indices = query_cols(range(1, num_rows))
chunk_size = 1000
out_lines = []
for row_index in matching_row_indices:
out_lines.append(b"\t".join(parse_row_values(row_index, out_col_coords)).rstrip())
if len(out_lines) % chunk_size == 0:
out_file.write(b"\n".join(out_lines) + b"\n")
out_lines = []
if len(out_lines) > 0:
out_file.write(b"\n".join(out_lines) + b"\n")
data_map_file.close()
cc_map_file.close()
|
the-stack_106_32085 | """Script to download the 20 newsgroups text classification set"""
import os
import tarfile
from contextlib import closing
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = URL.rsplit('/', 1)[1]
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
if not os.path.exists(TRAIN_FOLDER) or not os.path.exists(TEST_FOLDER):
if not os.path.exists(ARCHIVE_NAME):
print("Downloading dataset from %s (14 MB)" % URL)
opener = urlopen(URL)
with open(ARCHIVE_NAME, 'wb') as archive:
archive.write(opener.read())
print("Decompressing %s" % ARCHIVE_NAME)
with closing(tarfile.open(ARCHIVE_NAME, "r:gz")) as archive:
archive.extractall(path='.')
os.remove(ARCHIVE_NAME)
|
the-stack_106_32086 | '''
*
* Copyright (C) 2020 Universitat Politècnica de Catalunya.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
'''
# -*- coding: utf-8 -*-
""" This module defines a generic way to communicate with the MySQL database.
This module takes advantage of the regular database structure to ease the
requests and data management.
It basically has two different classes: db and Connector.
- db: defines some standard methods to create the most basic query structures
for accessing database tables.
- Connector: uses db class to represent a database table inside the
application.
To search for an item in the database you create a Connector, passing the name
of the table you want to load data from, and then call its 'load' method with
either the integer id or the string 'hash' to look for; the Connector builds
the request and searches for the item in the database.
Once you have the item you can just get the related data calling the 'get'
function with the table you want to take the data from. The 'get' function
looks for the related data using the previously gotten element id and it
returns a list of Connectors containing the data. You can then look for other
related data using the returned Connectors.
Inside each connector there is an attribute called 'values' that holds a dict
with all the selected table columns (the selected element's properties).
You can then modify those values and call the method 'save' to update the
values in the database table.
When you want to create a new row in a table you only need to create a new
Connector to that table, call the 'load' method with the new hash you want
to insert in the table, and if the element doesn't already exist the method
will return a new Connector element with all the columns of the table
initialized as None values. You only have to insert the values in the
list, and when you call the 'save' method it will insert the row into the
table as a new row.
There is an extra function called 'get_all' that returns all the elements
inside the table of the Connector. If for example you want to take all the
domains, you call a new Connector passing 'domain' as the table name,
and then call the 'get_all' function to get all the domains. The results are
returned as a list of Connectors representing the given data.
This style of management greatly simplifies the database requests needed inside
the code, but it clearly generates more requests than strictly necessary. For
the sake of speed and performance the Connector therefore also includes some
specific requests for extensive data that would load far too slowly using only
the simple methods.
Last, there is a function called 'custom' where you can generate a custom
request for specific reasons.
"""
# Basic modules
import MySQLdb
import re
import config
import logging
import logging.config
from utils import hash_string
logging.config.fileConfig('../logging.conf')
logger = logging.getLogger("DB_MANAGER")
CROSS_TABLES = ["domain_subdomain", "domain_category", "domain_third_party", "domain_url",
"pattern_url", "resource_fingerprint"]
class Db(object):
"""
This class manages the basic database operations. It defines the most
basic requests taking into account the database table definitions to
make easier the data management.
"""
def __init__(self):
self.host = config.MYSQL_HOST
self.user = config.MYSQL_USER
self.password = config.MYSQL_PASSWORD
self.db = config.MYSQL_DB
        self.conn = MySQLdb.connect(host=self.host, port=3306, user=self.user, passwd=self.password, db=self.db,
use_unicode=True, charset='utf8mb4')
def close(self):
""" Closes the connection to the database. """
self.conn.close()
def initialize(self, sites, start, timestamp):
""" initializes the database with the Alexa's list domain information. """
for i, domain in enumerate(sites, start + 1):
# domain = extract_domain(domain)
print(str(i) + ": " + domain)
hash_key = hash_string(domain)
element = {"hash": hash_key, "name": domain, "rank": i, "insert_date": timestamp}
element_id = self.custom(query="SELECT id FROM domain WHERE domain.hash = %s", values=[hash_key])
if not element_id:
self.insert("domain", element)
else:
element["id"] = element_id[0]["id"]
self.update("domain", element)
def __select(self, fields, tables, conditions, order, values, log=None):
""" Creates a standard SELECT request. """
request = "SELECT "
field_list = ", ".join(fields)
request += field_list
request += " FROM " + ", ".join(tables)
if conditions:
cond_list = " WHERE "
for index, cond in enumerate(conditions):
cond_list += "(" + cond
if values[index] == "NULL":
cond_list += " IS %s)"
values[index] = None
elif values[index] == "NOT NULL":
cond_list += " IS NOT %s)"
values[index] = None
else:
cond_list += " = %s)"
if index < len(conditions) - 1:
cond_list += " AND "
request += cond_list
if order:
# request += " ORDER BY '"+"', '".join(order)+"'"
request += " ORDER BY " + ", ".join(order)
self.conn.ping()
cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
results = []
try:
if values:
if log:
logger.debug(request % tuple(values))
cursor.execute(request, tuple(values))
else:
if log:
logger.debug(request)
cursor.execute(request)
except MySQLdb.Error as error:
if values:
logger.error(request % tuple(values))
else:
logger.error(request)
logger.error("SQL ERROR: " + str(error) + "\n-----------------")
else:
for row in cursor.fetchall():
result = {}
for key in row.keys():
result[key] = row[key]
if row[key] == "NULL":
result[key] = None
results.append(result)
if log:
logger.debug("REQUEST OK. Results: " + str(results) + "\n-----------------")
cursor.close()
return results
def __insert(self, table, fields, values, log=None):
""" Creates a standard INSERT request. """
if fields and len(fields) != len(values):
logger.warning("Incorrect number of field/values")
return 0
request = "INSERT INTO " + table
if fields:
request += " (" + fields[0]
if len(fields) > 1:
for index in range(1, len(fields)):
if "rank" in fields[index]:
request += ", `" + fields[index] + "`"
else:
request += ", " + fields[index]
request += ")"
request += " VALUES (%s"
if len(values) > 1:
for index in range(1, len(values)):
request += ", %s"
request += ")"
request += " ON DUPLICATE KEY UPDATE "
if fields:
request += fields[0]+"=%s"
if len(fields) > 1:
for index in range(1, len(fields)):
if "rank" in fields[index]:
request += ", `" + fields[index] + "`=%s"
else:
request += ", " + fields[index] + "=%s"
new_values = values.copy()
for value in new_values:
values.append(value)
self.conn.ping()
cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
try:
if log:
logger.debug(request % tuple(values))
cursor.execute(request, tuple(values))
except MySQLdb.Error as error:
logger.error(request % tuple(values))
logger.error("SQL ERROR: " + str(error) + "\n-----------------")
return 0
else:
self.conn.commit()
if log:
logger.debug("REQUEST OK. Id: " + str(cursor.lastrowid) + "\n-----------------")
last_row_id = cursor.lastrowid
cursor.close()
return last_row_id
def __update(self, table, fields, conditions, values, log=None):
""" Creates a standard UPDATE request. """
if fields and len(fields) + len(conditions) != len(values):
logger.warning("Incorrect number of fields/conditions/values")
return 0
request = "UPDATE IGNORE " + table
request += " SET " + fields[0] + " = %s"
if len(fields) > 1:
for index in range(1, len(fields)):
request += ", " + fields[index] + " = %s"
request += " WHERE " + conditions[0] + " = %s"
if len(conditions) > 1:
for index in range(1, len(conditions)):
request += " AND " + conditions[index] + " = %s"
self.conn.ping()
cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
try:
if log:
logger.debug(request % tuple(values))
cursor.execute(request, tuple(values))
except MySQLdb.Error as error:
logger.error(request % tuple(values))
logger.error("SQL ERROR: " + str(error) + "\n-----------------")
cursor.close()
return 0
else:
self.conn.commit()
if log:
logger.debug("REQUEST OK.\n-----------------")
cursor.close()
return -1
def _delete(self, table, conditions, values, log=None):
""" Creates a standard DELETE request. """
request = "DELETE FROM " + table
request += " WHERE " + conditions[0] + " = %s"
if len(conditions) > 1:
for index in range(1, len(conditions)):
request += " AND " + conditions[index] + " = %s"
self.conn.ping()
cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
try:
if log:
logger.debug(request % tuple(values))
cursor.execute(request, tuple(values))
except MySQLdb.Error as error:
logger.error(request % tuple(values))
logger.error("SQL ERROR: " + str(error) + "\n-----------------")
cursor.close()
return 0
else:
self.conn.commit()
if log:
logger.debug("REQUEST OK.\n-----------------")
cursor.close()
return 1
def custom(self, query, values=None, log=None):
""" Creates a custom request. """
if values is None:
values = []
request = query
self.conn.ping()
cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
results = []
try:
if values:
if log:
logger.debug(request % tuple(values))
cursor.execute(request, tuple(values))
else:
if log:
logger.debug(request)
cursor.execute(request)
except MySQLdb.Error as error:
logger.error("SQL ERROR: " + str(error) + "\n-----------------")
else:
if re.match("DELETE", request) is not None:
self.conn.commit()
elif re.match("INSERT", request) is not None:
self.conn.commit()
elif re.match("UPDATE", request) is not None:
self.conn.commit()
for row in cursor.fetchall():
result = {}
for key in row.keys():
result[key] = row[key]
results.append(result)
if log:
logger.debug("REQUEST OK. Results: " + str(results) + "\n-----------------")
cursor.close()
return results
def call(self, name, values=None, log=None):
""" Calls a stored procedure. """
if values is None:
values = []
self.conn.ping()
cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
results = []
try:
if values:
if log:
logger.debug("PROCEDURE CALL: " + name + "| PARAMETERS: " + str(tuple(values)))
cursor.callproc(name, tuple(values))
else:
if log:
logger.debug("PROCEDURE CALL: " + name)
cursor.callproc(name)
except MySQLdb.Error as error:
logger.error("SQL ERROR: " + str(error) + "\n-----------------")
else:
self.conn.commit()
for row in cursor.fetchall():
result = {}
for key in row.keys():
result[key] = row[key]
results.append(result)
if log:
logger.debug("REQUEST OK. Results: " + str(results) + "\n-----------------")
cursor.close()
return results
def select(self, fields, tables, conditions, order, values, log=None):
""" Calls the internal __select function. """
result = self.__select(fields, tables, conditions, order, values, log)
return result
def insert(self, table, element, log=None):
""" Insert the element if it can be updated first (doesn't exists). """
update = self.update(table, element, log)
if update:
return update
fields = []
values = []
for key in element.keys():
fields.append(key)
values.append(element[key])
result = self.__insert(table, fields, values, log)
return result
def update(self, table, element, log=None):
""" Update the table for the given element id. """
if "id" not in element.keys():
return 0
fields = []
conditions = []
values = []
for key in element.keys():
if key != "id":
fields.append(key)
values.append(element[key])
conditions.append("id")
values.append(element["id"])
result = self.__update(table, fields, conditions, values, log)
if result == -1:
return element["id"]
return result
def clicked(self, domain, clicked):
if clicked:
domain.values["clicked"] = 1
return domain.save()
return 0
#domain_name = domain.values["name"]
#query = f'UPDATE ORMclick.domain SET ORMclick.domain.clicked = {clicked} WHERE ORMclick.domain.name = "{domain_name}"'
#result = self.custom(query)
#return result
def delete(self, table, element, log=None):
""" Removes the element from the table. """
conditions = ["id"]
values = element["id"]
result = self._delete(table, conditions, [values], log)
return result
class Connector(object):
"""
This class defines the basic objects used for accessing the database
(one object per table), and the getters and setters for them.
This makes the data management a lot easier at the cost of an increased
number of database requests.
"""
def __init__(self, db, table, order=None, log=False):
self.table = table
self.log = log
self.db = db
self.db.conn.ping()
if order:
self.order = [order]
else:
self.order = []
self.values = {}
def __str__(self):
return str(self.values)
def __eq__(self, other):
if not isinstance(other, Connector):
# don't attempt to compare against unrelated types
return NotImplemented
if len(self.values) != len(other.values):
return False
for key in self.values:
if key not in other.values:
return False
if self.values[key] != other.values[key]:
return False
return True
def load(self, value, args=None):
""" Loads the element depending on the given value. """
if args is None:
args = {}
conditions = ["id"]
values = [value]
if isinstance(value, str):
conditions = ["hash"]
self.values[conditions[0]] = value
for key in args.keys():
conditions.append(key)
values.append(args[key])
result = self.db.select("*", [self.table], conditions, self.order, values, self.log)
if not result:
pragma = self.db.custom("desc %s" % self.table)
for i in pragma:
if not i["Default"]:
self.values[i["Field"]] = None
else:
self.values[i["Field"]] = i["Default"]
self.values[conditions[0]] = value
return 0
if len(result) > 1:
logger.warning("Loading " + self.table + " '" + str(value) + "': Too many query results")
return 0
self.values = result[0]
return self.values["id"]
def save(self):
""" Saves the element values in the corresponding table. """
# nulls = []
if "id" in self.values.keys() and not self.values["id"]:
del self.values["id"]
response = self.db.insert(self.table, self.values, self.log)
if not response:
return 0
self.load(response)
return 1
def delete(self):
""" Deletes the element inside the Connector. """
response = self.db.delete(self.table, self.values, self.log)
if not response:
return 0
# self.values = {}
return response
def get(self, etype, order="id", args=None):
""" Get the relatives of the given type.
If args is not empty it will get only the elements that comply with
the conditions specified as the dict keys and the values inside each
condition. """
if args is None:
args = {}
if etype + "_id" in self.values.keys():
if not self.values[etype + "_id"]:
return None
element = type(self)
element = element(self.db, etype)
element.load(self.values[etype + "_id"])
return element
requests = []
tables = []
conditions = [self.table + "_id"]
orders = [order]
values = [self.values["id"]]
for key in args.keys():
conditions.append(key)
values.append(args[key])
if self.table + "_" + etype in CROSS_TABLES:
requests.append("DISTINCT " + etype + "_id")
tables.append(self.table + "_" + etype)
elif etype + "_" + self.table in CROSS_TABLES:
requests.append("DISTINCT " + etype + "_id")
tables.append(etype + "_" + self.table)
else:
requests.append("DISTINCT id")
tables.append(etype)
ids = self.db.select(requests, tables, conditions, orders, values, self.log)
elements = []
for index in ids:
element = type(self)
element = element(self.db, etype)
element.load(index[requests[0].replace("DISTINCT ", "")])
elements.append(element)
return elements
def add(self, element, args=None):
""" Add a new relation with the given element.
        This function is only used to create relations for which a specific
        relation table exists in the form element1_id, element2_id. """
if args is None:
args = {}
requests = ["DISTINCT id"]
tables = []
conditions = [self.table + "_id", element.table + "_id"]
orders = ["id"]
values = [self.values["id"], element.values["id"]]
if self.table + "_" + element.table in CROSS_TABLES:
tables.append(self.table + "_" + element.table)
elif element.table + "_" + self.table in CROSS_TABLES:
tables.append(element.table + "_" + self.table)
else:
return 0
ids = self.db.select(requests, tables, conditions, orders, values, self.log)
new_element = type(self)
new_element = new_element(self.db, tables[0])
if not ids:
new_element.values[self.table + "_id"] = self.values["id"]
new_element.values[element.table + "_id"] = element.values["id"]
else:
new_element.load(ids[0]["id"])
for key in args.keys():
if key == "insert_date" and "insert_date" in new_element.values.keys() and \
new_element.values["insert_date"]:
continue
new_element.values[key] = args[key]
if not new_element.save():
new_element.load(ids[0]["id"])
return new_element
def add_double(self, element1, element2, args=None):
""" Add a new relation with the given element.
This function is only used to create relations where exists
specific relation tables in the form element1_id, element2_id. """
if args is None:
args = {}
requests = ["DISTINCT id"]
tables = []
conditions = [self.table + "_id", element1.table + "_id", element2.table + "_id"]
if "resource_id" in args.keys():
conditions.append("resource_id")
orders = ["id"]
values = [self.values["id"], element1.values["id"], element2.values["id"]]
if "resource_id" in args.keys():
values.append(args["resource_id"])
if self.table + "_" + element1.table in CROSS_TABLES:
tables.append(self.table + "_" + element1.table)
elif element1.table + "_" + self.table in CROSS_TABLES:
tables.append(element1.table + "_" + self.table)
elif self.table + "_" + element2.table in CROSS_TABLES:
tables.append(self.table + "_" + element2.table)
elif element2.table + "_" + self.table in CROSS_TABLES:
tables.append(element2.table + "_" + self.table)
else:
return 0
ids = self.db.select(requests, tables, conditions, orders, values, self.log)
new_element = type(self)
new_element = new_element(self.db, tables[0])
if not ids:
new_element.values[self.table + "_id"] = self.values["id"]
new_element.values[element1.table + "_id"] = element1.values["id"]
new_element.values[element2.table + "_id"] = element2.values["id"]
else:
new_element.load(ids[0]["id"])
for key in args.keys():
if key == "insert_date" and "insert_date" in new_element.values.keys() and \
new_element.values["insert_date"]:
continue
new_element.values[key] = args[key]
if not new_element.save():
new_element.load(ids[0]["id"])
return new_element
def remove(self, element):
""" Removes the relation between two different items. """
requests = ["id"]
tables = []
conditions = [self.table + "_id", element.table + "_id"]
orders = ["id"]
values = [self.values["id"], element.values["id"]]
if self.table + "_" + element.table in CROSS_TABLES:
tables.append(self.table + "_" + element.table)
elif element.table + "_" + self.table in CROSS_TABLES:
tables.append(element.table + "_" + self.table)
else:
return 0
ids = self.db.select(requests, tables, conditions, orders, values, self.log)
if not ids:
return 1
values = {"id": ids[0]["id"]}
return Db().delete(tables[0], values)
def clean(self, etype, args=None):
""" Removes all the elements of the given type that relates to the Connector. """
if args is None:
args = {}
conditions = [self.table + "_id"]
values = [self.values["id"]]
for key in args.keys():
conditions.append(key)
values.append(args[key])
if self.table + "_" + etype in CROSS_TABLES:
tables = self.table + "_" + etype
elif etype + "_" + self.table in CROSS_TABLES:
tables = etype + "_" + self.table
else:
tables = etype
result = self.db._delete(tables, conditions, values, self.log)
return result
def get_all(self, args=None):
""" Gets ALL the table items in a collection of Connectors.
If args is not empty it will only get the elements that comply with
the passed conditions as the 'get' function. """
if args is None:
args = {}
requests = ["*"]
tables = [self.table]
conditions = []
orders = self.order
values = []
for key in args.keys():
conditions.append(key)
values.append(args[key])
ids = self.db.select(requests, tables, conditions, orders, values, self.log)
elements = []
for index in ids:
element = type(self)
element = element(self.db, self.table)
element.order = self.order
element.values = index
elements.append(element)
return elements
def get_property(self, prop="hash", args=None):
""" Gets ALL keys in a collection of Connectors.
If args is not empty it will only get the elements that comply with
the passed conditions as the 'get' function. """
if args is None:
args = {}
requests = ["DISTINCT " + prop]
tables = [self.table]
conditions = []
orders = self.order
values = []
for key in args.keys():
conditions.append(key)
values.append(args[key])
keys = []
results = self.db.select(requests, tables, conditions, orders, values, self.log)
for result in results:
keys.append(result[prop])
return keys
def count(self, args=None):
""" Counts the elements of the Connector table.
If args is not empty it will only count the elements that comply with
the passed conditions as the 'get' function. """
if args is None:
args = {}
conditions = []
values = []
for key in args.keys():
conditions.append(key)
values.append(args[key])
request = "SELECT COUNT (id) FROM " + self.table
if conditions:
request = request + " WHERE "
for index, cond in enumerate(conditions):
if values[index] == "NULL":
request += cond + " IS %s"
values[index] = None
elif values[index] == "NOT NULL":
request += cond + " IS NOT %s"
values[index] = None
else:
request += cond + " = %s"
if index < len(conditions) - 1:
request += " AND "
result = self.db.custom(request, values, self.log)
return result[0]["COUNT (id)"]
|
the-stack_106_32087 | from inspect import getsource
import urllib.request
import json
# from pkg_resources import SOURCE_DIST
from .models import Articles, Sources
api_key = None
sources_url = None
articles_url = None
def configure_request(app):
    global api_key, sources_url, articles_url
    api_key = app.config['NEWS_API_KEY']
    # The original file never assigned the URL templates, leaving them as None;
    # the config key names below are assumptions added so the module can run.
    sources_url = app.config['SOURCES_API_BASE_URL']
    articles_url = app.config['ARTICLES_API_BASE_URL']
def get_sources(category):
    get_sources_url = sources_url.format(category, api_key)
    with urllib.request.urlopen(get_sources_url) as url:
sources_data = url.read()
sources_response = json.loads(sources_data)
sources_results = None
if sources_response['sources']:
sources_list = sources_response['sources']
sources_results = process_sources(sources_list)
return sources_results
def process_sources(sources_list):
'''
    Function that processes the source results and turns them into a list of Sources objects
'''
sources_results = []
for source in sources_list:
id = source.get('id')
name = source.get("name")
description = source.get('description')
url = source.get('url')
category = source.get('category')
language = source.get('language')
if language == 'en':
source_object = Sources(id, name, description, url, category, language)
sources_results.append(source_object)
return sources_results
def get_articles(id):
'''
    Function that gets the json response of articles for the given source id and processes it
'''
get_articles_url = articles_url.format(id, api_key)
with urllib.request.urlopen(get_articles_url) as url:
articles_data = url.read()
articles_response = json.loads(articles_data)
articles_results = None
if articles_response['articles']:
articles_list = articles_response['articles']
articles_results = process_articles(articles_list)
return articles_results
def process_articles(articles_list):
articles_results = []
for article in articles_list:
id = article['source']['id']
        name = article['source'].get('name')  # the source name is nested under article['source']
author = article.get('author')
content = article.get('content')
title = article.get('title')
description = article.get('description')
url = article.get('url')
urlToImage = article.get('urlToImage')
publishedAt = article.get('publishedAt')
if urlToImage:
articles_object = Articles(
id, name, author, title, description, url, urlToImage, publishedAt, content)
articles_results.append(articles_object)
return articles_results
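# Illustrative usage sketch (added for clarity, not part of the original
# module). The URL templates configured through configure_request() are
# expected to contain two '{}' placeholders (category/source id, then the API
# key); the endpoints shown below are assumptions:
#
#   sources_url = "https://newsapi.org/v2/sources?category={}&apiKey={}"
#   articles_url = "https://newsapi.org/v2/top-headlines?sources={}&apiKey={}"
#   business_sources = get_sources("business")
#   articles = get_articles(business_sources[0].id)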
|
the-stack_106_32090 | # Tests for parsing of label queries
import unittest
import os
import sys
# Insert .. at the beginning of path so we use this version instead
# of something that's already been installed
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import musicbrainzngs
from musicbrainzngs import mbxml
class GetLabelTest(unittest.TestCase):
def setUp(self):
self.datadir = os.path.join(os.path.dirname(__file__), "data", "label")
def testLabelAliases(self):
fn = os.path.join(self.datadir, "022fe361-596c-43a0-8e22-bad712bb9548-aliases.xml")
res = mbxml.parse_message(open(fn))
aliases = res["label"]["alias-list"]
self.assertEqual(len(aliases), 4)
a0 = aliases[0]
self.assertEqual(a0["alias"], "EMI")
self.assertEqual(a0["sort-name"], "EMI")
a1 = aliases[1]
self.assertEqual(a1["alias"], "EMI Records (UK)")
self.assertEqual(a1["sort-name"], "EMI Records (UK)")
fn = os.path.join(self.datadir, "e72fabf2-74a3-4444-a9a5-316296cbfc8d-aliases.xml")
res = mbxml.parse_message(open(fn))
aliases = res["label"]["alias-list"]
self.assertEqual(len(aliases), 1)
a0 = aliases[0]
self.assertEqual(a0["alias"], "Ki/oon Records Inc.")
self.assertEqual(a0["sort-name"], "Ki/oon Records Inc.")
self.assertEqual(a0["begin-date"], "2001-10")
self.assertEqual(a0["end-date"], "2012-04")
|
the-stack_106_32091 | import unittest
from contextlib import contextmanager
from unittest import mock
import dbt.flags as flags
from dbt.adapters.snowflake import SnowflakeAdapter
from dbt.adapters.base.query_headers import MacroQueryStringSetter
from dbt.logger import GLOBAL_LOGGER as logger # noqa
from dbt.parser.results import ParseResult
from snowflake import connector as snowflake_connector
from .utils import config_from_parts_or_dicts, inject_adapter, mock_connection
class TestSnowflakeAdapter(unittest.TestCase):
def setUp(self):
flags.STRICT_MODE = False
profile_cfg = {
'outputs': {
'test': {
'type': 'snowflake',
'account': 'test_account',
'user': 'test_user',
'database': 'test_database',
'warehouse': 'test_warehouse',
'schema': 'public',
},
},
'target': 'test',
}
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'quoting': {
'identifier': False,
'schema': True,
},
'query-comment': 'dbt',
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self.assertEqual(self.config.query_comment.comment, 'dbt')
self.assertEqual(self.config.query_comment.append, False)
self.handle = mock.MagicMock(
spec=snowflake_connector.SnowflakeConnection)
self.cursor = self.handle.cursor.return_value
self.mock_execute = self.cursor.execute
self.patcher = mock.patch(
'dbt.adapters.snowflake.connections.snowflake.connector.connect'
)
self.snowflake = self.patcher.start()
self.load_patch = mock.patch('dbt.parser.manifest.make_parse_result')
self.mock_parse_result = self.load_patch.start()
self.mock_parse_result.return_value = ParseResult.rpc()
self.snowflake.return_value = self.handle
self.adapter = SnowflakeAdapter(self.config)
self.adapter.connections.query_header = MacroQueryStringSetter(self.config, mock.MagicMock(macros={}))
self.qh_patch = mock.patch.object(self.adapter.connections.query_header, 'add')
self.mock_query_header_add = self.qh_patch.start()
self.mock_query_header_add.side_effect = lambda q: '/* dbt */\n{}'.format(q)
self.adapter.acquire_connection()
inject_adapter(self.adapter)
def tearDown(self):
# we want a unique self.handle every time.
self.adapter.cleanup_connections()
self.qh_patch.stop()
self.patcher.stop()
self.load_patch.stop()
def test_quoting_on_drop_schema(self):
self.adapter.drop_schema(
database='test_database',
schema='test_schema'
)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\ndrop schema if exists test_database."test_schema" cascade', None)
])
def test_quoting_on_drop(self):
relation = self.adapter.Relation.create(
database='test_database',
schema='test_schema',
identifier='test_table',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.drop_relation(relation)
self.mock_execute.assert_has_calls([
mock.call(
'/* dbt */\ndrop table if exists test_database."test_schema".test_table cascade',
None
)
])
def test_quoting_on_truncate(self):
relation = self.adapter.Relation.create(
database='test_database',
schema='test_schema',
identifier='test_table',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.truncate_relation(relation)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\ntruncate table test_database."test_schema".test_table', None)
])
def test_quoting_on_rename(self):
from_relation = self.adapter.Relation.create(
database='test_database',
schema='test_schema',
identifier='table_a',
type='table',
quote_policy=self.adapter.config.quoting,
)
to_relation = self.adapter.Relation.create(
database='test_database',
schema='test_schema',
identifier='table_b',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.rename_relation(
from_relation=from_relation,
to_relation=to_relation
)
self.mock_execute.assert_has_calls([
mock.call(
'/* dbt */\nalter table test_database."test_schema".table_a rename to test_database."test_schema".table_b',
None
)
])
@contextmanager
def current_warehouse(self, response):
# there is probably some elegant way built into mock.patch to do this
fetchall_return = self.cursor.fetchall.return_value
execute_side_effect = self.mock_execute.side_effect
def execute_effect(sql, *args, **kwargs):
if sql == '/* dbt */\nselect current_warehouse() as warehouse':
self.cursor.description = [['name']]
self.cursor.fetchall.return_value = [[response]]
else:
self.cursor.description = None
self.cursor.fetchall.return_value = fetchall_return
return self.mock_execute.return_value
self.mock_execute.side_effect = execute_effect
try:
yield
finally:
self.cursor.fetchall.return_value = fetchall_return
self.mock_execute.side_effect = execute_side_effect
def _strip_transactions(self):
result = []
for call_args in self.mock_execute.call_args_list:
args, kwargs = tuple(call_args)
is_transactional = (
len(kwargs) == 0 and
len(args) == 2 and
args[1] is None and
args[0] in {'BEGIN', 'COMMIT'}
)
if not is_transactional:
result.append(call_args)
return result
def test_pre_post_hooks_warehouse(self):
with self.current_warehouse('warehouse'):
config = {'snowflake_warehouse': 'other_warehouse'}
result = self.adapter.pre_model_hook(config)
self.assertIsNotNone(result)
calls = [
mock.call('/* dbt */\nselect current_warehouse() as warehouse', None),
mock.call('/* dbt */\nuse warehouse other_warehouse', None)
]
self.mock_execute.assert_has_calls(calls)
self.adapter.post_model_hook(config, result)
calls.append(mock.call('/* dbt */\nuse warehouse warehouse', None))
self.mock_execute.assert_has_calls(calls)
def test_pre_post_hooks_no_warehouse(self):
with self.current_warehouse('warehouse'):
config = {}
result = self.adapter.pre_model_hook(config)
self.assertIsNone(result)
self.mock_execute.assert_not_called()
self.adapter.post_model_hook(config, result)
self.mock_execute.assert_not_called()
def test_cancel_open_connections_empty(self):
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_master(self):
key = self.adapter.connections.get_thread_identifier()
self.adapter.connections.thread_connections[key] = mock_connection('master')
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_single(self):
master = mock_connection('master')
model = mock_connection('model')
model.handle.session_id = 42
key = self.adapter.connections.get_thread_identifier()
self.adapter.connections.thread_connections.update({
key: master,
1: model,
})
with mock.patch.object(self.adapter.connections, 'add_query') as add_query:
query_result = mock.MagicMock()
add_query.return_value = (None, query_result)
self.assertEqual(
len(list(self.adapter.cancel_open_connections())), 1)
add_query.assert_called_once_with('select system$abort_session(42)')
def test_client_session_keep_alive_false_by_default(self):
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', private_key=None, application='dbt')
])
def test_client_session_keep_alive_true(self):
self.config.credentials = self.config.credentials.replace(
client_session_keep_alive=True)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=True, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', private_key=None, application='dbt')
])
def test_user_pass_authentication(self):
self.config.credentials = self.config.credentials.replace(
password='test_password',
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
password='test_password', role=None, schema='public',
user='test_user', warehouse='test_warehouse', private_key=None,
application='dbt')
])
def test_authenticator_user_pass_authentication(self):
self.config.credentials = self.config.credentials.replace(
password='test_password',
authenticator='test_sso_url',
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
password='test_password', role=None, schema='public',
user='test_user', warehouse='test_warehouse',
authenticator='test_sso_url', private_key=None,
application='dbt')
])
def test_authenticator_externalbrowser_authentication(self):
self.config.credentials = self.config.credentials.replace(
authenticator='externalbrowser'
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', authenticator='externalbrowser',
private_key=None, application='dbt')
])
def test_authenticator_oauth_authentication(self):
self.config.credentials = self.config.credentials.replace(
authenticator='oauth',
token='my-oauth-token',
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', authenticator='oauth', token='my-oauth-token',
private_key=None, application='dbt')
])
@mock.patch('dbt.adapters.snowflake.SnowflakeCredentials._get_private_key', return_value='test_key')
def test_authenticator_private_key_authentication(self, mock_get_private_key):
self.config.credentials = self.config.credentials.replace(
private_key_path='/tmp/test_key.p8',
private_key_passphrase='p@ssphr@se',
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', private_key='test_key',
application='dbt')
])
@mock.patch('dbt.adapters.snowflake.SnowflakeCredentials._get_private_key', return_value='test_key')
def test_authenticator_private_key_authentication_no_passphrase(self, mock_get_private_key):
self.config.credentials = self.config.credentials.replace(
private_key_path='/tmp/test_key.p8',
private_key_passphrase=None,
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', private_key='test_key',
application='dbt')
])
|
the-stack_106_32095 | """
Multiple plots vignette
========================
Demo multiple plots and style the figure.
"""
import matplotlib.pyplot as plt
ax = plt.subplot(2, 1, 1)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.text(-0.05, 1.02, " Multiplot: plt.subplot(...)\n",
horizontalalignment='left',
verticalalignment='top',
size='xx-large',
bbox=dict(facecolor='white', alpha=1.0, width=400, height=65),
transform=ax.transAxes)
plt.text(-0.05, 1.01, "\n\n Plot several plots at once ",
horizontalalignment='left',
verticalalignment='top',
size='large',
transform=ax.transAxes)
ax = plt.subplot(2, 2, 3)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax = plt.subplot(2, 2, 4)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.show()
|
the-stack_106_32096 | from __future__ import print_function
from __future__ import division
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
import os
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def augmentation(image_path, iters, file_list):
image_datagen = ImageDataGenerator(rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest',
data_format='channels_last')
for image_name in file_list:
image = load_img(os.path.join(os.path.abspath(image_path), image_name))
try:
# Image is converted to array type, because flow() receives numpy array as parameter
image_arr = img_to_array(image, data_format="channels_last")
image_arr = image_arr.reshape((1,) + image_arr.shape)#flow() requires data to be 4 dimensions
i = 0
for _ in image_datagen.flow(
image_arr, # this is the target directory
batch_size=1,
save_to_dir=image_path,
save_prefix='aug',
save_format='jpg'):
i += 1
if i >= iters:
break
except:
print('Skip image ', image_name)
continue
if __name__ == '__main__':
dataset_path = './ChineseFood/images/'
filelist = os.listdir(dataset_path)
for filename in filelist:
if filename.startswith('.'):
            filelist.remove(filename)
filelist.sort()
    image_size = 2000  # target of roughly 2000 images per class
i = 0
class_num = len(filelist)
for filename in filelist:
image_path = os.path.join(os.path.abspath(dataset_path), filename)
file_list = os.listdir(image_path)
for file_name in file_list:
if file_name.startswith('.'):
file_list.remove(file_name)
image_num = len(file_list)
iters = (image_size - image_num) // image_num # '//' has the same function as 'floor'
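        # e.g. a class with 500 original images gets (2000 - 500) // 500 = 3 augmented copies per image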
print('Have done: {}/{}. Doing: {}.'.format(i, class_num, filename))
augmentation(image_path, iters, file_list)
i += 1
print('All done!')
|
the-stack_106_32097 | """
SOILICE to mrfso converter
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import cmor
import cdms2
import logging
import numpy as np
from e3sm_to_cmip.lib import handle_variables
# list of raw variable names needed
RAW_VARIABLES = [str('SOILICE')]
VAR_NAME = str('mrfso')
VAR_UNITS = str('kg m-2')
TABLE = str('CMIP6_Lmon.json')
def write_data(varid, data, timeval, timebnds, index, **kwargs):
"""
mrfso = verticalSum(SOILICE, capped_at=5000)
"""
# we only care about data with a value greater then 0
mask = np.greater(data['SOILICE'][index, :], 0.0)
# sum the data over the levgrnd axis
outdata = np.sum(
data['SOILICE'][index, :],
axis=0)
# replace all values greater then 5k with 5k
capped = np.where(
np.greater(outdata, 5000.0),
5000.0,
outdata)
outdata = np.where(
mask,
capped,
outdata)
if kwargs.get('simple'):
return outdata
cmor.write(
varid,
outdata,
time_vals=timeval,
time_bnds=timebnds)
def handle(infiles, tables, user_input_path, **kwargs):
return handle_variables(
metadata_path=user_input_path,
tables=tables,
table=kwargs.get('table', TABLE),
infiles=infiles,
raw_variables=RAW_VARIABLES,
write_data=write_data,
outvar_name=VAR_NAME,
outvar_units=VAR_UNITS,
serial=kwargs.get('serial'),
logdir=kwargs.get('logdir'),
simple=kwargs.get('simple'),
outpath=kwargs.get('outpath'))
# ------------------------------------------------------------------
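if __name__ == '__main__':
    # Illustrative sketch only (not part of the converter): the capped vertical
    # sum performed in write_data(), on a made-up toy array. The shape
    # (levgrnd=2, ncol=3) and the values are assumptions for demonstration.
    toy_soilice = np.array([[1000.0, 4000.0, 0.0],
                            [2500.0, 3000.0, 0.0]])
    column_total = np.sum(toy_soilice, axis=0)                  # [3500., 7000., 0.]
    capped = np.where(np.greater(column_total, 5000.0), 5000.0, column_total)
    print(capped)                                               # [3500., 5000., 0.]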
|
the-stack_106_32098 | """Config flow to configure Coolmaster."""
from pycoolmasternet_async import CoolMasterNet
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.const import CONF_HOST, CONF_PORT
from .const import AVAILABLE_MODES, CONF_SUPPORTED_MODES, DEFAULT_PORT, DOMAIN
MODES_SCHEMA = {vol.Required(mode, default=True): bool for mode in AVAILABLE_MODES}
DATA_SCHEMA = vol.Schema({vol.Required(CONF_HOST): str, **MODES_SCHEMA})
async def _validate_connection(hass: core.HomeAssistant, host):
cool = CoolMasterNet(host, DEFAULT_PORT)
units = await cool.status()
return bool(units)
class CoolmasterConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Coolmaster config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@core.callback
def _async_get_entry(self, data):
supported_modes = [
key for (key, value) in data.items() if key in AVAILABLE_MODES and value
]
return self.async_create_entry(
title=data[CONF_HOST],
data={
CONF_HOST: data[CONF_HOST],
CONF_PORT: DEFAULT_PORT,
CONF_SUPPORTED_MODES: supported_modes,
},
)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if user_input is None:
return self.async_show_form(step_id="user", data_schema=DATA_SCHEMA)
errors = {}
host = user_input[CONF_HOST]
try:
result = await _validate_connection(self.hass, host)
if not result:
errors["base"] = "no_units"
except (OSError, ConnectionRefusedError, TimeoutError):
errors["base"] = "cannot_connect"
if errors:
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
return self._async_get_entry(user_input)
|
the-stack_106_32101 | """tech URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
# url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'article.views.index'),
url(r'^about$', 'article.views.about'),
url(r'^bate$', 'article.views.bate'),
url(r'^article/', include('article.urls')),
url(r'^admin/', include('admin.urls')),
url(r'^ueditor$', 'glue.plug.ueditor.ueditor_action'),
url(r'^pic/(?P<pic_name>.+)$', 'glue.plug.ueditor.show_pic'),
]
|
the-stack_106_32103 | from pytorch_lightning.loggers import LoggerCollection
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import pytorch_lightning as pl
import torch as tr
import torch.nn.functional as F
from torch import Tensor, nn
from torch.optim import Adam
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.tensorboard.writer import SummaryWriter
from detection_utils.boxes import generate_targets
from detection_utils.demo.plot import draw_detections, plot_confusion_matrix, plot_img
from ..pytorch import softmax_focal_loss
from .boxes import DEFAULT_NMS_THRESHOLD, compute_batch_stats
def loss(
class_predictions: Tensor,
regression_predictions: Tensor,
class_targets: Tensor,
regression_targets: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
    Computes the classification and regression losses:
    - smooth L1 (Huber) loss for regression (only on foreground anchor boxes)
    - softmax focal loss for classification (excluding ignore anchor boxes)
Parameters
----------
class_predictions : Tensor, shape-(N, K, num-class)
regression_predictions : Tensor, shape-(N, K, 4)
class_targets : Tensor, shape-(N, K)
regression_targets : Tensor, shape-(N, K, 4)
Returns
-------
classification_loss, regression_loss: Tuple[Tensor, Tensor], shape-() shape-()
The mean classification loss and regression loss, respectively.
Notes
-----
`N` is the batch size. `K` is the number of anchor boxes associated with
each image.
"""
# shape-(N*K,)
class_targets = class_targets.reshape(-1)
# shape-(N*K, 4)
regression_targets = regression_targets.reshape(-1, 4)
# shape-(N*K, num-class)
class_predictions = class_predictions.reshape(-1, class_predictions.shape[-1])
# shape-(N*K, 4)
regression_predictions = regression_predictions.reshape(-1, 4)
is_true_foreground = tr.squeeze(class_targets > 0)
num_foreground = is_true_foreground.sum().item()
if is_true_foreground.numel() > 0:
regression_loss = F.smooth_l1_loss(
regression_predictions[is_true_foreground],
regression_targets[is_true_foreground],
)
else:
regression_loss = tr.tensor(0).float()
is_not_ignore = tr.squeeze(class_targets > -1)
# the sum of focal loss terms is normalized by the number
# of anchors assigned to a ground-truth box
classification_loss = (
softmax_focal_loss(
class_predictions[is_not_ignore],
class_targets[is_not_ignore],
alpha=0.25,
gamma=2,
reduction="sum",
)
/ num_foreground
)
return classification_loss, regression_loss
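# Illustrative call (shapes and values are arbitrary assumptions, not part of
# the training code): with a batch of N=2 images, K=9 anchors and 4 classes,
#   cls_loss, reg_loss = loss(
#       class_predictions=tr.randn(2, 9, 4),
#       regression_predictions=tr.randn(2, 9, 4),
#       class_targets=tr.randint(-1, 4, (2, 9)),
#       regression_targets=tr.randn(2, 9, 4),
#   )
# Both returns are scalar tensors; the classification term is normalized by the
# number of foreground anchors (class_targets > 0), so this sketch assumes at
# least one target is positive.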
class ShapeDetectionModel(pl.LightningModule):
def __init__(self, data_experiment_path: Optional[Union[str, Path]] = None):
super().__init__()
self.confusion_matrices: List[np.ndarray] = []
# stores info for plotting boxes/labels for val-image 0
# at subsequent epoch states of model
# [(boxes-epoch0, labels-epoch0, scores-epoch0), ...]
self.boxes_labels_scores: List[Tuple[np.ndarray, np.ndarray, np.ndarray]] = []
self.data_path = (
Path(data_experiment_path) if data_experiment_path is not None else None
)
self.conv1 = nn.Conv2d(3, 10, 3, padding=1)
self.conv2 = nn.Conv2d(10, 20, 3, padding=1)
self.conv3 = nn.Conv2d(20, 30, 3, padding=1)
self.conv4 = nn.Conv2d(30, 40, 3, padding=1)
self.bn1 = nn.BatchNorm2d(10)
self.bn2 = nn.BatchNorm2d(20)
self.bn3 = nn.BatchNorm2d(30)
self.bn4 = nn.BatchNorm2d(40)
# background / rectangle / triangle / circle
self.classification = nn.Conv2d(40, 4, 1)
self.regression = nn.Conv2d(40, 4, 1)
for layer in (
self.conv1,
self.conv2,
self.conv3,
self.conv4,
self.classification,
self.regression,
):
nn.init.xavier_normal_(layer.weight, np.sqrt(2))
nn.init.constant_(layer.bias, 0)
nn.init.constant_(
self.classification.bias[0], -4.6
) # roughly -log((1-π)/π) for π = 0.01
def forward(self, imgs: Tensor) -> Tuple[Tensor, Tensor]:
""""
Computes the classification scores and bounding box regression associated
with each anchor box of each image.
Parameters
----------
imgs : Tensor, shape-(N, 3, H, W)
A batch of N images.
Returns
-------
classifications, regressions : Tuple[Tensor, Tensor]
shape-(N, K, N_class), shape-(N, K, 4)
For each of N images in the batch, returns the classification scores
and bbox regressions associated with each of the K anchor boxes associated
with that image.
Notes
-----
The anchor boxes are flattened in row-major order"""
imgs = F.max_pool2d(F.relu(self.bn1(self.conv1(imgs))), 2)
imgs = F.max_pool2d(F.relu(self.bn2(self.conv2(imgs))), 2)
imgs = F.max_pool2d(F.relu(self.bn3(self.conv3(imgs))), 2)
imgs = F.max_pool2d(F.relu(self.bn4(self.conv4(imgs))), 2)
# (N, num-classes, R, C) -> (N, R, C, num-classes)
classifications = self.classification(imgs).permute(0, 2, 3, 1)
# (N, R, C, num-classes) -> (N, R*C, num-classes)
classifications = classifications.reshape(
imgs.shape[0], -1, classifications.shape[-1]
)
# (N, 4, R, C) -> (N, R, C, 4)
regressions = self.regression(imgs).permute(0, 2, 3, 1)
# (N, R, C, 4) -> (N, R*C, 4)
regressions = regressions.reshape(imgs.shape[0], -1, 4)
return classifications, regressions
def training_step(self, batch: Tuple[Tensor, ...], batch_idx: int) -> Tensor:
imgs, class_targets, bbox_targets = batch
class_predictions, regression_predictions = self(imgs)
total_cls_loss, total_reg_loss = loss(
class_predictions, regression_predictions, class_targets, bbox_targets,
)
tot_loss = total_cls_loss + total_reg_loss
self.log("train_loss", total_cls_loss + total_reg_loss)
return tot_loss
def validation_step(self, batch: Tuple[Tensor, ...], batch_idx: int) -> Tensor:
imgs, class_targets, bbox_targets = batch
class_predictions, regression_predictions = self(imgs)
total_cls_loss, total_reg_loss = loss(
class_predictions, regression_predictions, class_targets, bbox_targets,
)
self.log("val_loss", total_cls_loss + total_reg_loss, prog_bar=True)
start = len(imgs) * (batch_idx)
stop = len(imgs) * (batch_idx + 1)
confusion_matrix, precision, recall = compute_batch_stats(
class_predictions=class_predictions,
regression_predictions=regression_predictions,
boxes=self.val_boxes[start:stop],
labels=self.val_labels[start:stop],
feature_map_width=imgs.shape[2] // 16, # backbone downsamples by factor 16
)
ap = precision.mean()
ar = recall.mean()
self.log("val_precision", ap)
self.log("val_recall", ar)
self.log("ap+ar", ap + ar)
return confusion_matrix
def validation_epoch_end(self, outputs: List[Any]) -> None:
"""Plots confusion matrix and example validation image with detections"""
total_confusion_matrix = sum(outputs)
normed_conf_matrix = total_confusion_matrix / tr.sum(
total_confusion_matrix, dim=0, keepdim=True
)
normed_conf_matrix = np.nan_to_num(normed_conf_matrix.numpy())
self.confusion_matrices.append(normed_conf_matrix)
# note: exclude negatives from classification accuracy
val_acc = np.einsum("ii", normed_conf_matrix[1:, 1:]) / (
len(normed_conf_matrix) - 1
)
self.log("val_acc", val_acc)
boxes, labels, scores = zip(
*self.get_detections(self.val_images[:1].to(self.device))
)
self.boxes_labels_scores.append((boxes[0], labels[0], scores[0]))
tensorboard: Optional[SummaryWriter] = self._get_tensorboard_logger()
if tensorboard is not None:
fig, ax = plot_confusion_matrix(
normed_conf_matrix, font_size=15, figsize=(8, 8)
)
tensorboard.add_figure(
"confusion-matrix", fig, global_step=self.current_epoch
)
img_id = 0
fig, ax = plot_img(self.val_images[img_id], figsize=(8, 8))
draw_detections(ax, boxes=boxes[img_id], labels=labels[img_id])
tensorboard.add_figure("example-image", fig, global_step=self.current_epoch)
def configure_optimizers(self):
return Adam(self.parameters(), lr=5e-4)
def setup(self, stage: str) -> None:
from .boxes import make_anchor_boxes
from .data import load_data
assert self.data_path is not None
images, self.train_boxes, self.train_labels = load_data(
self.data_path / "train"
)
H, W = images.shape[1:3]
val_images, self.val_boxes, self.val_labels = load_data(self.data_path / "val")
self.train_images = tr.tensor(images.transpose((0, 3, 1, 2)))
self.val_images = tr.tensor(val_images.transpose((0, 3, 1, 2)))
self.anchor_boxes = make_anchor_boxes(image_height=H, image_width=W)
def train_dataloader(self) -> DataLoader:
train_cls_targs, train_reg_targs = zip(
*(
generate_targets(self.anchor_boxes, bxs, lbls, 0.2, 0.1)
for bxs, lbls in zip(self.train_boxes, self.train_labels)
)
)
train_reg_targs = tr.tensor(train_reg_targs).float()
train_cls_targs = tr.tensor(train_cls_targs).long()
return DataLoader(
TensorDataset(self.train_images, train_cls_targs, train_reg_targs),
batch_size=16,
pin_memory=True,
num_workers=4,
shuffle=True,
drop_last=True,
)
def val_dataloader(self) -> DataLoader:
val_cls_targs, val_reg_targs = zip(
*(
generate_targets(self.anchor_boxes, bxs, lbls, 0.2, 0.1)
for bxs, lbls in zip(self.val_boxes, self.val_labels)
)
)
val_reg_targs = tr.tensor(val_reg_targs).float()
val_cls_targs = tr.tensor(val_cls_targs).long()
return DataLoader(
TensorDataset(self.val_images, val_cls_targs, val_reg_targs),
batch_size=16,
pin_memory=True,
num_workers=4,
shuffle=False,
drop_last=True,
)
def get_detections(
self,
imgs: Tensor,
score_threshold: float = None,
nms_threshold: float = DEFAULT_NMS_THRESHOLD,
) -> List[Tuple[np.ndarray, np.ndarray, np.ndarray]]:
""""
Computes the best bounding boxes and classification scores.
Parameters
----------
imgs : Tensor, shape-(N, 3, H, W)
A batch of N images.
score_threshold: Optional[float]
If specified, detections with foreground scores below this
threshold are ignored
nms_threshold: float, optional
The IoU threshold to use for NMS, above which one of two box will be suppressed.
Returns
-------
List[Tuple[np.ndarray, np.ndarray, np.ndarray]]
The (boxes, labels, and confidence scores) for each of the N images
- boxes: ndarray shape=(N_det, 4)
- labels : ndarray shape=(N_det, 1)
- scores : ndarray shape=(N_det,)]
Notes
-----
The anchor boxes are flattened in row-major order.
Boxes are reported as (xlo, ylo, xhi, yhi).
"""
from detection_utils.demo.boxes import compute_detections
was_training = self.training
self.eval()
try:
with tr.no_grad():
class_predictions, regression_predictions = self.forward(imgs)
finally:
if was_training:
self.train(mode=True)
return [
compute_detections(
classifications=cls,
regressions=regr,
feature_map_width=imgs.shape[-1] // 16,
nms_threshold=nms_threshold,
score_threshold=score_threshold,
)
for cls, regr in zip(class_predictions, regression_predictions)
]
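    # Illustrative use of get_detections (tensor and threshold values below are
    # assumptions, not part of the training loop):
    #   detections = model.get_detections(imgs, score_threshold=0.5)
    #   boxes, labels, scores = detections[0]  # arrays for the first image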
def _get_tensorboard_logger(self) -> Optional[SummaryWriter]:
if isinstance(self.logger.experiment, SummaryWriter):
return self.logger.experiment
elif isinstance(self.logger, LoggerCollection):
for logger in self.logger.experiment:
if isinstance(logger, SummaryWriter):
return logger
return None
|
the-stack_106_32104 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# CAM signature/authentication error.
AUTHFAILURE = 'AuthFailure'
# DryRun operation: the request would have succeeded, but the DryRun parameter was passed.
DRYRUNOPERATION = 'DryRunOperation'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# KMS operation failed.
FAILEDOPERATION_ACCESSKMSERROR = 'FailedOperation.AccessKmsError'
# Rotation is forbidden for this secret.
FAILEDOPERATION_ROTATIONFORBIDDEN = 'FailedOperation.RotationForbidden'
# Internal error.
INTERNALERROR = 'InternalError'
# Invalid parameter.
INVALIDPARAMETER = 'InvalidParameter'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Duplicate tag keys.
INVALIDPARAMETERVALUE_TAGKEYSDUPLICATED = 'InvalidParameterValue.TagKeysDuplicated'
# The tag key or tag value does not exist.
INVALIDPARAMETERVALUE_TAGSNOTEXISTED = 'InvalidParameterValue.TagsNotExisted'
# Quota limit exceeded.
LIMITEXCEEDED = 'LimitExceeded'
# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'
# Operation denied.
OPERATIONDENIED = 'OperationDenied'
# The number of AccessKeys has reached the upper limit.
OPERATIONDENIED_ACCESSKEYOVERLIMIT = 'OperationDenied.AccessKeyOverLimit'
# Manually updating a secret with automatic rotation enabled is not allowed.
OPERATIONDENIED_AUTOROTATEDRESOURCE = 'OperationDenied.AutoRotatedResource'
# The role does not exist.
OPERATIONDENIED_ROLENOTEXIST = 'OperationDenied.RoleNotExist'
# The number of requests exceeds the frequency limit.
REQUESTLIMITEXCEEDED = 'RequestLimitExceeded'
# The resource is in use.
RESOURCEINUSE = 'ResourceInUse'
# The secret name already exists.
RESOURCEINUSE_SECRETEXISTS = 'ResourceInUse.SecretExists'
# The version ID already exists.
RESOURCEINUSE_VERSIONIDEXISTS = 'ResourceInUse.VersionIdExists'
# Insufficient resources.
RESOURCEINSUFFICIENT = 'ResourceInsufficient'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# The secret does not exist.
RESOURCENOTFOUND_SECRETNOTEXIST = 'ResourceNotFound.SecretNotExist'
# The resource is unavailable.
RESOURCEUNAVAILABLE = 'ResourceUnavailable'
# The service has not been purchased.
RESOURCEUNAVAILABLE_NOTPURCHASED = 'ResourceUnavailable.NotPurchased'
# The secret is disabled.
RESOURCEUNAVAILABLE_RESOURCEDISABLED = 'ResourceUnavailable.ResourceDisabled'
# The secret is in a scheduled-deletion state.
RESOURCEUNAVAILABLE_RESOURCEPENDINGDELETED = 'ResourceUnavailable.ResourcePendingDeleted'
# The secret has not been initialized.
RESOURCEUNAVAILABLE_RESOURCEUNINITIALIZED = 'ResourceUnavailable.ResourceUninitialized'
# Resources are sold out.
RESOURCESSOLDOUT = 'ResourcesSoldOut'
# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# Failed to access KMS.
UNAUTHORIZEDOPERATION_ACCESSKMSERROR = 'UnauthorizedOperation.AccessKmsError'
# Unknown parameter.
UNKNOWNPARAMETER = 'UnknownParameter'
# Operation not supported.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
|
the-stack_106_32105 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Debugger to save results during training.
"""
from __future__ import annotations
import threading
from queue import Queue
from typing import Optional
from torch import Tensor
from onevision.type import Callable
from onevision.utils import console
__all__ = [
"Debugger"
]
# MARK: - Debugger
class Debugger:
"""
Attributes:
every_n_epochs (int):
Number of epochs between debugging. To disable, set
`every_n_epochs=0`. Default: `1`.
run_in_parallel (bool):
If `True` runs debugging process in a separated thread.
Default: `True`.
queue_size (int):
Debug queue size. It should equal the value of `save_max_n`.
Default: `20`.
save_max_n (int):
Maximum debugging items to be kept. Default: `20`.
save_to_subdir (bool):
            Save all debug images of the same epoch to a sub-directory named
after the epoch number. Default: `True`.
image_quality (int):
Image quality to be saved. Default: `95`.
verbose (bool):
If `True` shows the results on the screen. Default: `False`.
show_max_n (int):
Maximum debugging items to be shown. Default: `8`.
show_func (FunCls, optional):
Function to visualize the debug results. Default: `None`.
wait_time (float):
Pause some times before showing the next image. Default: `0.001`.
"""
# MARK: Magic Functions
def __init__(
self,
every_n_epochs : int = 1,
run_in_parallel: bool = True,
queue_size : Optional[int] = 20,
save_max_n : int = 20,
save_to_subdir : bool = True,
image_quality : int = 95,
verbose : bool = False,
show_max_n : int = 8,
show_func : Optional[Callable] = None,
wait_time : float = 0.001,
*args, **kwargs
):
super().__init__()
self.every_n_epochs = every_n_epochs
self.run_in_parallel = run_in_parallel
self.queue_size = queue_size
self.save_max_n = save_max_n
self.save_to_subdir = save_to_subdir
self.image_quality = image_quality
self.verbose = verbose
self.show_max_n = show_max_n
self.show_func = show_func
self.wait_time = wait_time
self.debug_queue = None
self.thread_debugger = None
# self.init_thread()
# MARK: Configure
def init_thread(self):
if self.run_in_parallel:
self.debug_queue = Queue(maxsize=self.queue_size)
self.thread_debugger = threading.Thread(
target=self.show_results_parallel
)
# MARK: Run
def run(
self,
x : Optional[Tensor] = None,
y : Optional[Tensor] = None,
yhat : Optional[Tensor] = None,
filepath: Optional[str] = None,
):
"""Run the debugger process."""
if self.show_func:
if self.thread_debugger:
self.debug_queue.put([x, y, yhat, filepath])
else:
self.show_results(x=x, y=y, yhat=yhat, filepath=filepath)
def run_routine_start(self):
"""Perform operations when run routine starts."""
self.init_thread()
if self.thread_debugger and not self.thread_debugger.is_alive():
self.thread_debugger.start()
def run_routine_end(self):
"""Perform operations when run routine ends."""
if self.thread_debugger and self.thread_debugger.is_alive():
self.debug_queue.put([None, None, None, None])
def is_alive(self) -> bool:
"""Return whether the thread is alive."""
if self.thread_debugger:
return self.thread_debugger.is_alive()
return False
# MARK: Visualize
def show_results(
self,
x : Optional[Tensor] = None,
y : Optional[Tensor] = None,
yhat : Optional[Tensor] = None,
filepath: Optional[str] = None,
*args, **kwargs
):
self.show_func(
x = x,
y = y,
yhat = yhat,
filepath = filepath,
image_quality = self.image_quality,
verbose = self.verbose,
show_max_n = self.show_max_n,
wait_time = self.wait_time,
*args, **kwargs
)
def show_results_parallel(self):
"""Draw `result` in a separated thread."""
while True:
(input, target, pred, filepath) = self.debug_queue.get()
if input is None:
break
self.show_results(x=input, y=target, yhat=pred, filepath=filepath)
# Stop debugger thread
self.thread_debugger.join()
# MARK: Utils
def print(self):
console.log(vars(self))
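if __name__ == "__main__":
    # Minimal usage sketch (illustrative only; the dummy callback below is an
    # assumption and simply mirrors the keyword arguments show_results passes).
    def dummy_show(x, y, yhat, filepath, **kwargs):
        console.log(f"debug call for {filepath}")
    debugger = Debugger(run_in_parallel=False, show_func=dummy_show)
    debugger.run_routine_start()
    debugger.run(x=None, y=None, yhat=None, filepath="epoch_0.png")
    debugger.run_routine_end()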
|
the-stack_106_32107 |
import numpy as np
# mne imports
import mne
from mne import io
from mne.datasets import sample
# EEGNet-specific imports
from EEGModels import EEGNet
from tensorflow.keras import utils as np_utils
from tensorflow.keras.callbacks import ModelCheckpoint
# PyRiemann imports
from pyriemann.estimation import XdawnCovariances
from pyriemann.tangentspace import TangentSpace
from pyriemann.utils.viz import plot_confusion_matrix
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
# tools for plotting confusion matrices
from matplotlib import pyplot as plt
##################### Process, filter and epoch the data ######################
data_path = sample.data_path()
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0., 1
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True, verbose=False)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
raw.info['bads'] = ['MEG 2443'] # set bad channels
picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True, verbose=False)
print(epochs)
labels = epochs.events[:, -1]
# extract raw data. scale by 1000 due to scaling sensitivity in deep learning
X = epochs.get_data() * 1000 # format is in (trials, channels, samples)
y = labels
print(y)
kernels, chans, samples = 1, 60, 151
# take 50/25/25 percent of the data to train/validate/test
X_train = X[0:144, ]
Y_train = y[0:144]
X_validate = X[144:216, ]
Y_validate = y[144:216]
X_test = X[216:, ]
Y_test = y[216:]
############################# EEGNet portion ##################################
# convert labels to one-hot encodings.
Y_train = np_utils.to_categorical(Y_train - 1)
Y_validate = np_utils.to_categorical(Y_validate - 1)
Y_test = np_utils.to_categorical(Y_test - 1)
# convert data to NHWC (trials, channels, samples, kernels) format. Data
# contains 60 channels and 151 time-points. Set the number of kernels to 1.
X_train = X_train.reshape(X_train.shape[0], chans, samples, kernels)
X_validate = X_validate.reshape(X_validate.shape[0], chans, samples, kernels)
X_test = X_test.reshape(X_test.shape[0], chans, samples, kernels)
print('X_train shape:', X_train.shape)
print('X_validate shape:', X_validate.shape)
print('X_test shape:', X_test.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# configure the EEGNet-8,2,16 model with kernel length of 32 samples (other
# model configurations may do better, but this is a good starting point)
model = EEGNet(nb_classes=4, Chans=chans, Samples=samples,
dropoutRate=0.5, kernLength=32, F1=8, D=2, F2=16,
dropoutType='Dropout')
# compile the model and set the optimizers
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# count number of parameters in the model
numParams = model.count_params()
# set a valid path for your system to record model checkpoints
checkpointer = ModelCheckpoint(filepath=r'/home/changhongli/arl-eegmodels-master/examples/EEGNet-8-2-weights.h5', verbose=1, save_best_only=True)
###############################################################################
# if the classification task was imbalanced (significantly more trials in one
# class versus the others) you can assign a weight to each class during
# optimization to balance it out. This data is approximately balanced so we
# don't need to do this, but is shown here for illustration/completeness.
###############################################################################
# the syntax is {class_1:weight_1, class_2:weight_2,...}. Here just setting
# the weights all to be 1
class_weights = {0: 1, 1: 1, 2: 1, 3: 1}
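# e.g. if class 2 had roughly half as many trials as the others, an illustrative
# re-balancing would be class_weights = {0: 1, 1: 1, 2: 2, 3: 1}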
################################################################################
# fit the model. Due to very small sample sizes this can get
# pretty noisy run-to-run, but most runs should be comparable to xDAWN +
# Riemannian geometry classification (below)
################################################################################
fittedModel = model.fit(X_train, Y_train, batch_size=16, epochs=300, verbose=2,
                        validation_data=(X_validate, Y_validate),
                        callbacks=[checkpointer], class_weight=class_weights)
# load optimal weights
# model.load_weights('/tmp/checkpoint.h5')
###############################################################################
# can alternatively used the weights provided in the repo. If so it should get
# you 93% accuracy. Change the WEIGHTS_PATH variable to wherever it is on your
# system.
###############################################################################
WEIGHTS_PATH = r'/home/changhongli/arl-eegmodels-master/examples/EEGNet-8-2-weights.h5'
model.load_weights(WEIGHTS_PATH)
###############################################################################
# make prediction on test set.
###############################################################################
probs = model.predict(X_test)
preds = probs.argmax(axis=-1)
acc = np.mean(preds == Y_test.argmax(axis=-1))
print("Classification accuracy: %f " % (acc))
############################# PyRiemann Portion ##############################
# code is taken from PyRiemann's ERP sample script, which is decoding in
# the tangent space with a logistic regression
n_components = 2 # pick some components
# set up sklearn pipeline
clf = make_pipeline(XdawnCovariances(n_components), TangentSpace(metric='riemann'), LogisticRegression())
preds_rg = np.zeros(len(Y_test))
# reshape back to (trials, channels, samples)
X_train = X_train.reshape(X_train.shape[0], chans, samples)
X_test = X_test.reshape(X_test.shape[0], chans, samples)
# train a classifier with xDAWN spatial filtering + Riemannian Geometry (RG)
# labels need to be back in single-column format
clf.fit(X_train, Y_train.argmax(axis=-1))
preds_rg = clf.predict(X_test)
# Printing the results
acc2 = np.mean(preds_rg == Y_test.argmax(axis=-1))
print("Classification accuracy: %f " % (acc2))
# plot the confusion matrices for both classifiers
names = ['audio left', 'audio right', 'vis left', 'vis right']
plt.figure(0)
plot_confusion_matrix(preds, Y_test.argmax(axis=-1), names, title='EEGNet-8,2')
plt.figure(1)
plot_confusion_matrix(preds_rg, Y_test.argmax(axis=-1), names, title='xDAWN + RG')
|
the-stack_106_32109 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Wazzle Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listtransactions API."""
from decimal import Decimal
from io import BytesIO
from test_framework.messages import COIN, CTransaction
from test_framework.test_framework import WazzleTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
hex_str_to_bytes,
)
def tx_from_hex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(WazzleTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].generate(1) # Get out of IBD
self.sync_all()
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "amount": Decimal("-0.1"), "confirmations": 0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "amount": Decimal("-0.1"), "confirmations": 1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "send"},
{"amount": Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "receive"},
{"amount": Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = {self.nodes[0].getnewaddress(): 0.11,
self.nodes[1].getnewaddress(): 0.22,
self.nodes[0].getnewaddress(): 0.33,
self.nodes[1].getnewaddress(): 0.44}
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.11")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.11")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.33")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.33")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.44")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.44")},
{"txid": txid})
pubkey = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
multisig = self.nodes[1].createmultisig(1, [pubkey])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert len(self.nodes[0].listtransactions(label="watchonly", count=100, include_watchonly=False)) == 0
assert_array_result(self.nodes[0].listtransactions(label="watchonly", count=100, include_watchonly=True),
{"category": "receive", "amount": Decimal("0.1")},
{"txid": txid, "label": "watchonly"})
self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
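            # Per BIP125, a transaction explicitly signals replaceability if any
            # of its inputs has nSequence below 0xfffffffe.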
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert not is_opt_in(self.nodes[0], txid_1)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
self.sync_mempools()
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
assert_equal(utxo_to_use["safe"], True)
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
assert_equal(utxo_to_use["safe"], False)
# Create tx2 using createrawtransaction
inputs = [{"txid": utxo_to_use["txid"], "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransactionwithwallet(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert not is_opt_in(self.nodes[1], txid_2)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
self.sync_mempools()
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = tx_from_hex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = tx3_modified.serialize().hex()
tx3_signed = self.nodes[0].signrawtransactionwithwallet(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert is_opt_in(self.nodes[0], txid_3)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
self.sync_mempools()
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransactionwithwallet(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert not is_opt_in(self.nodes[1], txid_4)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
self.sync_mempools()
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = tx3_b.serialize().hex()
tx3_b_signed = self.nodes[0].signrawtransactionwithwallet(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, 0)
assert is_opt_in(self.nodes[0], txid_3b)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
self.sync_mempools()
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert txid_3b not in self.nodes[0].getrawmempool()
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
|
the-stack_106_32110 | from django.contrib import admin
from .models import User
from django.contrib.auth.models import Group
class UserAdmin(admin.ModelAdmin):
list_display = (
'user_id',
'name',
'student_id',
'grade',
'circles',
'department',
'level',
'date_joined'
)
search_fields = ('user_id', 'name', 'student_id', 'department')
admin.site.register(User, UserAdmin)
admin.site.unregister(Group) |
the-stack_106_32113 | #!/usr/bin/env python3
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import sys
import subprocess
import shlex
import re
import tempfile
import os
# Subprocess and log parsing related functions
def subprocess_exec(cmd_string, log_file_path=None, extract=None):
"""
    Execute the input string as a subprocess
    cmd_string: String
        Input string to be executed as a sub process
    extract: String
        What to extract from the run logs: 'time', 'dir' or 'hdfs_base'
    log_file_path: String
        Path to write the log file to; a temporary file is used if omitted
    return: String
        The extracted value, 0 when extract is None, or 'proc_fail' if the
        subprocess exits with a non-zero return code
"""
# Debug
# print(cmd_string)
exec_command = shlex.split(cmd_string)
log_file = None
is_temp_file = False
if log_file_path is not None:
log_file_path = log_file_path + '.log'
log_file = open(log_file_path, "w+")
else:
os_log_file, log_file_path = tempfile.mkstemp()
log_file = os.fdopen(os_log_file, 'w+')
is_temp_file = True
log_file.write(' '.join(exec_command))
log_file.write('\n')
proc1 = subprocess.Popen(exec_command, stdout=log_file,
stderr=subprocess.STDOUT)
proc1.wait()
return_code = proc1.returncode
log_file.close()
log_file = open(log_file_path, 'r+')
if return_code == 0:
if extract == 'time':
return_data = parse_time(log_file)
if extract == 'dir':
return_data = parse_hdfs_paths(log_file)
if extract == 'hdfs_base':
return_data = parse_hdfs_base(log_file)
if extract is None:
return_data = 0
if return_code != 0:
return_data = 'proc_fail'
print('sub-process failed, return code {}'.format(return_code))
if is_temp_file:
os.remove(log_file_path)
return return_data
def parse_hdfs_base(std_outs):
"""
return: String
hdfs base uri
"""
hdfs_uri = None
for line in std_outs:
if line.startswith('hdfs://'):
hdfs_uri = line.strip()
if hdfs_uri is None:
sys.exit('HDFS URI not found')
return hdfs_uri
def write_logs(std_outs, log_file_path):
"""
Write all logs to the specified location
"""
with open(log_file_path, 'w')as log:
log.write("\n".join(std_outs))
def get_all_logs(process):
"""
Based on the subprocess capture logs
process: Process
Process object
return: List, List
Std out and Error as logs as list
"""
out_arr = []
while True:
nextline = process.stdout.readline().decode('utf8').strip()
out_arr.append(nextline)
if nextline == '' and process.poll() is not None:
break
error_arr = []
while True:
nextline = process.stderr.readline().decode('utf8').strip()
error_arr.append(nextline)
if nextline == '' and process.poll() is not None:
break
return out_arr, error_arr
def parse_hdfs_paths(std_outs):
"""
Extract the hdfs paths from the input
std_outs: List
Std outs obtained from the subprocess
return: List
Obtain a list of hdfs paths
"""
hdfs_dir = []
for i in std_outs:
if 'No such file or directory' in i:
break
elif 'hdfs' in i:
current_dir = i.split(' ')[-1].strip()
hdfs_dir.append(current_dir)
return hdfs_dir
def parse_time(raw_logs):
"""
Parses raw input list and extracts time
raw_logs : List
Each line obtained from the standard output is in the list
return: String
Extracted time in seconds or time_not_found
"""
# Debug
# print(raw_logs)
for line in raw_logs:
if 'ERROR' in line:
return 'error'
if line.startswith('Total execution time'):
extract_time = re.findall(r'\d+', line)
total_time = '.'.join(extract_time)
return total_time
return 'time_not_found'
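if __name__ == '__main__':
    # Small self-check of the log parsers. The sample log lines below are made
    # up for illustration and are not the output of a real SystemML run.
    sample_run_log = ['Welcome to Apache SystemML!',
                      'Total execution time:        12.345 sec.']
    print(parse_time(sample_run_log))        # -> '12.345'
    sample_ls_log = ['Found 1 items',
                     'drwxr-xr-x   - user supergroup 0 2021-01-01 00:00 hdfs://namenode:8020/tmp/systemml']
    print(parse_hdfs_paths(sample_ls_log))   # -> ['hdfs://namenode:8020/tmp/systemml']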
|
the-stack_106_32114 | import sys
import os
inputfile=os.path.join(sys.path[0],sys.argv[1])
frequency = 0
frequencies = {0}
changes = []
with open(inputfile) as changes_file:
for change in changes_file:
changes.append(int(change))
while True:
for change in changes:
frequency += change
if frequency in frequencies:
print(frequency)
sys.exit(0)
frequencies.add(frequency)
|
the-stack_106_32115 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
United Daily News (聯合報)
This spider crawls UDN breaking news.
Usage: scrapy crawl udn -o <filename.json>
"""
from datetime import datetime
from datetime import timedelta
import scrapy
TODAY_STR = datetime.now().strftime('%m-%d')
class UdnSpider(scrapy.Spider):
name = "udn_origin"
def start_requests(self):
url = 'https://udn.com/news/breaknews/1'
meta = {'iter_time': 1}
yield scrapy.Request(url, callback=self.parse, meta=meta)
def parse(self, response):
has_next_page = True
is_first_iter = response.meta['iter_time'] == 1
response.meta['iter_time'] += 1
el_selector = '#breaknews_body dt' if is_first_iter else 'dt'
target = response.css(el_selector)
if not target:
has_next_page = False
for news in target:
url = news.css('a::attr(href)').extract_first()
url = response.urljoin(url)
date_time = news.css('.info .dt::text').extract_first()
if TODAY_STR not in date_time:
has_next_page = False
break
yield scrapy.Request(url, callback=self.parse_news)
if has_next_page:
iter_time = response.meta['iter_time']
yield scrapy.FormRequest(
url='https://udn.com/news/get_breaks_article/%d/1/0' %
iter_time,
callback=self.parse,
meta=response.meta)
def parse_news(self, response):
title = response.css('h1::text').extract_first()
date_of_news = response.css(
'.story_bady_info_author span::text').extract_first()[:10]
content = ""
for p in response.css('p'):
p_text = p.css('::text')
if p_text:
content += ' '.join(p_text.extract())
category_links = response.css('div div div.only_web a')
category = category_links[1].css('::text').extract_first()
yield {
'website': "聯合報",
'url': response.url,
'title': title,
'date': date_of_news,
'content': content,
'category': category
}
|
the-stack_106_32118 | # -*- coding: utf-8 -*-
# @Time : 2020/12/01 15:44:11
# @Author : DannyDong
# @File : run.py
# @Describe: Flask application entry point
from factory import create_app
if __name__ == "__main__":
app = create_app()
app.run()
|
the-stack_106_32119 | # Day 12: Subterranean Sustainability
from utils import *
def main(start: str):
rules = load_rules()
plants = set()
for i in range(len(start)):
if start[i] == '#':
plants.add(i)
for i in range(20):
plants = iterate(plants, rules)
print('Part 1:', sum(plants))
# By inspection, the difference stabilizes to 53 each generation
# At 500 generations, the value is 26966
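    # so the part 2 answer is 26966 + (50000000000 - 500) * 53 = 2650000000466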
print('Part 2:', 26966 + (50000000000 - 500) * 53)
def iterate(plants, rules):
min_p = min(plants) - 3
max_p = max(plants) + 3
plants2 = set()
for i in range(min_p, max_p + 1):
mp = ''
for off in range(-2, 3):
mp += '#' if i + off in plants else '.'
if mp in rules:
plants2.add(i)
return plants2
def load_rules():
rules = set()
for line in get_input_lines():
args = line.split(' => ')
if args[1][0] == '#':
rules.add(args[0])
return rules
if __name__ == '__main__':
main('##.#############........##.##.####..#.#..#.##...###.##......#.#..#####....##..#####..#.#.##.#.##')
|
the-stack_106_32120 | import argparse
import os
import numpy as np
from dagbldr.datasets import fetch_binarized_mnist
from dagbldr.utils import convert_to_one_hot, load_checkpoint
from dagbldr.utils import interpolate_between_points, make_gif
parser = argparse.ArgumentParser()
parser.add_argument("saved_functions_file",
help="Saved pickle file from vae training")
parser.add_argument("--seed", "-s",
help="random seed for path calculation",
action="store", default=1979, type=int)
args = parser.parse_args()
if not os.path.exists(args.saved_functions_file):
raise ValueError("Please provide a valid path for saved pickle file!")
checkpoint_dict = load_checkpoint(args.saved_functions_file)
encode_function = checkpoint_dict["encode_function"]
decode_function = checkpoint_dict["decode_function"]
predict_function = checkpoint_dict["predict_function"]
random_state = np.random.RandomState(args.seed)
mnist = fetch_binarized_mnist()
# visualize against validation so we aren't cheating
valid_indices = mnist["valid_indices"]
data = mnist["data"]
target = mnist["target"]
X = data[valid_indices]
y = target[valid_indices]
n_classes = len(set(y))
# number of samples
n_plot_samples = 6
# MNIST dimensions
width = 28
height = 28
# Get random data samples
ind = np.arange(len(X))
random_state.shuffle(ind)
sample_X = X[ind[:n_plot_samples]]
sample_y = convert_to_one_hot(y[ind[:n_plot_samples]], n_classes=n_classes)
def gen_samples(X, y):
mu, log_sig = encode_function(X)
# No noise at test time - repeat y twice because y_pred is needed for Theano
# But it is not used unless y_sym is all -1
out, = decode_function(mu + np.exp(log_sig), y)
return out
# VAE specific plotting
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
all_pred_y, = predict_function(X)
all_pred_y = np.argmax(all_pred_y, axis=1)
accuracy = np.mean(all_pred_y.ravel() == y.ravel())
f, axarr = plt.subplots(n_plot_samples, 2)
n_correct_to_show = n_plot_samples // 2
n_incorrect_to_show = n_plot_samples - n_correct_to_show
correct_ind = np.where(all_pred_y == y)[0]
incorrect_ind = np.where(all_pred_y != y)[0]
random_state.shuffle(correct_ind)
random_state.shuffle(incorrect_ind)
c = correct_ind[:n_correct_to_show]
i = incorrect_ind[:n_incorrect_to_show]
X_corr = X[c]
X_incorr = X[i]
X_stack = np.vstack((X_corr, X_incorr))
y_corr = convert_to_one_hot(y[c], n_classes=10)
y_incorr = convert_to_one_hot(y[i], n_classes=10)
y_stack = np.vstack((y_corr, y_incorr))
generated_X = gen_samples(X_stack, y_stack)
predicted_y = convert_to_one_hot(np.hstack((all_pred_y[c], all_pred_y[i])),
n_classes=10)
for n, (X_i, y_i, sx_i, sy_i) in enumerate(zip(X_stack, y_stack,
generated_X, predicted_y)):
axarr[n, 0].matshow(X_i.reshape(width, height), cmap="gray")
axarr[n, 1].matshow(sx_i.reshape(width, height), cmap="gray")
axarr[n, 0].axis('off')
axarr[n, 1].axis('off')
y_a = np.argmax(y_i)
sy_a = np.argmax(sy_i)
axarr[n, 0].text(0, 7, str(y_a), color='green')
if y_a == sy_a:
axarr[n, 1].text(0, 7, str(sy_a), color='green')
else:
axarr[n, 1].text(0, 7, str(sy_a), color='red')
f.suptitle("Validation accuracy: %s" % str(accuracy))
plt.savefig('vae_reconstruction.png')
plt.close()
# Style plotting
f, axarr = plt.subplots(n_plot_samples, n_classes + 1)
for n, (X_i, y_i) in enumerate(zip(sample_X, sample_y)):
true_rec = gen_samples(X_i[None], y_i[None])
fixed_mu, fixed_sigma = encode_function(X_i[None])
axarr[n, 0].matshow(X_i.reshape(width, height), cmap="gray")
axarr[n, 0].axis('off')
all_mu = fixed_mu * np.ones((n_classes, fixed_mu.shape[1])).astype(
"float32")
all_sigma = fixed_sigma * np.ones((n_classes, fixed_sigma.shape[1])).astype(
"float32")
all_classes = np.eye(n_classes).astype('int32')
all_recs, = decode_function(all_mu + np.exp(all_sigma), all_classes)
for j in range(1, n_classes + 1):
axarr[n, j].matshow(all_recs[j - 1].reshape(width, height), cmap="gray")
axarr[n, j].axis('off')
f.suptitle("Style variation by changing conditional")
plt.savefig('vae_style.png')
plt.close()
# Calculate noisy linear path between points in space
mus, log_sigmas = encode_function(sample_X)
n_steps = 20
mu_path = interpolate_between_points(mus, n_steps=n_steps)
log_sigma_path = interpolate_between_points(log_sigmas, n_steps=n_steps)
# Noisy path across space from one point to another
path_X = mu_path + np.exp(log_sigma_path)
path_y = np.zeros((len(path_X), n_classes), dtype="int32")
for i in range(n_plot_samples):
path_y[i * n_steps:(i + 1) * n_steps] = sample_y[i]
out, = decode_function(path_X, path_y)
text_y = [str(np.argmax(path_y[i])) for i in range(len(path_y))]
color_y = ["white"] * len(text_y)
make_gif(out, "vae_code.gif", width, height, list_text_per_frame=text_y,
list_text_per_frame_color=color_y, delay=1, grayscale=True)
|
the-stack_106_32121 | '''LeNet in PxTorch.'''
import torch.nn as nn
from src.models.classifiers.general import GeneralNet
class Dense(GeneralNet):
def __init__(self, n_classes, n_channels_in=1, **kwargs):
super(Dense, self).__init__(n_classes, n_channels_in, **kwargs)
self.fc1 = nn.Linear(n_channels_in * 32 * 32, 300)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(300, 300)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(300, 300)
self.relu3 = nn.ReLU()
self.fc4 = nn.Linear(300, n_classes)
self.to_transform = [('fc1', 'fc1.bias'),
('fc2', 'fc2.bias'),
('fc3', 'fc3.bias'),
('fc4', 'fc4.bias')]
self._finish_init()
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
x = self.relu3(x)
x = self.fc4(x)
return x
|
the-stack_106_32122 | def var(mat,col,population,get,obj,dFrame):
from ..customs.objects import Label
nullobj = mat.DEFAULT_NULL
feats = mat.features.labels
if mat.features.level == 1:
feats = [row[0] for row in feats]
col = feats.index(col)+1 if isinstance(col,(tuple,str)) else col
if col != None:
if col<=0 or col>mat.d1:
raise IndexError(f"Column index is out of range, expected range: [1,{mat.d1}]")
s=mat.sdev(col,population)
if s == None:
raise ValueError("Can't get standard deviations")
vs={}
for k,v in s.items():
try:
vs[k]=v**2
except:
vs[k]=nullobj
#Return a matrix
if get==2:
cols = list(vs.keys())
v = [i for i in vs.values()]
cdtypes = [complex] if any([1 if isinstance(val,complex) else 0 for val in v]) else [float]
return obj((len(cols),1),v,features=["Variance"],dtype=dFrame,coldtypes=cdtypes,index=Label(cols,mat.features.names[:]))
#Return a dictionary
elif get==1:
return vs
else:
items=list(vs.values())
if len(items)==1:
return items[0]
if col==None:
return items
return items[col-1] |
the-stack_106_32124 | import os
from auxpump import TEMP_PATH, CONFIG
import time
def remote_exec(cmd, *args):
temp_ver = 0
fname = None
while fname is None or fname in os.listdir(TEMP_PATH):
fname = 'tmp' + str(temp_ver) + '.sh'
temp_ver += 1
fname = os.path.join(TEMP_PATH, fname)
try:
cmd = ' '.join([cmd] + [str(a) for a in args])
with open(fname, 'w+') as temp_sh:
temp_sh.write(cmd)
os.system('plink ' + CONFIG['putty_session'] + ' -m ' + fname)
finally:
for _ in range(4):
try:
os.remove(fname)
break
except OSError:
time.sleep(.05)
continue
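# Illustrative usage (assumes plink is on PATH and CONFIG['putty_session'] names
# a saved PuTTY session for the remote host):
#
#     remote_exec('echo', 'hello from the aux pump host')
#
# This joins the arguments into one command line, writes it to a temporary .sh
# file under TEMP_PATH, runs it remotely via plink, then deletes the temp file.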
|
the-stack_106_32127 | import setuptools
setup_reqs = ['pytest', 'pytest-cov', 'pytest-runner', 'flake8']
setuptools.setup(
name="gordian",
version="2.1.2",
author="Intuit",
author_email="[email protected]",
description="A tool to search and replace files in a Git repo",
url="https://github.com/argoproj-labs/gordian",
install_requires=['pygithub', 'pyyaml', 'jsonpatch', 'deepdiff', 'retry'],
setup_requires=setup_reqs,
extras_require={
'test': setup_reqs
},
tests_require=setup_reqs,
packages=['gordian', 'gordian.files'],
entry_points={
'console_scripts': [
'gordian = gordian.gordian:main',
'pulpo = gordian.pulpo:main',
],
},
)
|
the-stack_106_32130 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in math_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SquaredDifferenceOpTest(test.TestCase):
def _testGrad(self, left_shape, right_shape):
if len(left_shape) > len(right_shape):
output_shape = left_shape
else:
output_shape = right_shape
l = np.random.randn(*left_shape)
r = np.random.randn(*right_shape)
with self.test_session(use_gpu=True):
left_tensor = constant_op.constant(l, shape=left_shape)
right_tensor = constant_op.constant(r, shape=right_shape)
output = math_ops.squared_difference(left_tensor, right_tensor)
left_err = gradient_checker.compute_gradient_error(
left_tensor, left_shape, output, output_shape, x_init_value=l)
right_err = gradient_checker.compute_gradient_error(
right_tensor, right_shape, output, output_shape, x_init_value=r)
self.assertLess(left_err, 1e-10)
self.assertLess(right_err, 1e-10)
def testGrad(self):
self._testGrad([1, 2, 3, 2], [3, 2])
self._testGrad([2, 4], [3, 2, 4])
class AbsOpTest(test.TestCase):
def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
"""Returns samples from a normal distribution shifted `bias` away from 0."""
value = np.random.randn(*shape) * sigma
return value + np.sign(value) * bias
def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
np.random.seed(7)
if dtype in (dtypes.complex64, dtypes.complex128):
value = math_ops.complex(
self._biasedRandN(
shape, bias=bias, sigma=sigma),
self._biasedRandN(
shape, bias=bias, sigma=sigma))
else:
value = ops.convert_to_tensor(
self._biasedRandN(
shape, bias=bias), dtype=dtype)
with self.test_session(use_gpu=True):
output = math_ops.abs(value)
error = gradient_checker.compute_gradient_error(
value, shape, output, output.get_shape().as_list())
self.assertLess(error, max_error)
def testComplexAbs(self):
# Bias random test values away from zero to avoid numeric instabilities.
self._testGrad(
[3, 3], dtype=dtypes.float32, max_error=2e-5, bias=0.1, sigma=1.0)
self._testGrad(
[3, 3], dtype=dtypes.complex64, max_error=2e-5, bias=0.1, sigma=1.0)
# Ensure stability near the pole at zero.
self._testGrad(
[3, 3], dtype=dtypes.float32, max_error=100.0, bias=0.0, sigma=0.1)
self._testGrad(
[3, 3], dtype=dtypes.complex64, max_error=100.0, bias=0.0, sigma=0.1)
class MinOrMaxGradientTest(test.TestCase):
def testMinGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
outputs = math_ops.reduce_min(array_ops.concat([inputs, inputs], 0))
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
def testMaxGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
outputs = math_ops.reduce_max(array_ops.concat([inputs, inputs], 0))
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
class MaximumOrMinimumGradientTest(test.TestCase):
def testMaximumGradient(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
outputs = math_ops.maximum(inputs, 3.0)
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
self.assertLess(error, 1e-4)
def testMinimumGradient(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
outputs = math_ops.minimum(inputs, 2.0)
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
self.assertLess(error, 1e-4)
class ProdGradientTest(test.TestCase):
def testProdGradient(self):
inputs = constant_op.constant([[1., 2.], [3., 4.]],
dtype=dtypes.float32)
outputs = math_ops.reduce_prod(inputs)
with self.test_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
def testProdGradientForNegativeAxis(self):
inputs = constant_op.constant([[1., 2.], [3., 4.]],
dtype=dtypes.float32)
outputs = math_ops.reduce_prod(inputs, -1)
with self.test_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
class SegmentMinOrMaxGradientTest(test.TestCase):
def testSegmentMinGradient(self):
data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
segment_min = math_ops.segment_min(data, segment_ids)
with self.test_session():
error = gradient_checker.compute_gradient_error(data, [3], segment_min,
[2])
self.assertLess(error, 1e-4)
def testSegmentMaxGradient(self):
data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
segment_max = math_ops.segment_max(data, segment_ids)
with self.test_session():
error = gradient_checker.compute_gradient_error(data, [3], segment_max,
[2])
self.assertLess(error, 1e-4)
def testSegmentMinGradientWithTies(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
data = array_ops.concat([inputs, inputs], 0)
segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
segment_min = math_ops.segment_min(data, segment_ids)
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1], segment_min,
[1])
self.assertLess(error, 1e-4)
def testSegmentMaxGradientWithTies(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
data = array_ops.concat([inputs, inputs], 0)
segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
segment_max = math_ops.segment_max(data, segment_ids)
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1], segment_max,
[1])
self.assertLess(error, 1e-4)
class FloorModGradientTest(test.TestCase):
def testFloorModGradient(self):
# Making sure the input is not near the discontinuity point where
# x/y == floor(x/y)
ns = constant_op.constant([17.], dtype=dtypes.float32)
inputs = constant_op.constant([131.], dtype=dtypes.float32)
floor_mod = math_ops.floormod(inputs, ns)
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1],
floor_mod, [1])
self.assertLess(error, 1e-4)
if __name__ == "__main__":
test.main()
|
the-stack_106_32136 | """
This module defines base classes for events.
"""
from datetime import timedelta
from uuid import UUID
from pydantic import BaseModel, Field, validator
__all__ = ["Event", "ElementTransportEvent", "PrimitiveEvent"]
class Event(BaseModel):
"""
The base event schema.
"""
name: str = Field(..., description="The name of the event")
type: str = Field(..., title="Type", description="The type of event")
priority: int = Field(
...,
title="Priority",
description="The importance of the mission event",
ge=1,
le=5,
)
mission_time: timedelta = Field(
...,
title="Mission Time",
description="The time this event starts at, relative to the start of the mission",
)
class ElementTransportEvent(Event):
"""
A schema representing a basic event transporting elements from one node to another.
"""
origin_node_id: UUID = Field(
...,
title="Origin Node ID",
description="The ID of the transport event's origin node",
)
destination_node_id: UUID = Field(
...,
title="Destination Node ID",
description="The ID of the transport event's destination node",
)
edge_id: UUID = Field(
..., description="The ID of the edge between origin and destination nodes"
)
exec_time: timedelta = Field(
..., description="The time this transport event runs for"
)
class PrimitiveEvent(Event):
"""
A schema representing events which other events are decomposed into.
"""
queued_at: timedelta = None
@validator("queued_at", always=True)
def _initialize_queued_at(cls, v, values, **kwargs) -> timedelta:
if v is None:
assert values.get("mission_time") is not None
return values.get("mission_time")
return v
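# Illustrative example (field values are made up): constructing
#   PrimitiveEvent(name="burn", type="propulsive", priority=3,
#                  mission_time=timedelta(minutes=5))
# leaves queued_at unset, so the validator above copies mission_time into it,
# i.e. queued_at == timedelta(minutes=5).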
|
the-stack_106_32138 | from _kratos import Simulator as _Simulator
from .generator import Generator, PortType
# Python wrapper for the simulator
class Simulator:
def __init__(self, generator: Generator):
self._sim = _Simulator(generator.internal_generator)
# get the clock and reset
clks = generator.internal_generator.get_ports(PortType.Clock.value)
if len(clks) == 1:
self._clk = generator.ports[clks[0]]
else:
self._clk = None
resets = generator.internal_generator.get_ports(PortType.AsyncReset.value)
if len(resets) == 1:
self._reset = generator.ports[resets[0]]
else:
self._reset = None
def set(self, var, value):
self._sim.set(var, value)
def get(self, var):
if len(var.size) > 1 or var.size[0] > 1:
return self._sim.get_array(var)
else:
return self._sim.get(var)
def cycle(self, n=1):
if self._clk is None:
raise RuntimeError("Single clock not found")
for _ in range(n):
self.set(self._clk, 1)
self.set(self._clk, 0)
def reset(self, reset_high=True):
if self._reset is None:
raise RuntimeError("Single reset not found")
if reset_high:
self.set(self._reset, 1)
self.set(self._reset, 0)
else:
self.set(self._reset, 0)
self.set(self._reset, 1)
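# Rough usage sketch (assumes `gen` is a kratos Generator with exactly one clock
# and one reset port; the port names "in" and "out" are placeholders):
#
#   sim = Simulator(gen)
#   sim.reset()                       # pulse the reset port
#   sim.set(gen.ports["in"], 1)
#   sim.cycle(2)                      # toggle the clock twice
#   value = sim.get(gen.ports["out"])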
|
the-stack_106_32139 | '''
Test TMCL Parameters of TMCM1161 via RS485 interface and module ID 1.
Created on 15.12.2020
@author: LK
'''
from PyTrinamicMicro.platforms.motionpy2.connections.rs485_tmcl_interface import rs485_tmcl_interface
from PyTrinamic.modules.TMCM1161.TMCM_1161 import TMCM_1161
import logging
MODULE_ID = 1
GP_BANK = 0
AP_AXIS = 0
logger = logging.getLogger(__name__)
logger.info("Test module TMCM1161 parameters via RS485")
logger.info("Initializing interface.")
interface = rs485_tmcl_interface(module_id=MODULE_ID)
logger.info("Initializing module.")
module = TMCM_1161(interface)
logger.info("Testing global parameter access.")
logger.info("Getting global parameter ({}, {}) ...".format("timer_0", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.timer_0, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("timer_1", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.timer_1, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("timer_2", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.timer_2, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("stopLeft_0", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.stopLeft_0, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("stopRight_0", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.stopRight_0, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("input_0", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.input_0, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("input_1", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.input_1, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("input_2", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.input_2, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("input_3", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.input_3, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("serialBaudRate", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.serialBaudRate, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("serialAddress", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.serialAddress, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("ASCIIMode", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.ASCIIMode, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("serialHeartbeat", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.serialHeartbeat, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("telegramPauseTime", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.telegramPauseTime, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("serialHostAddress", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.serialHostAddress, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("autoStartMode", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.autoStartMode, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("limitSwitchPolarity", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.limitSwitchPolarity, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("protectionMode", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.protectionMode, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("eepromCoordinateStore", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.eepromCoordinateStore, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("zeroUserVariables", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.zeroUserVariables, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("serialSecondaryAddress", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.serialSecondaryAddress, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("reverseShaftDirection", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.reverseShaftDirection, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("applicationStatus", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.applicationStatus, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("downloadMode", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.downloadMode, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("programCounter", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.programCounter, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("lastTmclError", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.lastTmclError, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("tickTimer", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.tickTimer, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("randomNumber", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.randomNumber, GP_BANK)))
logger.info("Getting global parameter ({}, {}) ...".format("Intpol", module.GPs.timer_0))
logger.info("{}".format(module.getGlobalParameter(module.GPs.Intpol, GP_BANK)))
logger.info("Testing axis parameter access.")
logger.info("Getting axis parameter ({}, {}) ...".format("TargetPosition", module.APs.TargetPosition))
logger.info("{}".format(module.getAxisParameter(module.APs.TargetPosition, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("ActualPosition", module.APs.ActualPosition))
logger.info("{}".format(module.getAxisParameter(module.APs.ActualPosition, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("TargetVelocity", module.APs.TargetVelocity))
logger.info("{}".format(module.getAxisParameter(module.APs.TargetVelocity, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("ActualVelocity", module.APs.ActualVelocity))
logger.info("{}".format(module.getAxisParameter(module.APs.ActualVelocity, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("MaxVelocity", module.APs.MaxVelocity))
logger.info("{}".format(module.getAxisParameter(module.APs.MaxVelocity, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("MaxAcceleration", module.APs.MaxAcceleration))
logger.info("{}".format(module.getAxisParameter(module.APs.MaxAcceleration, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("MaxCurrent", module.APs.MaxCurrent))
logger.info("{}".format(module.getAxisParameter(module.APs.MaxCurrent, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("StandbyCurrent", module.APs.StandbyCurrent))
logger.info("{}".format(module.getAxisParameter(module.APs.StandbyCurrent, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("PositionReachedFlag", module.APs.PositionReachedFlag))
logger.info("{}".format(module.getAxisParameter(module.APs.PositionReachedFlag, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("referenceSwitchStatus", module.APs.referenceSwitchStatus))
logger.info("{}".format(module.getAxisParameter(module.APs.referenceSwitchStatus, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("RightEndstop", module.APs.RightEndstop))
logger.info("{}".format(module.getAxisParameter(module.APs.RightEndstop, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("LeftEndstop", module.APs.LeftEndstop))
logger.info("{}".format(module.getAxisParameter(module.APs.LeftEndstop, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("rightLimitSwitchDisable", module.APs.rightLimitSwitchDisable))
logger.info("{}".format(module.getAxisParameter(module.APs.rightLimitSwitchDisable, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("leftLimitSwitchDisable", module.APs.leftLimitSwitchDisable))
logger.info("{}".format(module.getAxisParameter(module.APs.leftLimitSwitchDisable, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("minimumSpeed", module.APs.minimumSpeed))
logger.info("{}".format(module.getAxisParameter(module.APs.minimumSpeed, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("actualAcceleration", module.APs.actualAcceleration))
logger.info("{}".format(module.getAxisParameter(module.APs.actualAcceleration, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("RampMode", module.APs.RampMode))
logger.info("{}".format(module.getAxisParameter(module.APs.RampMode, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("MicrostepResolution", module.APs.MicrostepResolution))
logger.info("{}".format(module.getAxisParameter(module.APs.MicrostepResolution, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("Ref_SwitchTolerance", module.APs.Ref_SwitchTolerance))
logger.info("{}".format(module.getAxisParameter(module.APs.Ref_SwitchTolerance, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("softStopFlag", module.APs.softStopFlag))
logger.info("{}".format(module.getAxisParameter(module.APs.softStopFlag, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("EndSwitchPowerDown", module.APs.EndSwitchPowerDown))
logger.info("{}".format(module.getAxisParameter(module.APs.EndSwitchPowerDown, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("rampDivisor", module.APs.rampDivisor))
logger.info("{}".format(module.getAxisParameter(module.APs.rampDivisor, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("PulseDivisor", module.APs.PulseDivisor))
logger.info("{}".format(module.getAxisParameter(module.APs.PulseDivisor, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("Intpol", module.APs.Intpol))
logger.info("{}".format(module.getAxisParameter(module.APs.Intpol, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("DoubleEdgeSteps", module.APs.DoubleEdgeSteps))
logger.info("{}".format(module.getAxisParameter(module.APs.DoubleEdgeSteps, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("ChopperBlankTime", module.APs.ChopperBlankTime))
logger.info("{}".format(module.getAxisParameter(module.APs.ChopperBlankTime, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("ConstantTOffMode", module.APs.ConstantTOffMode))
logger.info("{}".format(module.getAxisParameter(module.APs.ConstantTOffMode, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("DisableFastDecayComparator", module.APs.DisableFastDecayComparator))
logger.info("{}".format(module.getAxisParameter(module.APs.DisableFastDecayComparator, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("ChopperHysteresisEnd", module.APs.ChopperHysteresisEnd))
logger.info("{}".format(module.getAxisParameter(module.APs.ChopperHysteresisEnd, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("ChopperHysteresisStart", module.APs.ChopperHysteresisStart))
logger.info("{}".format(module.getAxisParameter(module.APs.ChopperHysteresisStart, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("TOff", module.APs.TOff))
logger.info("{}".format(module.getAxisParameter(module.APs.TOff, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("SEIMIN", module.APs.SEIMIN))
logger.info("{}".format(module.getAxisParameter(module.APs.SEIMIN, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("SECDS", module.APs.SECDS))
logger.info("{}".format(module.getAxisParameter(module.APs.SECDS, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("smartEnergyHysteresis", module.APs.smartEnergyHysteresis))
logger.info("{}".format(module.getAxisParameter(module.APs.smartEnergyHysteresis, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("SECUS", module.APs.SECUS))
logger.info("{}".format(module.getAxisParameter(module.APs.SECUS, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("smartEnergyHysteresisStart", module.APs.smartEnergyHysteresisStart))
logger.info("{}".format(module.getAxisParameter(module.APs.smartEnergyHysteresisStart, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("SG2FilterEnable", module.APs.SG2FilterEnable))
logger.info("{}".format(module.getAxisParameter(module.APs.SG2FilterEnable, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("SG2Threshold", module.APs.SG2Threshold))
logger.info("{}".format(module.getAxisParameter(module.APs.SG2Threshold, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("slopeControlHighSide", module.APs.slopeControlHighSide))
logger.info("{}".format(module.getAxisParameter(module.APs.slopeControlHighSide, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("slopeControlLowSide", module.APs.slopeControlLowSide))
logger.info("{}".format(module.getAxisParameter(module.APs.slopeControlLowSide, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("ShortToGroundProtection", module.APs.ShortToGroundProtection))
logger.info("{}".format(module.getAxisParameter(module.APs.ShortToGroundProtection, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("ShortDetectionTime", module.APs.ShortDetectionTime))
logger.info("{}".format(module.getAxisParameter(module.APs.ShortDetectionTime, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("VSense", module.APs.VSense))
logger.info("{}".format(module.getAxisParameter(module.APs.VSense, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("smartEnergyActualCurrent", module.APs.smartEnergyActualCurrent))
logger.info("{}".format(module.getAxisParameter(module.APs.smartEnergyActualCurrent, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("smartEnergyStallVelocity", module.APs.smartEnergyStallVelocity))
logger.info("{}".format(module.getAxisParameter(module.APs.smartEnergyStallVelocity, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("smartEnergyThresholdSpeed", module.APs.smartEnergyThresholdSpeed))
logger.info("{}".format(module.getAxisParameter(module.APs.smartEnergyThresholdSpeed, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("smartEnergySlowRunCurrent", module.APs.smartEnergySlowRunCurrent))
logger.info("{}".format(module.getAxisParameter(module.APs.smartEnergySlowRunCurrent, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("RandomTOffMode", module.APs.RandomTOffMode))
logger.info("{}".format(module.getAxisParameter(module.APs.RandomTOffMode, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("ReferenceSearchMode", module.APs.ReferenceSearchMode))
logger.info("{}".format(module.getAxisParameter(module.APs.ReferenceSearchMode, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("ReferenceSearchSpeed", module.APs.ReferenceSearchSpeed))
logger.info("{}".format(module.getAxisParameter(module.APs.ReferenceSearchSpeed, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("referenceSwitchSpeed", module.APs.referenceSwitchSpeed))
logger.info("{}".format(module.getAxisParameter(module.APs.referenceSwitchSpeed, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("referenceSwitchDistance", module.APs.referenceSwitchDistance))
logger.info("{}".format(module.getAxisParameter(module.APs.referenceSwitchDistance, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("lastReferenceSwitchPosition", module.APs.lastReferenceSwitchPosition))
logger.info("{}".format(module.getAxisParameter(module.APs.lastReferenceSwitchPosition, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("BoostCurrent", module.APs.BoostCurrent))
logger.info("{}".format(module.getAxisParameter(module.APs.BoostCurrent, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("freewheelingDelay", module.APs.freewheelingDelay))
logger.info("{}".format(module.getAxisParameter(module.APs.freewheelingDelay, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("LoadValue", module.APs.LoadValue))
logger.info("{}".format(module.getAxisParameter(module.APs.LoadValue, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("extendedErrorFlags", module.APs.extendedErrorFlags))
logger.info("{}".format(module.getAxisParameter(module.APs.extendedErrorFlags, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("DrvStatusFlags", module.APs.DrvStatusFlags))
logger.info("{}".format(module.getAxisParameter(module.APs.DrvStatusFlags, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("EncoderPosition", module.APs.EncoderPosition))
logger.info("{}".format(module.getAxisParameter(module.APs.EncoderPosition, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("EncoderResolution", module.APs.EncoderResolution))
logger.info("{}".format(module.getAxisParameter(module.APs.EncoderResolution, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("max_EncoderDeviation", module.APs.max_EncoderDeviation))
logger.info("{}".format(module.getAxisParameter(module.APs.max_EncoderDeviation, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("PowerDownDelay", module.APs.PowerDownDelay))
logger.info("{}".format(module.getAxisParameter(module.APs.PowerDownDelay, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("absoluteResolverValue", module.APs.absoluteResolverValue))
logger.info("{}".format(module.getAxisParameter(module.APs.absoluteResolverValue, AP_AXIS)))
logger.info("Getting axis parameter ({}, {}) ...".format("Step_DirectionMode", module.APs.Step_DirectionMode))
logger.info("{}".format(module.getAxisParameter(module.APs.Step_DirectionMode, AP_AXIS)))
logger.info("Test completed successfully.")
|
the-stack_106_32141 | # -*- coding: utf-8 -*-
"""
@author:charlesXu
@file: Info_Ext_main.py
@desc: Data preprocessing for the NER corpus
@time: 2018/08/08
"""
import sys, pickle, os, random
import numpy as np
import pdb
## tags, BIO tagging scheme
tag2label = {"O": 0,
"B-PER": 1, "I-PER": 2,
"B-LOC": 3, "I-LOC": 4,
"B-ORG": 5, "I-ORG": 6
}
def read_corpus(corpus_path):
"""
read corpus and return the list of samples
:param corpus_path:
:return: data
"""
data = []
with open(corpus_path, encoding='utf-8') as fr:
lines = fr.readlines()
sent_, tag_ = [], []
for line in lines:
if line != '\n':
[char, label] = line.strip().split()
sent_.append(char)
tag_.append(label)
else:
data.append((sent_, tag_))
sent_, tag_ = [], []
return data
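# read_corpus above expects a CoNLL-style file: one "<character> <BIO tag>" pair per
# line and a blank line between sentences, for example (characters are illustrative):
#   小 B-PER
#   明 I-PER
#   在 O
#   北 B-LOC
#   京 I-LOC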
def vocab_build(vocab_path, corpus_path, min_count):
"""
:param vocab_path:
:param corpus_path:
:param min_count:
:return:
"""
data = read_corpus(corpus_path)
word2id = {}
for sent_, tag_ in data:
for word in sent_:
if word.isdigit():
word = '<NUM>'
            elif ('\u0041' <= word <= '\u005a') or ('\u0061' <= word <= '\u007a'):
word = '<ENG>'
if word not in word2id:
word2id[word] = [len(word2id)+1, 1]
else:
word2id[word][1] += 1
low_freq_words = []
for word, [word_id, word_freq] in word2id.items():
if word_freq < min_count and word != '<NUM>' and word != '<ENG>':
low_freq_words.append(word)
for word in low_freq_words:
del word2id[word]
new_id = 1
for word in word2id.keys():
word2id[word] = new_id
new_id += 1
word2id['<UNK>'] = new_id
word2id['<PAD>'] = 0
print(len(word2id))
with open(vocab_path, 'wb') as fw:
pickle.dump(word2id, fw)
def sentence2id(sent, word2id):
"""
:param sent:
:param word2id:
:return:
"""
sentence_id = []
for word in sent:
if word.isdigit():
word = '<NUM>'
elif ('\u0041' <= word <= '\u005a') or ('\u0061' <= word <= '\u007a'):
word = '<ENG>'
if word not in word2id:
word = '<UNK>'
sentence_id.append(word2id[word])
return sentence_id
def read_dictionary(vocab_path):
"""
:param vocab_path:
:return:
"""
vocab_path = os.path.join(vocab_path)
with open(vocab_path, 'rb') as fr:
word2id = pickle.load(fr)
print('vocab_size:', len(word2id))
return word2id
def random_embedding(vocab, embedding_dim):
"""
:param vocab:
:param embedding_dim:
:return:
"""
embedding_mat = np.random.uniform(-0.25, 0.25, (len(vocab), embedding_dim))
embedding_mat = np.float32(embedding_mat)
return embedding_mat
def pad_sequences(sequences, pad_mark=0):
"""
:param sequences:
:param pad_mark:
:return:
"""
    max_len = max(map(lambda x: len(x), sequences))
seq_list, seq_len_list = [], []
for seq in sequences:
seq = list(seq)
seq_ = seq[:max_len] + [pad_mark] * max(max_len - len(seq), 0)
seq_list.append(seq_)
seq_len_list.append(min(len(seq), max_len))
return seq_list, seq_len_list
def batch_yield(data, batch_size, vocab, tag2label, shuffle=False):
"""
:param data:
:param batch_size:
:param vocab:
:param tag2label:
:param shuffle:
:return:
"""
if shuffle:
random.shuffle(data)
seqs, labels = [], []
for (sent_, tag_) in data:
sent_ = sentence2id(sent_, vocab)
label_ = [tag2label[tag] for tag in tag_]
if len(seqs) == batch_size:
yield seqs, labels
seqs, labels = [], []
seqs.append(sent_)
labels.append(label_)
if len(seqs) != 0:
yield seqs, labels
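# Typical wiring of the helpers above (paths are placeholders): load the vocabulary
# produced by vocab_build, then iterate padded mini-batches for training, e.g.
#   word2id = read_dictionary("data/word2id.pkl")
#   train_data = read_corpus("data/train_data")
#   for seqs, labels in batch_yield(train_data, 64, word2id, tag2label, shuffle=True):
#       seq_list, seq_len_list = pad_sequences(seqs)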
|
the-stack_106_32142 | """Module for keeping the value of a RemoteValue from KNX bus up to date."""
import asyncio
from enum import Enum
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Optional, Tuple, Union
from xknx.remote_value import RemoteValue
if TYPE_CHECKING:
from xknx.xknx import XKNX
logger = logging.getLogger("xknx.state_updater")
DEFAULT_UPDATE_INTERVAL = 60
MAX_UPDATE_INTERVAL = 1440
class StateUpdater:
"""Class for keeping the states of RemoteValues up to date."""
def __init__(self, xknx: "XKNX", parallel_reads: int = 2):
"""Initialize StateUpdater class."""
self.xknx = xknx
self.started = False
self._workers: Dict[int, _StateTracker] = {}
self._semaphore = asyncio.Semaphore(value=parallel_reads)
def register_remote_value(
self,
remote_value: RemoteValue,
tracker_options: Union[bool, int, float, str] = True,
) -> None:
"""Register a RemoteValue to initialize its state and/or track for expiration."""
def parse_tracker_options(
tracker_options: Union[bool, int, float, str]
) -> Tuple[StateTrackerType, Union[int, float]]:
"""Parse tracker type and expiration time."""
tracker_type = StateTrackerType.EXPIRE
update_interval: Union[int, float] = DEFAULT_UPDATE_INTERVAL
if isinstance(tracker_options, bool):
# `True` would be overwritten by the check for `int`
return (tracker_type, update_interval)
if isinstance(tracker_options, (int, float)):
update_interval = tracker_options
elif isinstance(tracker_options, str):
_options = tracker_options.split()
if _options[0].upper() == "INIT":
tracker_type = StateTrackerType.INIT
elif _options[0].upper() == "EXPIRE":
tracker_type = StateTrackerType.EXPIRE
elif _options[0].upper() == "EVERY":
tracker_type = StateTrackerType.PERIODICALLY
else:
logger.warning(
'Could not parse StateUpdater tracker_options "%s" for %s. Using default %s %s minutes.',
tracker_options,
remote_value,
tracker_type,
update_interval,
)
return (tracker_type, update_interval)
try:
if _options[1].isdigit():
update_interval = int(_options[1])
except IndexError:
pass # No time given (no _options[1])
if update_interval > MAX_UPDATE_INTERVAL:
logger.warning(
"StateUpdater interval of %s to long for %s. Using maximum of %s minutes (1 day)",
tracker_options,
remote_value,
MAX_UPDATE_INTERVAL,
)
update_interval = MAX_UPDATE_INTERVAL
return (tracker_type, update_interval)
async def read_state_mutex() -> None:
"""Schedule to read the state from the KNX bus - one at a time."""
async with self._semaphore:
# wait until there is nothing else to send to the bus
await self.xknx.telegram_queue.outgoing_queue.join()
logger.debug(
"StateUpdater reading %s for %s - %s",
remote_value.group_address_state,
remote_value.device_name,
remote_value.feature_name,
)
                # shield from cancellation so update_received() doesn't cancel the
                # ValueReader, which would leave the telegram_received_cb registered until the next telegram
await asyncio.shield(remote_value.read_state(wait_for_result=True))
tracker_type, update_interval = parse_tracker_options(tracker_options)
tracker = _StateTracker(
read_state_awaitable=read_state_mutex,
tracker_type=tracker_type,
interval_min=update_interval,
)
self._workers[id(remote_value)] = tracker
logger.debug(
"StateUpdater registered %s %s for %s",
tracker_type,
update_interval,
remote_value,
)
if self.started:
tracker.start()
def unregister_remote_value(self, remote_value: RemoteValue) -> None:
"""Unregister a RemoteValue from StateUpdater."""
self._workers.pop(id(remote_value)).stop()
def update_received(self, remote_value: RemoteValue) -> None:
"""Reset the timer when a state update was received."""
if self.started and id(remote_value) in self._workers:
self._workers[id(remote_value)].update_received()
def start(self) -> None:
"""Start StateUpdater. Initialize states."""
logger.debug("StateUpdater initializing values")
self.started = True
for worker in self._workers.values():
worker.start()
def stop(self) -> None:
"""Stop StateUpdater."""
logger.debug("StateUpdater stopping")
self.started = False
for worker in self._workers.values():
worker.stop()
class StateTrackerType(Enum):
"""Enum indicating the StateUpdater Type."""
INIT = 1
EXPIRE = 2
PERIODICALLY = 3
class _StateTracker:
"""Keeps track of the age of the state from one RemoteValue."""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
read_state_awaitable: Callable[[], Awaitable[None]],
tracker_type: StateTrackerType = StateTrackerType.EXPIRE,
interval_min: float = 60,
):
"""Initialize StateTracker class."""
# pylint: disable=too-many-arguments
self.tracker_type = tracker_type
self.update_interval = interval_min * 60
self._read_state = read_state_awaitable
self._task: Optional[asyncio.Task[None]] = None
def start(self) -> None:
"""Start StateTracker - read state on call."""
self.stop()
self._task = asyncio.create_task(self._start_init())
async def _start_init(self) -> None:
"""Initialize state, start update loop if appropriate."""
await self._read_state()
if self.tracker_type is not StateTrackerType.INIT:
self.reset()
def reset(self) -> None:
"""Start / Restart StateTracker timer - wait for value to expire."""
self.stop()
self._task = asyncio.create_task(self._update_loop())
def stop(self) -> None:
"""Stop StateTracker."""
if self._task:
self._task.cancel()
self._task = None
def update_received(self) -> None:
"""Reset the timer if a telegram was received for a "expire" typed StateUpdater."""
if self.tracker_type == StateTrackerType.EXPIRE:
self.reset()
async def _update_loop(self) -> None:
"""Wait for the update_interval to expire. Endless loop for updating states."""
        # for StateTrackerType.EXPIRE:
        # on a successful read the while loop gets canceled when the callback calls update_received();
        # when no telegram was received it will try again endlessly
while True:
await asyncio.sleep(self.update_interval)
await self._read_state()
|
the-stack_106_32146 | # This program simulates 10 tosses of a coin.
import random
# Constants
HEADS = 1
TAILS = 2
TOSSES = 10
def main():
for toss in range(TOSSES):
# Simulate the coin toss.
if random.randint(HEADS, TAILS) == HEADS:
print('Heads')
else:
print('Tails')
# Call the main function.
main()
|
the-stack_106_32147 | """Posts URL configuration"""
from django.urls import path
from .views import (
add_post_view,
category_view,
delete_post_view,
post_detail_view,
post_list_view,
search_results_view,
update_post_view,
)
urlpatterns = [
path("post_list/", post_list_view, name="post_list"),
path("post_detail/<int:pk>", post_detail_view, name="post_detail"),
path("add_post/", add_post_view, name="add_post"),
path("post_detail/edit/<int:pk>", update_post_view, name="update_post"),
path("post_detail/<int:pk>/remove", delete_post_view, name="delete_post"),
path("category/<str:cats>/", category_view, name="category"),
path("search/", search_results_view, name="search_results"),
]
|
the-stack_106_32148 | from copy import deepcopy
from typing import List, Union, Tuple, Dict
import numpy as np
import pandas as pd
def process_dataframe(X: Union[pd.DataFrame, np.ndarray], copy=True) -> pd.DataFrame:
if isinstance(X, pd.DataFrame):
if copy:
X_ = deepcopy(X)
else:
X_ = X
elif isinstance(X, np.ndarray):
X_ = pd.DataFrame(X, columns=range(X.shape[1]))
else:
raise NotImplementedError
return X_
def replace_nan_to_None(df: pd.DataFrame) -> pd.DataFrame:
df = deepcopy(df)
for column, dtype in zip(df.columns, df.dtypes):
if dtype == object:
df[column] = df[column].apply(lambda x: None if pd.isna(x) else x)
return df
def replace_dict(dict_: dict, from_, to_):
for k, v in dict_.items():
if v == from_:
dict_[k] = to_
def replace_dicts(dicts, from_, to_):
for dict_ in dicts:
replace_dict(dict_, from_, to_)
def get_unique_col_name(columns: Union[pd.Index, pd.Series], wanted: str):
cnt = 1
while np.sum(columns == wanted) >= 1:
ix = wanted.rfind("_")
        if ix < 0:
            ix = len(wanted)
wanted = wanted[:ix] + f"_{cnt}"
cnt += 1
return wanted
def process_duplicated_columns(columns: pd.Index) -> Tuple[pd.Index, Dict[str, str]]:
    # Check for duplicated columns and rename them so every column name is unique
if isinstance(columns, pd.Index):
columns = pd.Series(columns)
else:
columns = deepcopy(columns)
unq, cnt = np.unique(columns, return_counts=True)
if len(unq) == len(columns):
return columns, {}
duplicated_columns = unq[cnt > 1]
index2newName = {}
for duplicated_column in duplicated_columns:
for ix in reversed(np.where(columns == duplicated_column)[0]):
new_name = get_unique_col_name(columns, columns[ix])
columns[ix] = new_name
index2newName[ix] = new_name
assert len(np.unique(columns)) == len(columns)
return columns, index2newName
def inverse_dict(dict_: dict):
dict_ = deepcopy(dict_)
return {v: k for k, v in dict_.items()}
def rectify_dtypes(df: pd.DataFrame):
# make sure: only (str, int, float, bool) is valid
object_columns = get_object_columns(df)
for object_column in object_columns:
if not np.any(df[object_column].apply(lambda x: isinstance(x, str))):
if np.any(df[object_column].apply(lambda x: isinstance(x, float))):
df[object_column] = df[object_column].astype(float)
else:
df[object_column] = df[object_column].astype(int)
def get_object_columns(df_: pd.DataFrame) -> List[str]:
return list(df_.dtypes[df_.dtypes == object].index)
class DataFrameValuesWrapper():
def __init__(self, X: Union[pd.DataFrame, pd.Series, np.ndarray]):
if isinstance(X, (pd.DataFrame, pd.Series)):
self.array = X.values
self.dataframe = X
self.origin = "dataframe"
elif isinstance(X, np.ndarray):
self.array = X
self.dataframe = pd.DataFrame(X)
self.origin = "array"
def wrap_to_dataframe(self, array):
return pd.DataFrame(array, columns=self.dataframe.columns, index=self.dataframe.index)
# if __name__ == '__main__':
# columns = pd.Index([str(x) for x in [1, 2, 3, 2, 3, 4, 5]])
# columns, index2newName = process_duplicated_columns(columns)
# print(columns)
# print(index2newName)
|
the-stack_106_32149 | # (C) Datadog, Inc. 2013-2017
# (C) Brett Langdon <[email protected]> 2013
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import re
import time
import warnings
from collections import defaultdict
# 3p
from six.moves.urllib.parse import urlparse, quote_plus, urljoin
from six import iteritems
import requests
from requests.exceptions import RequestException
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# project
from datadog_checks.base import AgentCheck, is_affirmative
EVENT_TYPE = SOURCE_TYPE_NAME = 'rabbitmq'
EXCHANGE_TYPE = 'exchanges'
QUEUE_TYPE = 'queues'
NODE_TYPE = 'nodes'
CONNECTION_TYPE = 'connections'
OVERVIEW_TYPE = 'overview'
MAX_DETAILED_EXCHANGES = 50
MAX_DETAILED_QUEUES = 200
MAX_DETAILED_NODES = 100
# Post an event in the stream when the number of queues or nodes to
# collect is above 90% of the limit:
ALERT_THRESHOLD = 0.9
EXCHANGE_ATTRIBUTES = [
# Path, Name, Operation
('message_stats/ack', 'messages.ack.count', float),
('message_stats/ack_details/rate', 'messages.ack.rate', float),
('message_stats/confirm', 'messages.confirm.count', float),
('message_stats/confirm_details/rate', 'messages.confirm.rate', float),
('message_stats/deliver_get', 'messages.deliver_get.count', float),
('message_stats/deliver_get_details/rate', 'messages.deliver_get.rate', float),
('message_stats/publish', 'messages.publish.count', float),
('message_stats/publish_details/rate', 'messages.publish.rate', float),
('message_stats/publish_in', 'messages.publish_in.count', float),
('message_stats/publish_in_details/rate', 'messages.publish_in.rate', float),
('message_stats/publish_out', 'messages.publish_out.count', float),
('message_stats/publish_out_details/rate', 'messages.publish_out.rate', float),
('message_stats/return_unroutable', 'messages.return_unroutable.count', float),
('message_stats/return_unroutable_details/rate', 'messages.return_unroutable.rate', float),
('message_stats/redeliver', 'messages.redeliver.count', float),
('message_stats/redeliver_details/rate', 'messages.redeliver.rate', float),
]
QUEUE_ATTRIBUTES = [
# Path, Name, Operation
('active_consumers', 'active_consumers', float),
('consumers', 'consumers', float),
('consumer_utilisation', 'consumer_utilisation', float),
('memory', 'memory', float),
('messages', 'messages', float),
('messages_details/rate', 'messages.rate', float),
('messages_ready', 'messages_ready', float),
('messages_ready_details/rate', 'messages_ready.rate', float),
('messages_unacknowledged', 'messages_unacknowledged', float),
('messages_unacknowledged_details/rate', 'messages_unacknowledged.rate', float),
('message_stats/ack', 'messages.ack.count', float),
('message_stats/ack_details/rate', 'messages.ack.rate', float),
('message_stats/deliver', 'messages.deliver.count', float),
('message_stats/deliver_details/rate', 'messages.deliver.rate', float),
('message_stats/deliver_get', 'messages.deliver_get.count', float),
('message_stats/deliver_get_details/rate', 'messages.deliver_get.rate', float),
('message_stats/publish', 'messages.publish.count', float),
('message_stats/publish_details/rate', 'messages.publish.rate', float),
('message_stats/redeliver', 'messages.redeliver.count', float),
('message_stats/redeliver_details/rate', 'messages.redeliver.rate', float),
]
NODE_ATTRIBUTES = [
('fd_used', 'fd_used', float),
('disk_free', 'disk_free', float),
('mem_used', 'mem_used', float),
('run_queue', 'run_queue', float),
('sockets_used', 'sockets_used', float),
('partitions', 'partitions', len),
('running', 'running', float),
('mem_alarm', 'mem_alarm', float),
('disk_free_alarm', 'disk_alarm', float),
]
OVERVIEW_ATTRIBUTES = [
("object_totals/connections", "object_totals.connections", float),
("object_totals/channels", "object_totals.channels", float),
("object_totals/queues", "object_totals.queues", float),
("object_totals/consumers", "object_totals.consumers", float),
("queue_totals/messages", "queue_totals.messages.count", float),
("queue_totals/messages_details/rate", "queue_totals.messages.rate", float),
("queue_totals/messages_ready", "queue_totals.messages_ready.count", float),
("queue_totals/messages_ready_details/rate", "queue_totals.messages_ready.rate", float),
("queue_totals/messages_unacknowledged", "queue_totals.messages_unacknowledged.count", float),
("queue_totals/messages_unacknowledged_details/rate", "queue_totals.messages_unacknowledged.rate", float),
('message_stats/ack', 'messages.ack.count', float),
('message_stats/ack_details/rate', 'messages.ack.rate', float),
('message_stats/confirm', 'messages.confirm.count', float),
('message_stats/confirm_details/rate', 'messages.confirm.rate', float),
('message_stats/deliver_get', 'messages.deliver_get.count', float),
('message_stats/deliver_get_details/rate', 'messages.deliver_get.rate', float),
('message_stats/publish', 'messages.publish.count', float),
('message_stats/publish_details/rate', 'messages.publish.rate', float),
('message_stats/publish_in', 'messages.publish_in.count', float),
('message_stats/publish_in_details/rate', 'messages.publish_in.rate', float),
('message_stats/publish_out', 'messages.publish_out.count', float),
('message_stats/publish_out_details/rate', 'messages.publish_out.rate', float),
('message_stats/return_unroutable', 'messages.return_unroutable.count', float),
('message_stats/return_unroutable_details/rate', 'messages.return_unroutable.rate', float),
('message_stats/redeliver', 'messages.redeliver.count', float),
('message_stats/redeliver_details/rate', 'messages.redeliver.rate', float),
]
ATTRIBUTES = {
EXCHANGE_TYPE: EXCHANGE_ATTRIBUTES,
QUEUE_TYPE: QUEUE_ATTRIBUTES,
NODE_TYPE: NODE_ATTRIBUTES,
OVERVIEW_TYPE: OVERVIEW_ATTRIBUTES,
}
TAG_PREFIX = 'rabbitmq'
TAGS_MAP = {
EXCHANGE_TYPE: {
'name': 'exchange',
'vhost': 'vhost',
'exchange_family': 'exchange_family',
},
QUEUE_TYPE: {
'node': 'node',
'name': 'queue',
'vhost': 'vhost',
'policy': 'policy',
'queue_family': 'queue_family',
},
NODE_TYPE: {
'name': 'node',
},
OVERVIEW_TYPE: {
'cluster_name': 'cluster'
}
}
METRIC_SUFFIX = {
EXCHANGE_TYPE: "exchange",
QUEUE_TYPE: "queue",
NODE_TYPE: "node",
OVERVIEW_TYPE: "overview",
}
class RabbitMQException(Exception):
pass
class RabbitMQ(AgentCheck):
"""This check is for gathering statistics from the RabbitMQ
Management Plugin (http://www.rabbitmq.com/management.html)
"""
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.already_alerted = []
self.cached_vhosts = {} # this is used to send CRITICAL rabbitmq.aliveness check if the server goes down
def _get_config(self, instance):
# make sure 'rabbitmq_api_url' is present and get parameters
base_url = instance.get('rabbitmq_api_url', None)
if not base_url:
raise Exception('Missing "rabbitmq_api_url" in RabbitMQ config.')
if not base_url.endswith('/'):
base_url += '/'
username = instance.get('rabbitmq_user', 'guest')
password = instance.get('rabbitmq_pass', 'guest')
custom_tags = instance.get('tags', [])
parsed_url = urlparse(base_url)
if not parsed_url.scheme or "://" not in parsed_url.geturl():
self.log.warning('The rabbit url did not include a protocol, assuming http')
# urljoin cannot add a protocol to the rest of the url for some reason.
# This still leaves the potential for errors, but such urls would never have been valid, either
# and it's not likely to be useful to attempt to catch all possible mistakes people could make.
# urlparse also has a known issue parsing url with no schema, but a port in the host section
            # mistakenly taking the host for the schema, hence the additional validation
base_url = 'http://' + base_url
parsed_url = urlparse(base_url)
suppress_warning = False
ssl_verify = is_affirmative(instance.get('ssl_verify', True))
if not ssl_verify and parsed_url.scheme == 'https':
# Only allow suppressing the warning if not ssl_verify
suppress_warning = instance.get('ignore_ssl_warning', False)
self.log.warning('Skipping SSL cert validation for %s based on configuration.' % (base_url))
# Limit of queues/nodes to collect metrics from
max_detailed = {
EXCHANGE_TYPE: int(instance.get('max_detailed_exchanges', MAX_DETAILED_EXCHANGES)),
QUEUE_TYPE: int(instance.get('max_detailed_queues', MAX_DETAILED_QUEUES)),
NODE_TYPE: int(instance.get('max_detailed_nodes', MAX_DETAILED_NODES)),
}
# List of queues/nodes to collect metrics from
specified = {
EXCHANGE_TYPE: {
'explicit': instance.get('exchanges', []),
'regexes': instance.get('exchanges_regexes', []),
},
QUEUE_TYPE: {
'explicit': instance.get('queues', []),
'regexes': instance.get('queues_regexes', []),
},
NODE_TYPE: {
'explicit': instance.get('nodes', []),
'regexes': instance.get('nodes_regexes', []),
},
}
for object_type, filters in iteritems(specified):
for _, filter_objects in iteritems(filters):
if type(filter_objects) != list:
raise TypeError(
"{0} / {0}_regexes parameter must be a list".format(object_type))
auth = (username, password)
return base_url, max_detailed, specified, auth, ssl_verify, custom_tags, suppress_warning
def _get_vhosts(self, instance, base_url, auth=None, ssl_verify=True):
vhosts = instance.get('vhosts')
if not vhosts:
# Fetch a list of _all_ vhosts from the API.
vhosts_url = urljoin(base_url, 'vhosts')
vhost_proxy = self.get_instance_proxy(instance, vhosts_url)
vhosts_response = self._get_data(vhosts_url, auth=auth, ssl_verify=ssl_verify, proxies=vhost_proxy)
vhosts = [v['name'] for v in vhosts_response]
return vhosts
def check(self, instance):
base_url, max_detailed, specified, auth, ssl_verify, custom_tags, suppress_warning = self._get_config(instance)
try:
with warnings.catch_warnings():
vhosts = self._get_vhosts(instance, base_url, auth=auth, ssl_verify=ssl_verify)
self.cached_vhosts[base_url] = vhosts
limit_vhosts = []
if self._limit_vhosts(instance):
limit_vhosts = vhosts
# Suppress warnings from urllib3 only if ssl_verify is set to False and ssl_warning is set to False
if suppress_warning:
warnings.simplefilter('ignore', InsecureRequestWarning)
# Generate metrics from the status API.
self.get_stats(instance, base_url, EXCHANGE_TYPE, max_detailed[EXCHANGE_TYPE], specified[EXCHANGE_TYPE],
limit_vhosts, custom_tags, auth=auth, ssl_verify=ssl_verify)
self.get_stats(instance, base_url, QUEUE_TYPE, max_detailed[QUEUE_TYPE], specified[QUEUE_TYPE],
limit_vhosts, custom_tags, auth=auth, ssl_verify=ssl_verify)
self.get_stats(instance, base_url, NODE_TYPE, max_detailed[NODE_TYPE], specified[NODE_TYPE],
limit_vhosts, custom_tags, auth=auth, ssl_verify=ssl_verify)
self.get_overview_stats(instance, base_url, custom_tags, auth=auth, ssl_verify=ssl_verify)
self.get_connections_stat(instance, base_url, CONNECTION_TYPE, vhosts, limit_vhosts, custom_tags,
auth=auth, ssl_verify=ssl_verify)
# Generate a service check from the aliveness API. In the case of an invalid response
# code or unparseable JSON this check will send no data.
self._check_aliveness(instance, base_url, vhosts, custom_tags, auth=auth, ssl_verify=ssl_verify)
# Generate a service check for the service status.
self.service_check('rabbitmq.status', AgentCheck.OK, custom_tags)
except RabbitMQException as e:
msg = "Error executing check: {}".format(e)
self.service_check('rabbitmq.status', AgentCheck.CRITICAL, custom_tags, message=msg)
self.log.error(msg)
# tag every vhost as CRITICAL or they would keep the latest value, OK, in case the RabbitMQ server goes down
msg = "error while contacting rabbitmq ({}), setting aliveness to CRITICAL for vhosts: {}".format(
base_url, self.cached_vhosts
)
self.log.error(msg)
for vhost in self.cached_vhosts.get(base_url, []):
self.service_check('rabbitmq.aliveness',
AgentCheck.CRITICAL,
['vhost:{}'.format(vhost)] + custom_tags,
message="Could not contact aliveness API")
def _get_data(self, url, auth=None, ssl_verify=True, proxies=None):
if proxies is None:
proxies = {}
try:
r = requests.get(url,
auth=auth,
proxies=proxies,
timeout=self.default_integration_http_timeout,
verify=ssl_verify)
r.raise_for_status()
return r.json()
except RequestException as e:
raise RabbitMQException('Cannot open RabbitMQ API url: {} {}'.format(url, str(e)))
except ValueError as e:
raise RabbitMQException('Cannot parse JSON response from API url: {} {}'.format(url, str(e)))
def _filter_list(self, data, explicit_filters, regex_filters, object_type, tag_families):
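        # When a pattern in `regex_filters` contains a capture group and tag_families
        # is enabled, the first group becomes the queue_family / exchange_family tag
        # below (illustrative example: the pattern "(\w+)_\d+" would tag the queue
        # "orders_17" with queue_family:orders).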
if explicit_filters or regex_filters:
matching_lines = []
for data_line in data:
name = data_line.get("name")
if name in explicit_filters:
matching_lines.append(data_line)
explicit_filters.remove(name)
continue
match_found = False
for p in regex_filters:
match = re.search(p, name)
if match:
if is_affirmative(tag_families) and match.groups():
if object_type == QUEUE_TYPE:
data_line["queue_family"] = match.groups()[0]
if object_type == EXCHANGE_TYPE:
data_line["exchange_family"] = match.groups()[0]
matching_lines.append(data_line)
match_found = True
break
if match_found:
continue
# Absolute names work only for queues and exchanges
if object_type != QUEUE_TYPE and object_type != EXCHANGE_TYPE:
continue
absolute_name = '{}/{}'.format(data_line.get("vhost"), name)
if absolute_name in explicit_filters:
matching_lines.append(data_line)
explicit_filters.remove(absolute_name)
continue
for p in regex_filters:
match = re.search(p, absolute_name)
if match:
if is_affirmative(tag_families) and match.groups():
if object_type == QUEUE_TYPE:
data_line["queue_family"] = match.groups()[0]
if object_type == EXCHANGE_TYPE:
data_line["exchange_family"] = match.groups()[0]
matching_lines.append(data_line)
match_found = True
break
if match_found:
continue
return matching_lines
return data
def _get_tags(self, data, object_type, custom_tags):
tags = []
tag_list = TAGS_MAP[object_type]
for t in tag_list:
tag = data.get(t)
if tag:
# FIXME 6.x: remove this suffix or unify (sc doesn't have it)
tags.append('{}_{}:{}'.format(TAG_PREFIX, tag_list[t], tag))
return tags + custom_tags
def get_stats(self, instance, base_url, object_type, max_detailed,
filters, limit_vhosts, custom_tags, auth=None, ssl_verify=True):
"""
instance: the check instance
base_url: the url of the rabbitmq management api (e.g. http://localhost:15672/api)
object_type: either QUEUE_TYPE or NODE_TYPE or EXCHANGE_TYPE
max_detailed: the limit of objects to collect for this type
        filters: explicit names or regex filters for the queues/nodes/exchanges specified in the yaml file
"""
instance_proxy = self.get_instance_proxy(instance, base_url)
# Make a copy of this list as we will remove items from it at each
# iteration
explicit_filters = list(filters['explicit'])
regex_filters = filters['regexes']
data = []
# only do this if vhosts were specified,
# otherwise it'll just be making more queries for the same data
if self._limit_vhosts(instance) and object_type == QUEUE_TYPE:
for vhost in limit_vhosts:
url = '{}/{}'.format(object_type, quote_plus(vhost))
try:
data += self._get_data(urljoin(base_url, url), auth=auth,
ssl_verify=ssl_verify, proxies=instance_proxy)
except Exception as e:
self.log.debug("Couldn't grab queue data from vhost, {}: {}".format(vhost, e))
else:
data = self._get_data(urljoin(base_url, object_type), auth=auth,
ssl_verify=ssl_verify, proxies=instance_proxy)
""" data is a list of nodes or queues:
data = [
{
'status': 'running',
'node': 'rabbit@host',
'name': 'queue1',
'consumers': 0,
'vhost': '/',
'backing_queue_status': {
'q1': 0,
'q3': 0,
'q2': 0,
'q4': 0,
'avg_ack_egress_rate': 0.0,
'ram_msg_count': 0,
'ram_ack_count': 0,
'len': 0,
'persistent_count': 0,
'target_ram_count': 'infinity',
'next_seq_id': 0,
'delta': ['delta', 'undefined', 0, 'undefined'],
'pending_acks': 0,
'avg_ack_ingress_rate': 0.0,
'avg_egress_rate': 0.0,
'avg_ingress_rate': 0.0
},
'durable': True,
'idle_since': '2013-10-03 13:38:18',
'exclusive_consumer_tag': '',
'arguments': {},
'memory': 10956,
'policy': '',
'auto_delete': False
},
{
'status': 'running',
                'node': 'rabbit@host',
'name': 'queue10',
'consumers': 0,
'vhost': '/',
'backing_queue_status': {
'q1': 0,
'q3': 0,
'q2': 0,
'q4': 0,
'avg_ack_egress_rate': 0.0,
'ram_msg_count': 0,
'ram_ack_count': 0,
'len': 0,
'persistent_count': 0,
'target_ram_count': 'infinity',
'next_seq_id': 0,
'delta': ['delta', 'undefined', 0, 'undefined'],
'pending_acks': 0,
'avg_ack_ingress_rate': 0.0,
'avg_egress_rate': 0.0, 'avg_ingress_rate': 0.0
},
'durable': True,
'idle_since': '2013-10-03 13:38:18',
'exclusive_consumer_tag': '',
'arguments': {},
'memory': 10956,
'policy': '',
'auto_delete': False
},
{
'status': 'running',
'node': 'rabbit@host',
'name': 'queue11',
'consumers': 0,
'vhost': '/',
'backing_queue_status': {
'q1': 0,
'q3': 0,
'q2': 0,
'q4': 0,
'avg_ack_egress_rate': 0.0,
'ram_msg_count': 0,
'ram_ack_count': 0,
'len': 0,
'persistent_count': 0,
'target_ram_count': 'infinity',
'next_seq_id': 0,
'delta': ['delta', 'undefined', 0, 'undefined'],
'pending_acks': 0,
'avg_ack_ingress_rate': 0.0,
'avg_egress_rate': 0.0,
'avg_ingress_rate': 0.0
},
'durable': True,
'idle_since': '2013-10-03 13:38:18',
'exclusive_consumer_tag': '',
'arguments': {},
'memory': 10956,
'policy': '',
'auto_delete': False
},
...
]
"""
if len(explicit_filters) > max_detailed:
raise Exception(
"The maximum number of {} you can specify is {}.".format(object_type, max_detailed))
# a list of queues/nodes is specified. We process only those
data = self._filter_list(data,
explicit_filters,
regex_filters,
object_type,
instance.get("tag_families", False))
# if no filters are specified, check everything according to the limits
if len(data) > ALERT_THRESHOLD * max_detailed:
# Post a message on the dogweb stream to warn
self.alert(base_url, max_detailed, len(data), object_type, custom_tags)
if len(data) > max_detailed:
# Display a warning in the info page
msg = ("Too many items to fetch. "
"You must choose the {} you are interested in by editing the rabbitmq.yaml configuration file"
"or get in touch with Datadog support").format(object_type)
self.warning(msg)
for data_line in data[:max_detailed]:
# We truncate the list if it's above the limit
self._get_metrics(data_line, object_type, custom_tags)
# get a list of the number of bindings on a given queue
# /api/queues/vhost/name/bindings
if object_type is QUEUE_TYPE:
self._get_queue_bindings_metrics(base_url, custom_tags, data, instance_proxy,
instance, object_type, auth, ssl_verify)
def get_overview_stats(self, instance, base_url, custom_tags, auth=None, ssl_verify=True):
instance_proxy = self.get_instance_proxy(instance, base_url)
data = self._get_data(urljoin(base_url, "overview"), auth=auth,
ssl_verify=ssl_verify, proxies=instance_proxy)
self._get_metrics(data, OVERVIEW_TYPE, custom_tags)
def _get_metrics(self, data, object_type, custom_tags):
tags = self._get_tags(data, object_type, custom_tags)
for attribute, metric_name, operation in ATTRIBUTES[object_type]:
# Walk down through the data path, e.g. foo/bar => d['foo']['bar']
root = data
keys = attribute.split('/')
for path in keys[:-1]:
root = root.get(path, {})
value = root.get(keys[-1], None)
if value is not None:
try:
self.gauge('rabbitmq.{}.{}'.format(
METRIC_SUFFIX[object_type], metric_name), operation(value), tags=tags)
except ValueError:
self.log.debug("Caught ValueError for {} {} = {} with tags: {}".format(
METRIC_SUFFIX[object_type], attribute, value, tags))
def _get_queue_bindings_metrics(self, base_url, custom_tags, data, instance_proxy,
instance, object_type, auth=None, ssl_verify=True):
for item in data:
vhost = item['vhost']
tags = self._get_tags(item, object_type, custom_tags)
url = '{}/{}/{}/bindings'.format(QUEUE_TYPE, quote_plus(vhost), quote_plus(item['name']))
bindings_count = len(self._get_data(urljoin(base_url, url), auth=auth,
ssl_verify=ssl_verify, proxies=instance_proxy))
self.gauge('rabbitmq.queue.bindings.count', bindings_count, tags)
def get_connections_stat(self, instance, base_url,
object_type, vhosts, limit_vhosts,
custom_tags, auth=None, ssl_verify=True):
"""
        Collect metrics on currently open connections per vhost.
"""
instance_proxy = self.get_instance_proxy(instance, base_url)
grab_all_data = True
if self._limit_vhosts(instance):
grab_all_data = False
data = []
for vhost in vhosts:
url = "vhosts/{}/{}".format(quote_plus(vhost), object_type)
try:
data += self._get_data(urljoin(base_url, url), auth=auth,
ssl_verify=ssl_verify, proxies=instance_proxy)
except Exception as e:
# This will happen if there is no connection data to grab
self.log.debug("Couldn't grab connection data from vhost, {}: {}".format(vhost, e))
# sometimes it seems to need to fall back to this
if grab_all_data or not len(data):
data = self._get_data(urljoin(base_url, object_type), auth=auth,
ssl_verify=ssl_verify, proxies=instance_proxy)
stats = {vhost: 0 for vhost in vhosts}
connection_states = defaultdict(int)
for conn in data:
if conn['vhost'] in vhosts:
stats[conn['vhost']] += 1
# 'state' does not exist for direct type connections.
connection_states[conn.get('state', 'direct')] += 1
for vhost, nb_conn in iteritems(stats):
self.gauge('rabbitmq.connections', nb_conn, tags=['{}_vhost:{}'.format(TAG_PREFIX, vhost)] + custom_tags)
for conn_state, nb_conn in iteritems(connection_states):
self.gauge('rabbitmq.connections.state',
nb_conn,
tags=['{}_conn_state:{}'.format(TAG_PREFIX, conn_state)] + custom_tags)
def alert(self, base_url, max_detailed, size, object_type, custom_tags):
key = "{}{}".format(base_url, object_type)
if key in self.already_alerted:
# We have already posted an event
return
self.already_alerted.append(key)
title = (
"RabbitMQ integration is approaching the limit on the number of {} that can be collected from on {}"
).format(object_type, self.hostname)
msg = (
"{} {} are present. The limit is {}. Please get in touch with Datadog support to increase the limit."
).format(size, object_type, max_detailed)
event = {
"timestamp": int(time.time()),
"event_type": EVENT_TYPE,
"msg_title": title,
"msg_text": msg,
"alert_type": 'warning',
"source_type_name": SOURCE_TYPE_NAME,
"host": self.hostname,
"tags": ["base_url:{}".format(base_url), "host:{}".format(self.hostname)] + custom_tags,
"event_object": "rabbitmq.limit.{}".format(object_type),
}
self.event(event)
def _limit_vhosts(self, instance):
"""
Check to see if vhosts were specified in the instance
it will return a boolean, True if they were.
This allows the check to only query the wanted vhosts.
"""
vhosts = instance.get('vhosts', [])
return len(vhosts) > 0
def _check_aliveness(self, instance, base_url, vhosts, custom_tags, auth=None, ssl_verify=True):
"""
Check the aliveness API against all or a subset of vhosts. The API
will return {"status": "ok"} and a 200 response code in the case
that the check passes.
"""
for vhost in vhosts:
tags = ['vhost:{}'.format(vhost)] + custom_tags
# We need to urlencode the vhost because it can be '/'.
path = u'aliveness-test/{}'.format(quote_plus(vhost))
aliveness_url = urljoin(base_url, path)
aliveness_proxy = self.get_instance_proxy(instance, aliveness_url)
aliveness_response = self._get_data(aliveness_url,
auth=auth,
ssl_verify=ssl_verify,
proxies=aliveness_proxy)
message = u"Response from aliveness API: {}".format(aliveness_response)
if aliveness_response.get('status') == 'ok':
status = AgentCheck.OK
else:
status = AgentCheck.CRITICAL
self.service_check('rabbitmq.aliveness', status, tags, message=message)
|
the-stack_106_32150 | import os
from datetime import date
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from jinja2 import TemplateSyntaxError
from requests.exceptions import ConnectionError, HTTPError
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from terra_accounts.permissions import TokenBasedPermission
from terracommon.document_generator.helpers import get_media_response
from terracommon.trrequests.models import UserRequest
from .helpers import DocumentGenerator
from .models import DocumentTemplate, DownloadableDocument
from .serializers import DocumentTemplateSerializer
class DocumentTemplateViewSets(viewsets.ModelViewSet):
queryset = DocumentTemplate.objects.none()
serializer_class = DocumentTemplateSerializer
permission_classes = (IsAuthenticated, )
def get_queryset(self, *args, **kwargs):
if self.request.user.has_module_perms('document_generator'):
return DocumentTemplate.objects.all()
return DocumentTemplate.objects.none()
def create(self, request, *args, **kwargs):
if not request.user.has_perm(
'document_generator.can_upload_template'):
raise PermissionDenied
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
if not request.user.has_perm(
'document_generator.can_update_template'):
raise PermissionDenied
return super().update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
if not request.user.has_perm(
'document_generator.can_delete_template'):
raise PermissionDenied
return super().destroy(request, *args, **kwargs)
@action(detail=True, methods=['get'], permission_classes=(TokenBasedPermission,))
def file(self, request, pk=None):
document = self.get_object().documenttemplate
if not document:
raise Http404('Attachment does not exist')
response = get_media_response(
request, document,
headers={
'Content-Disposition': (
'attachment;'
f' filename={document.name}'),
}
)
return response
@action(detail=True,
methods=['get'],
url_name='pdf',
url_path='pdf/(?P<request_pk>[^/.]+)',
permission_classes=(TokenBasedPermission,))
def pdf_creator(self, request, pk=None, request_pk=None):
""" Insert data from user request into a template & convert it to pdf
<pk>: template's id
<request_pk>: user request's id
"""
userrequest = get_object_or_404(UserRequest, pk=request_pk)
template = get_object_or_404(DocumentTemplate, pk=pk)
userrequest_type = ContentType.objects.get_for_model(
userrequest)
downloadable_properties = {
'document': template,
'content_type': userrequest_type,
'object_id': userrequest.pk,
}
if not ((request.user.is_superuser
or request.user.has_perm('trrequests.can_download_all_pdf')
or request.user.downloadabledocument_set.filter(
**downloadable_properties).exists())):
return Response(status=status.HTTP_404_NOT_FOUND)
try:
pdf_generator = DocumentGenerator(
DownloadableDocument.objects.get(**downloadable_properties)
)
pdf_path = pdf_generator.get_pdf()
except FileNotFoundError:
return Response(status=status.HTTP_404_NOT_FOUND)
except (ConnectionError, HTTPError):
return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE)
except TemplateSyntaxError:
return Response(
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
data='malformed template'
)
else:
pdf_url = os.path.join(settings.MEDIA_URL, pdf_path)
filename = f'document_{date.today().__str__()}.pdf'
response = get_media_response(
request,
{'path': pdf_path, 'url': pdf_url},
headers={
'Content-Type': 'application/pdf',
'Content-disposition': f'attachment;filename={filename}'
}
)
return response
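# Illustrative wiring sketch (not part of this module; the route name and basename below are
# assumptions): the viewset above is typically registered on a DRF router in a urls.py, e.g.
#
#   from rest_framework import routers
#   router = routers.SimpleRouter()
#   router.register(r'document-template', DocumentTemplateViewSets, basename='document-template')
#   urlpatterns = router.urls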
|
the-stack_106_32155 | from memrise import Course, Data
file = 'test.db'
course = Course(2157577)
# Create file database output
db = Data(file)
# Connect to file database and init
db.init_database()
# Update information about the course
db.update_course(course)
# Update IPA for database
db.update_ipa()
# Translate the vocabulary to your own language
db.update_trans('vi')
# Close the database
db.close()
|
the-stack_106_32157 | import requests
import random
import sys
cat_url = "https://cat-fact.herokuapp.com/facts"
r = requests.get(cat_url)
r_obj_list = r.json()
fact_list = []
for fact in r_obj_list:
fact_list.append(fact["text"])
def select_random_fact(fact_arr):
    return fact_arr[random.randint(0, len(fact_arr) - 1)]
random_fact = select_random_fact(fact_list)
print(random_fact)
print(f"::set-output name=fact::{random_fact}")
|
the-stack_106_32161 | '''
NOTICE
This module was solely developed under MITRE corporation internal funding project code 10AOH630-CA
Approved for Public Release; Distribution Unlimited. Public Release Case Number 20-1780.
(c) 2020 The MITRE Corporation. ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
'''
import trace
import matplotlib.pyplot
def trace_model_demo(population=20, tm=-1):
# This demo generates and analyzes a random TRACE model
if tm==-1:
# If a TRACE model is not provided, generate one. This function
# generates a TRACE model directly based on a number of nodes, a
# fraction of connections relative to a fully-connected graph, and a
# number of unique threat concepts that are randomly distributed
# across those connections. Note that all graphs in TRACE are directed
# graphs with cycles.
print ("Generating random TRACE model")
tm = trace.generate.trace_model.random(population, 1.5/population, int(population/2)+1)
# Run analyses
# The find_time function finds the time that a trace model has any completed
# path with a given probability by aging a model to different times
# hunt_depth is an optional parameter that is set higher here to give a more
# interesting visual and higher accuracy results for the demo, in practice
# it's not generally necessary to have high accuracy for most uses of this
medianTTC, median_results = trace.montecarlo.find_time(tm, .5, verbose=True, hunt_depth=100)
# The find_mean function ages a model to specific set intervals over and
# over to characterize the overall distribution
# node_details is an optional parameter to capture detailed information on
# a per-node basis, which has a mild impact on computation time
# cc is an optional parameter which lets the user specify convergence
# criteria in terms of (number of consecutive samples that must meet
# criteria, max probability delta across those samples, max days % change
# across those samples), however the probability delta isn't used for
# the find_mean function, only for the find_time function
mean, mean_results = trace.montecarlo.find_mean(tm, node_details=True, verbose=True, cc=(100,.01,.01))
# To turn find_mean into an MTTI analysis, just add the optional involvement
# parameter and set it to true. This makes the function call the MTTI
# algorithm instead of the MTTC algorithm. The MTTC algorithm uses a
# simple spanning tree, but MTTI does an exhaustive recursive backtracking
# through the entire graph every time step, which is much more work
mtti, mtti_results = trace.montecarlo.find_mean(tm, node_details=True, involvement=True, verbose=True, cc=(100,.01,.01), timeframe=mean_results["timeframe"])
# Output some visuals
# The trace.plot module provides a few visuals using standard packages
print ("\nPlotting data")
figure = matplotlib.pyplot.figure()
plot=figure.add_subplot(231)
plot.set_title('Generated TRACE Graph')
# The trace_model function plots a basic trace model with or without results
trace.plot.trace_model(tm)
plot=figure.add_subplot(234)
plot.set_title('Median TTC: ' + str(round(medianTTC, 2)) + '\n\n\nMedian TTC Convergence\n' + str(median_results["histories"]) + " Steps")
plot.set_xlabel("Trial")
# The find_time and find_mean functions return information about convergence
# which is plotted here
plot.plot([x for x in range(len(median_results["t"]))], median_results["t"], "b")
plot.set_ylabel("Days to compromise (blue)")
plot = plot.twinx()
plot.plot([x for x in range(len(median_results["p"]))], median_results["p"], "r")
plot.set_ylabel('Probability (red)')
plot = figure.add_subplot(232)
plot.set_title('MTTC Result')
# To make the trace.plot.trace_model function plot a gradient color scale
# for MTTC analysis results, also pass the results returned by find_mean
trace.plot.trace_model(tm, mean_results)
plot=figure.add_subplot(235)
plot.set_title('MTTC: ' + str(round(mean, 2)) + '\n\n\nMean TTC Convergence\n' + str(mean_results["histories"] * mean_results["resolution"]) + " Steps")
plot.set_xlabel("Trial")
plot.plot([x for x in range(len(mean_results["t"]))], mean_results["t"], "b")
plot.plot([x for x in range(len(mean_results["mu"]))], mean_results["mu"], "r")
plot.set_ylabel('Trial compromise time (blue)\nand running average (red)')
plot = figure.add_subplot(233)
plot.set_title('MTTI Result')
# To make trace.plot.trace_model plot the MTTI, use the involvement option
trace.plot.trace_model(tm, mtti_results, involvement=True)
plot=figure.add_subplot(236)
plot.set_title('MTTC from MTTI: ' + str(round(mtti, 2)) + '\n(Should match)\n\nMean TTI Convergence\n' + str(mtti_results["histories"] * mtti_results["resolution"]) + " Steps")
plot.set_xlabel("Trial")
plot.plot([x for x in range(len(mtti_results["t"]))], mtti_results["t"], "b")
plot.plot([x for x in range(len(mtti_results["mu"]))], mtti_results["mu"], "r")
plot.set_ylabel('Trial overall compromise time (blue)\nand running average (red)')
matplotlib.pyplot.get_current_fig_manager().window.showMaximized()
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.show()
if __name__=="__main__":
trace_model_demo()
|
the-stack_106_32163 | """
This is a job script for running NPG/DAPG on hand tasks and other gym envs.
Note that DAPG generalizes PG and BC init + PG finetuning.
With appropriate settings of parameters, we can recover the full family.
"""
from mjrl.utils.gym_env import GymEnv
from mjrl.policies.gaussian_mlp import MLP
from mjrl.baselines.quadratic_baseline import QuadraticBaseline
from mjrl.baselines.mlp_baseline import MLPBaseline
from mjrl.algos.npg_cg import NPG
from mjrl.algos.dapg import DAPG
from mjrl.algos.behavior_cloning import BC
from mjrl.utils.train_agent import train_agent
from mjrl.samplers.core import sample_paths
import os
import json
import mjrl.envs
# import mj_envs
import time as timer
import pickle
import argparse
# ===============================================================================
# Get command line arguments
# ===============================================================================
parser = argparse.ArgumentParser(description='Policy gradient algorithms with demonstration data.')
parser.add_argument('--output', type=str, required=True, help='location to store results')
parser.add_argument('--config', type=str, required=True, help='path to config file with exp params')
args = parser.parse_args()
JOB_DIR = args.output
if not os.path.exists(JOB_DIR):
os.mkdir(JOB_DIR)
with open(args.config, 'r') as f:
job_data = eval(f.read())
assert 'algorithm' in job_data.keys()
assert any([job_data['algorithm'] == a for a in ['NPG', 'BCRL', 'DAPG']])
job_data['lam_0'] = 0.0 if 'lam_0' not in job_data.keys() else job_data['lam_0']
job_data['lam_1'] = 0.0 if 'lam_1' not in job_data.keys() else job_data['lam_1']
EXP_FILE = JOB_DIR + '/job_config.json'
with open(EXP_FILE, 'w') as f:
json.dump(job_data, f, indent=4)
# ===============================================================================
# Train Loop
# ===============================================================================
e = GymEnv(job_data['env'])
spec = e.spec
# pickle.dump(e.spec, open('envspec.pickle', 'wb'))
# import sys
# sys.exit()
# spec = pickle.load(open('envspec.pickle','rb'))
policy = MLP(spec,
hidden_sizes=job_data['policy_size'],
seed=job_data['seed'],
init_log_std=job_data['init_log_std'])
baseline = MLPBaseline(spec, reg_coef=1e-3, batch_size=job_data['vf_batch_size'],
epochs=job_data['vf_epochs'], learn_rate=job_data['vf_learn_rate'])
# Get demonstration data if necessary and behavior clone
if job_data['algorithm'] != 'NPG':
print("========================================")
print("Collecting expert demonstrations")
print("========================================")
demo_paths = pickle.load(open(job_data['demo_file'], 'rb'))
bc_agent = BC(demo_paths, policy=policy, epochs=job_data['bc_epochs'], batch_size=job_data['bc_batch_size'],
lr=job_data['bc_learn_rate'], loss_type='MSE', set_transforms=False)
in_shift, in_scale, out_shift, out_scale = bc_agent.compute_transformations()
bc_agent.set_transformations(in_shift, in_scale, out_shift, out_scale)
bc_agent.set_variance_with_data(out_scale)
ts = timer.time()
print("========================================")
print("Running BC with expert demonstrations")
print("========================================")
bc_agent.train()
print("========================================")
print("BC training complete !!!")
print("time taken = %f" % (timer.time() - ts))
print("========================================")
if job_data['eval_rollouts'] >= 1:
# % with constant start state and goal, just 1 episode is sufficient.
score = e.evaluate_policy(policy, num_episodes=1, mean_action=True, visual=False)
print("Score with behavior cloning = %f" % score[0][0])
if job_data['algorithm'] != 'DAPG':
# We throw away the demo data when training from scratch or fine-tuning with RL without explicit augmentation
demo_paths = None
# ===============================================================================
# RL Loop
# ===============================================================================
# e = GymEnv(job_data['env'])
# e=[]
rl_agent = DAPG(e, policy, baseline, demo_paths,
normalized_step_size=job_data['rl_step_size'],
lam_0=job_data['lam_0'], lam_1=job_data['lam_1'],
seed=job_data['seed'], save_logs=True
)
# import pybullet as p
# p.disconnect()
# del e
print("========================================")
print("Starting reinforcement learning phase")
print("========================================")
ts = timer.time()
train_agent(job_name=JOB_DIR,
agent=rl_agent,
seed=job_data['seed'],
niter=job_data['rl_num_iter'],
gamma=job_data['rl_gamma'],
gae_lambda=job_data['rl_gae'],
num_cpu=job_data['num_cpu'],
sample_mode='trajectories',
num_traj=job_data['rl_num_traj'],
save_freq=job_data['save_freq'],
evaluation_rollouts=job_data['eval_rollouts'])
print("time taken = %f" % (timer.time()-ts))
|
the-stack_106_32165 | '''BlockMacro, define a macro.
:copyright: 2015, Jeroen van der Heijden (Cesbit)
'''
import re
from .exceptions import DefineBlockError
from .constants import ALWAYS_ALLOWED, LINE_END
class BlockMacro:
RE_MACRO = re.compile(r'^\s*#macro\s+([a-zA-Z0-9_]+)\s*:\s*$', re.UNICODE)
def __init__(self, lines):
'''Initialize #macro.'''
from .block import Block
self._name = self._compile(lines)
self._block = Block(lines, allowed=ALWAYS_ALLOWED | LINE_END)
def render(self, namespace):
'''Add the macro to the namespace.
Note: at this point the macro will be rendered.
'''
namespace.add_macro(self._name, self._block)
return None
@classmethod
def _compile(cls, lines):
'''Return the macro name from the current line.'''
m = cls.RE_MACRO.match(lines.current)
if m is None:
raise DefineBlockError(
'Incorrect macro definition at line {}, {}\nShould be '
'something like: #macro my_macro:'
.format(lines.pos, lines.current))
return m.group(1)
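# Illustrative behaviour of RE_MACRO above (doctest-style sketch, not part of the module):
#
#   >>> BlockMacro.RE_MACRO.match('#macro my_macro:').group(1)
#   'my_macro'
#   >>> BlockMacro.RE_MACRO.match('#macro my macro:') is None
#   True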
|
the-stack_106_32167 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.test import TestCase
from mock import patch
from apps.backend.api.constants import OS
from apps.backend.components.collections.job import JobFastPushFileComponent
from apps.backend.tests.components.collections.job import utils
from pipeline.component_framework.test import (
ComponentTestCase,
ComponentTestMixin,
ExecuteAssertion,
Patcher,
ScheduleAssertion,
)
class JobFastPushFileComponentTestCase(TestCase, ComponentTestMixin):
JOB_CLIENT = {"bk_biz_id": 2, "username": "admin", "os_type": OS.LINUX}
TASK_RESULT = {
"success": [{"ip": "127.0.0.1", "bk_cloud_id": 0, "log_content": "success", "error_code": 0, "exit_code": 0}],
"pending": [],
"failed": [],
}
JOB_FAST_PUSH_FILE = {
"job_client": JOB_CLIENT,
"ip_list": [{"ip": "127.0.0.1", "bk_supplier_id": 0, "bk_cloud_id": 0}],
"file_target_path": "/data/",
"file_source": [
{
"files": ["/data/dev_pipeline_unit_test"],
"account": "root",
"ip_list": [{"bk_cloud_id": 0, "ip": "127.0.0.1", "bk_supplier_id": 0}],
}
],
}
def setUp(self):
self.job_client = utils.JobMockClient(
fast_push_file_return=utils.JOB_EXECUTE_TASK_RETURN,
get_job_instance_log_return=utils.JOB_GET_INSTANCE_LOG_RETURN,
)
patch(utils.JOB_VERSION_MOCK_PATH, "V3").start()
def component_cls(self):
# return the component class which should be tested
return JobFastPushFileComponent
def cases(self):
# return your component test cases here
return [
ComponentTestCase(
name="测试成功快速上传文件",
inputs=self.JOB_FAST_PUSH_FILE,
parent_data={},
execute_assertion=ExecuteAssertion(
success=True,
outputs={"job_instance_id": utils.TASK_ID, "polling_time": 0},
),
schedule_assertion=ScheduleAssertion(
success=True,
outputs={"polling_time": 0, "task_result": self.TASK_RESULT, "job_instance_id": utils.TASK_ID},
callback_data=None,
schedule_finished=True,
),
patchers=[Patcher(target=utils.JOB_CLIENT_MOCK_PATH, return_value=self.job_client)],
)
]
|
the-stack_106_32169 | '''
scan_instances
==============
NOTE: not currently tested code.
The following methods allow for interaction into the Tenable.sc
`Scan Result <https://docs.tenable.com/sccv/api/Scan-Result.html>`_ API. While
the Tenable.sc API refers to the model these endpoints interact with as
*ScanResult*, were actually interacting with an instance of a scan definition
stored within the *Scan* API endpoints. These scan instances could be running
scans, stopped scans, errored scans, or completed scans. These items are
typically seen under the **Scan Results** section of Tenable.sc.
Methods available on ``sc.scan_instances``:
.. rst-class:: hide-signature
.. autoclass:: ScanResultAPI
.. automethod:: copy
.. automethod:: delete
.. automethod:: details
.. .. automethod:: email
.. automethod:: export_scan
.. automethod:: import_scan
.. automethod:: list
.. automethod:: pause
.. automethod:: reimport_scan
.. automethod:: resume
.. automethod:: stop
'''
from .base import SCEndpoint
from tenable.utils import dict_merge
from io import BytesIO
class ScanResultAPI(SCEndpoint):
def copy(self, id, users=None):
'''
Clones the scan instance.
+ `SC ScanResult Copy <https://docs.tenable.com/sccv/api/Scan-Result.html#ScanResultRESTReference-/scanResult/{id}/copy>`_
Args:
id (int): The identifier of the scan instance to clone.
users (list, optional):
A list of user ids to associate to the scan instance.
Returns:
dict
Examples:
>>> sc.scan_instances.copy(1)
'''
payload = dict()
if users:
            payload['users'] = [{'id': self._check('user:id', u, int)}
for u in self._check('users', users, list)]
return self._api.post('scanResult/{}/copy'.format(
self._check('id', id, int)), json=payload).json()['response']
def delete(self, id):
'''
Removes the scan instance from TenableSC.
+ `SC ScanResult Delete <https://docs.tenable.com/sccv/api/Scan-Result.html#scanResult_id_DELETE>`_
Args:
id (int): The identifier of the scan instance to delete.
Returns:
dict
Examples:
>>> sc.scan_instances.delete(1)
'''
return self._api.delete('scanResult/{}'.format(
self._check('id', id, int))).json()['response']
def details(self, id, fields=None):
'''
        Retrieves the details for the specified scan instance.
+ `SC ScanResult <https://docs.tenable.com/sccv/api/Scan-Result.html#scanResult_id_GET>`_
Args:
id (int): The identifier for the scan instance to be retrieved.
fields (list, optional):
List of fields to return. Refer to the API documentation
referenced above for a list of available fields.
Returns:
dict: The scan instance resource record.
Examples:
Getting the details of a scan instance with just the
default parameters:
>>> scan = sc.scan_instances.details(1)
>>> pprint(scan)
Specifying what fields you'd like to be returned:
>>> scan = sc.scan_instances.details(1,
... fields=['name', 'status', 'scannedIPs', 'startTime', 'finishTime'])
>>> pprint(scan)
'''
params = dict()
if fields:
params['fields'] = ','.join([self._check('field', f, str)
for f in self._check('fields', fields, list)])
return self._api.get('scanResult/{}'.format(self._check('id', id, int)),
params=params).json()['response']
def email(self, id, *emails):
'''
DOC-ISSUE: SC Api Docs do not explain what this does.
+ `SC ScanResult Email <https://docs.tenable.com/sccv/api/Scan-Result.html#ScanResultRESTReference-/scanResult/{id}/email>`_
Args:
id (int): The identifier for the specified scan instance.
*emails (str): Valid email
Returns:
dict
Examples:
>>> sc.scan_instances.email(1, )
'''
return self._api.post('scanResult/{}/email'.format(
self._check('id', id, int)), json={'email': ','.join(
[self._check('address', e, str)
for e in self._check('emails', emails, list)])}).json()['response']
def export_scan(self, id, fobj=None, export_format=None):
'''
Downloads the results of the scan.
+ `SC ScanResult Download <https://docs.tenable.com/sccv/api/Scan-Result.html#ScanResultRESTReference-/scanResult/{id}/download>`_
Args:
id (int): The scan instance identifier.
export_format (str, optional):
The format of the resulting data. Allowable values are
``scap1_2`` and ``v2``. ``v2`` is the default value if none
are specified.
fobj (FileObject, optional):
The file-like object to write the resulting file into. If
                no file-like object is provided, a BytesIO object with the
downloaded file will be returned. Be aware that the default
option of using a BytesIO object means that the file will be
stored in memory, and it's generally recommended to pass an
actual file-object to write to instead.
Returns:
FileObject: The file-like object with the resulting export.
Examples:
>>> with open('example.nessus', 'wb') as fobj:
... sc.scan_instances.export(1, fobj)
'''
resp = self._api.post('scanResult/{}/download'.format(
self._check('id', id, int)), stream=True, json={
'downloadType': self._check('export_format', export_format, str,
choices=['scap1_2', 'v2'], default='v2')})
# if no file-like object was passed, then we will instantiate a BytesIO
# object to push the file into.
if not fobj:
fobj = BytesIO()
# Lets stream the file into the file-like object...
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
fobj.write(chunk)
fobj.seek(0)
return fobj
def import_scan(self, fobj, repo, **kw):
'''
Imports a nessus file into Tenable.sc.
+ `SC Scan Import <https://docs.tenable.com/sccv/api/Scan-Result.html#ScanResultRESTReference-/scanResult/import>`_
Args:
fobj (FileObject):
The file-like object containing the Nessus file to import.
repo (int):
The repository id for the scan.
auto_mitigation (int, optional):
How many days to hold on to data before mitigating it? The
default value is 0.
host_tracking (bool, optional):
Should DHCP host tracking be enabled? The default is False.
vhosts (bool, optional):
Should virtual host logic be enabled for the scan? The default
is ``False``.
Returns:
            str: An empty string response.
Examples:
>>> with open('example.nessus') as fobj:
... sc.scan_instances.import_scan(fobj, 1)
'''
kw['repo'] = repo
payload = self._api.scans._constructor(**kw)
payload['filename'] = self._api.files.upload(fobj)
return self._api.post(
'scanResult/import', json=payload).json()['response']
def reimport_scan(self, id, **kw):
'''
Re-imports an existing scan into the cumulative repository.
+ `SC Scan Re-Import <https://docs.tenable.com/sccv/api/Scan-Result.html#ScanResultRESTReference-/scanResult/{id}/import>`_
Args:
id (int):
The scan instance identifier.
auto_mitigation (int, optional):
How many days to hold on to data before mitigating it? The
default value is 0.
host_tracking (bool, optional):
Should DHCP host tracking be enabled? The default is False.
vhosts (bool, optional):
Should virtual host logic be enabled for the scan? The default
is ``False``.
Returns:
            str: An empty string response.
Examples:
>>> sc.scan_instances.reimport_scan(1)
'''
        payload = self._api.scans._constructor(**kw)
return self._api.post('scanResult/{}/import'.format(self._check(
'id', id, int)), json=payload).json()['response']
def list(self, fields=None):
'''
        Retrieves the list of scan instances.
+ `SC ScanResult List <https://docs.tenable.com/sccv/api/Scan-Result.html#ScanResultRESTReference-/scanResult>`_
Args:
fields (list, optional):
A list of attributes to return.
Returns:
list: A list of scan instance resources.
Examples:
>>> for scan in sc.scan_instances.list():
... pprint(scan)
'''
params = dict()
if fields:
params['fields'] = ','.join([self._check('field', f, str)
for f in fields])
return self._api.get('scanResult', params=params).json()['response']
def pause(self, id):
'''
Pauses a running scan instance. Note that this will not impact agent
scan instances.
+ SC Scan Pause <https://docs.tenable.com/sccv/api/Scan-Result.html#ScanResultRESTReference-/scanResult/{id}/pause>`_
Args:
id (int): The unique identifier for the scan instance.
Returns:
list: List of scan instances modified.
Examples:
>>> sc.scan_instances.pause(1)
'''
return self._api.post('scanResult/{}/pause'.format(self._check(
'id', id, int))).json()['response']['scanResults']
def resume(self, id):
'''
Resumes a paused scan instance. Note that this will not impact agent
scan instances.
+ SC Scan Resume <https://docs.tenable.com/sccv/api/Scan-Result.html#ScanResultRESTReference-/scanResult/{id}/resume>`_
Args:
id (int): The unique identifier for the scan instance.
Returns:
list: List of scan instances modified.
Examples:
>>> sc.scan_instances.resume(1)
'''
return self._api.post('scanResult/{}/resume'.format(self._check(
'id', id, int))).json()['response']['scanResults']
def stop(self, id):
'''
Stops a running scan instance. Note that this will not impact agent
scan instances.
+ SC Scan Stop <https://docs.tenable.com/sccv/api/Scan-Result.html#ScanResultRESTReference-/scanResult/{id}/stop>`_
Args:
id (int): The unique identifier for the scan instance.
Returns:
dict: Response dictionary
Examples:
>>> sc.scan_instances.stop(1)
'''
return self._api.post('scanResult/{}/stop'.format(self._check(
'id', id, int))).json()['response']
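# Illustrative end-to-end sketch (assumes an authenticated TenableSC client `sc`; the
# 'Completed' status value is an assumption used only for illustration):
#
#   for inst in sc.scan_instances.list(fields=['id', 'name', 'status']):
#       if inst.get('status') == 'Completed':
#           with open('scan-{}.nessus'.format(inst['id']), 'wb') as fobj:
#               sc.scan_instances.export_scan(int(inst['id']), fobj)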
|
the-stack_106_32170 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import getpass
import logging
import optparse
import os
import subprocess
from uuid import uuid1
from apache.aurora.client.base import die
"""Admin client utility functions shared between admin and maintenance modules."""
# TODO(maxim): Switch to CLI ConfigurationPlugin within AURORA-486.
LOGGER_NAME = 'aurora_admin'
logger = logging.getLogger(LOGGER_NAME)
CLIENT_ID = uuid1()
def log_admin_message(sev, msg, *args, **kwargs):
"""Logs message using the module-defined logger.
:param sev: message severity
:type sev: The numeric level of the logging event (one of DEBUG, INFO etc.)
:param msg: message to log
:type msg: string
"""
extra = kwargs.get('extra', {})
extra['clientid'] = CLIENT_ID
extra['user'] = getpass.getuser()
extra['logger_name'] = LOGGER_NAME
kwargs['extra'] = extra
logger.log(sev, msg, *args, **kwargs)
FILENAME_OPTION = optparse.Option(
'--filename',
dest='filename',
default=None,
help='Name of the file with hostnames')
HOSTS_OPTION = optparse.Option(
'--hosts',
dest='hosts',
default=None,
help='Comma separated list of hosts')
def parse_sla_percentage(percentage):
"""Parses percentage value for an SLA check.
:param percentage: string percentage to parse
:type percentage: string
:rtype: float
"""
val = float(percentage)
if val <= 0 or val > 100:
die('Invalid percentage %s. Must be within (0, 100].' % percentage)
return val
def _parse_hostname_list(hostname_list):
hostnames = [hostname.strip() for hostname in hostname_list.split(",")]
if not hostnames:
die('No valid hosts found.')
return hostnames
def _parse_hostname_file(filename):
with open(filename, 'r') as hosts:
hostnames = [hostname.strip() for hostname in hosts]
if not hostnames:
die('No valid hosts found in %s.' % filename)
return hostnames
def parse_hostnames_optional(list_option, file_option):
"""Parses host names from a comma-separated list or a filename.
Does not require either of the arguments (returns None list if no option provided).
:param list_option: command option with comma-separated list of host names
:type list_option: app.option
:param file_option: command option with filename (one host per line)
:type file_option: app.option
:rtype: list of host names or None.
"""
if bool(list_option) and bool(file_option):
die('Cannot specify both filename and list for the same option.')
hostnames = None
if file_option:
hostnames = _parse_hostname_file(file_option)
elif list_option:
hostnames = _parse_hostname_list(list_option)
return hostnames
def parse_hostnames(filename, hostnames):
"""Parses host names from a comma-separated list or a filename.
Fails if neither filename nor hostnames provided.
:param filename: filename with host names (one per line)
:type filename: string
:param hostnames: comma-separated list of host names
:type hostnames: string
:rtype: list of host names
"""
if bool(filename) == bool(hostnames):
die('Please specify either --filename or --hosts')
if filename:
hostnames = _parse_hostname_file(filename)
elif hostnames:
hostnames = _parse_hostname_list(hostnames)
if not hostnames:
die('No valid hosts found.')
return hostnames
def parse_script(filename):
"""Parses shell script from the provided file and wraps it up into a subprocess callback.
:param filename: name of the script file
:type filename: string
:rtype: function
"""
if filename:
if not os.path.exists(filename):
die("No such file: %s" % filename)
cmd = os.path.abspath(filename)
return lambda host: subprocess.Popen([cmd, host])
else:
return None
def print_results(results):
"""Prints formatted SLA results.
:param results: formatted SLA results
:type results: list of string
"""
for line in results:
print(line)
def format_sla_results(host_groups, unsafe_only=False):
"""Formats SLA check result output.
:param host_groups: SLA check result groups (grouped by external grouping criteria, e.g. by_host)
:type host_groups: list of (defaultdict(list))
:param unsafe_only: If True, includes only SLA-"unsafe" hosts from the results
:type unsafe_only: bool
:rtype: a tuple of: list of output strings, set of hostnames included in output.
"""
results = []
include_unsafe_only = lambda d: not d.safe if unsafe_only else True
hostnames = set()
for group in host_groups:
for host, job_details in sorted(group.items()):
host_details = '\n'.join(
['%s\t%s\t%.2f\t%s\t%s' %
(host,
d.job.to_path(),
d.predicted_percentage,
d.safe,
'n/a' if d.safe_in_secs is None else d.safe_in_secs)
for d in sorted(job_details) if include_unsafe_only(d)])
if host_details:
results.append(host_details)
hostnames.add(host)
return results, hostnames
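# Illustrative usage sketch (hypothetical host list and script path):
#
#   hostnames = parse_hostnames(None, 'host1.example.com,host2.example.com')
#   callback = parse_script('scripts/drain.sh')   # parse_script returns None when no file is given
#   for host in hostnames:
#       if callback:
#           callback(host)                        # runs: scripts/drain.sh <host>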
|
the-stack_106_32172 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of color ops"""
import pytest
import tensorflow as tf
import numpy as np
from tensorflow_addons.image import color_ops
from PIL import Image, ImageOps, ImageEnhance
_DTYPES = {
np.uint8,
np.int32,
np.int64,
np.float16,
np.float32,
np.float64,
}
_SHAPES = {(5, 5), (5, 5, 1), (5, 5, 3), (4, 5, 5), (4, 5, 5, 1), (4, 5, 5, 3)}
@pytest.mark.parametrize("dtype", _DTYPES)
@pytest.mark.parametrize("shape", _SHAPES)
def test_equalize_dtype_shape(dtype, shape):
image = np.ones(shape=shape, dtype=dtype)
equalized = color_ops.equalize(tf.constant(image)).numpy()
np.testing.assert_equal(equalized, image)
assert equalized.dtype == image.dtype
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_equalize_with_PIL():
np.random.seed(0)
image = np.random.randint(low=0, high=255, size=(4, 3, 3, 3), dtype=np.uint8)
equalized = np.stack([ImageOps.equalize(Image.fromarray(i)) for i in image])
np.testing.assert_equal(color_ops.equalize(tf.constant(image)).numpy(), equalized)
@pytest.mark.parametrize("dtype", _DTYPES)
@pytest.mark.parametrize("shape", _SHAPES)
def test_sharpness_dtype_shape(dtype, shape):
image = np.ones(shape=shape, dtype=dtype)
sharp = color_ops.sharpness(tf.constant(image), 0).numpy()
np.testing.assert_equal(sharp, image)
assert sharp.dtype == image.dtype
@pytest.mark.parametrize("factor", [0, 0.25, 0.5, 0.75, 1])
def test_sharpness_with_PIL(factor):
np.random.seed(0)
image = np.random.randint(low=0, high=255, size=(10, 5, 5, 3), dtype=np.uint8)
sharpened = np.stack(
[ImageEnhance.Sharpness(Image.fromarray(i)).enhance(factor) for i in image]
)
np.testing.assert_allclose(
color_ops.sharpness(tf.constant(image), factor).numpy(), sharpened, atol=1
)
|
the-stack_106_32174 | import os
import re
import subprocess # nosec
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
"""
Load schema from remote Heroku PostgreSQL database.
This command can be useful to load the Heroku Connect database schema into
a local development environment.
Example::
python manage.py load_remote_schema --app ninja | psql -a
.. note::
This command requires the `Heroku CLI`_ and PostgreSQL_ to be installed.
.. _`Heroku CLI`: https://cli.heroku.com/
.. _PostgreSQL: https://www.postgresql.org/
"""
help = __doc__.strip().splitlines()[0]
url_pattern = (
r"postgres://(?P<user>[\d\w]*):(?P<passwd>[\d\w]*)"
r"@(?P<host>[^:]+):(?P<port>\d+)/(?P<dbname>[\d\w]+)"
)
def add_arguments(self, parser):
parser.add_argument(
"--app",
"-a",
dest="HEROKU_APP",
type=str,
help="Heroku app name that the schema will be pulled from.",
)
parser.add_argument(
"--schema",
"-s",
dest="SCHEMA_NAME",
type=str,
default="salesforce",
help="Name of schema that you want to load.",
)
def handle(self, *args, **options):
heroku_app = options.get("HEROKU_APP")
schema_name = options["SCHEMA_NAME"]
url = self.get_database_url(heroku_app)
credentials = self.parse_credentials(url)
schema = self.get_schema(**credentials, schema_name=schema_name)
self.stdout.write(schema)
@staticmethod
def get_database_url(heroku_app):
run_args = ["heroku", "pg:credentials:url"]
if heroku_app:
run_args += ["-a", heroku_app]
try:
output = subprocess.check_output(run_args) # nosec
except subprocess.SubprocessError as e:
raise CommandError("Please provide the correct Heroku app name.") from e
else:
return output.decode("utf-8")
def parse_credentials(self, url):
match = re.search(self.url_pattern, url)
if not match:
raise CommandError("Could not parse DATABASE_URL.")
return match.groupdict()
@staticmethod
def get_schema(user, host, port, dbname, passwd, schema_name):
env = os.environ.copy()
env["PGPASSWORD"] = passwd
run_args = [
"pg_dump",
"-sO",
"-n",
schema_name,
"-U",
user,
"-h",
host,
"-p",
port,
"-d",
dbname,
]
try:
output = subprocess.check_output(run_args, env=env) # nosec
except subprocess.SubprocessError as e:
raise CommandError("Schema not found.") from e
else:
return output.decode("utf-8")
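# Illustrative sketch of the credential-parsing step (hypothetical URL):
#
#   cmd = Command()
#   cmd.parse_credentials('postgres://user1:s3cret@db.example.com:5432/d1b2c3')
#   # -> {'user': 'user1', 'passwd': 's3cret', 'host': 'db.example.com',
#   #     'port': '5432', 'dbname': 'd1b2c3'}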
|
the-stack_106_32176 | import numpy as np
import h5py
import random
import tensorflow as tf
import os
from typing import List
AUTOTUNE = tf.data.experimental.AUTOTUNE
def images_in_paths(folder_path: str) -> List[str]:
"""
Collects paths to all images from one folder and return them as a list
:param folder_path:
:return: list of path/to/image
"""
paths = []
folder_path = os.path.join(os.getcwd(), folder_path)
for root, dirs, files in os.walk(folder_path):
for file in files:
paths.append(os.path.join(root, file))
return paths
class ImageGenerator:
"""
ImageGenerator takes care to serve all path/to/images in the right way and to serve the dataset info about mean,
standard deviation and the number of items in folder.
"""
def __init__(self, data_path):
"""
        Open the info file once. The ImageGenerator object is kept alive for the whole training (and evaluation),
        so the image path lists are not built here; they are produced when the instance is called.
:param data_path: path/to/data folder, in which there are train, val, test folders with images and the file 'info.h5' with
std/mean and number of items in dataset
"""
self.data_path = data_path
self.h5f = h5py.File(os.path.join(data_path, 'info.h5'),
'r') # it will be closed when the context will be terminated
def __call__(self, dataset_type, num_classes, *args, **kwargs):
"""
Instance is called with different dataset_type
:param dataset_type:
:param args:
:param kwargs:
:return:
"""
paths = images_in_paths(os.path.join(self.data_path, dataset_type))
labels = [random.randrange(num_classes) for _ in paths]
return paths, labels
class CropsGenerator:
"""
CropsGenerator takes care to load images from disk and convert, crop and serve them as a tf.data.Dataset
"""
def __init__(self, conf, max_hamming_set):
self.data_path = conf.data_path # path to dataset folder
self.img_generator = ImageGenerator(self.data_path) # generates the instance to dataset files
self.numChannels = conf.numChannels # num of input image channels
self.numCrops = conf.numCrops # num of jigsaw crops
self.original_dim = conf.original_dim # size of the input image
self.cropSize = conf.cropSize # size of crop (255)
self.cellSize = conf.cellSize # size of each cell (75)
self.tileSize = conf.tileSize # size of tile in cell (64)
self.colorJitter = conf.colorJitter # number of pixels for color jittering
self.batchSize = conf.batchSize # training_batch_size
self.val_batch_size = conf.val_batch_size # validation batch size
self.meanTensor, self.stdTensor = self.get_stats() # get stats from dataset info
# the <max_hamming_set> comes from file generated by <generate_hamming_set>. For further information see it.
self.maxHammingSet = np.array(max_hamming_set, dtype=np.uint8)
self.numClasses = conf.hammingSetSize # number of different jigsaw classes
# do not retrieve info about dataset with h5f['train_img'][:].shape since it loads the whole dataset into RAM
self.num_train_batch = self.img_generator.h5f['train_dim'][...].astype(np.int32) // self.batchSize
self.num_val_batch = self.img_generator.h5f['val_dim'][...].astype(np.int32) // conf.val_batch_size
self.num_test_batch = self.img_generator.h5f['test_dim'][...].astype(np.int32) // self.batchSize
def get_stats(self):
"""
        Return mean and std from the dataset; they were precomputed and stored in info.h5. If mean or std have not been saved, a KeyError is raised.
:return:
"""
mean = self.img_generator.h5f['train_mean'][:].astype(np.float32)
std = self.img_generator.h5f['train_std'][:].astype(np.float32)
if self.numChannels == 1:
mean = np.expand_dims(mean, axis=-1)
std = np.expand_dims(std, axis=-1)
return mean, std
def one_crop(self, hm_index, crop_x, crop_y, x):
"""
This function creates one cropping at a time.
It's sort of the contrary wrt previous behaviour. Now we find the hm_index crop we want to locate and we create
it. As the result is stacked in the first axis, the order is kept as the label requires.
:param hm_index: permutation index in the hamming set
:param crop_x: x position for the 225 initial crop of the image
:param crop_y: x position for the 225 initial crop of the image
:param x: original image
:return: one crop
"""
# Define behaviour of col and rows to keep compatibility with previous code
col = tf.math.mod(hm_index, 3)
row = tf.math.divide(hm_index, 3)
# cast to get rid of decimal values. However multiply takes only float32, so we have to re-cast it
row = tf.cast(row, tf.int16)
row = tf.cast(row, tf.float32)
# create tf.constant to keep compatibility
crop_x = tf.constant(crop_x, dtype=tf.float32)
crop_y = tf.constant(crop_y, dtype=tf.float32)
# random_x = tf.constant(random.randrange(self.cellSize - self.tileSize), dtype=tf.float32)
random_x = float(random.randrange(self.cellSize - self.tileSize))
# random_y = tf.constant(random.randrange(self.cellSize - self.tileSize), dtype=tf.float32)
random_y = float(random.randrange(self.cellSize - self.tileSize))
cell_size = tf.constant(self.cellSize, dtype=tf.float32)
tile_size = tf.constant(self.tileSize, dtype=tf.float32)
# partial operations
col_cellsize = tf.math.multiply(col, cell_size)
row_cellsize = tf.math.multiply(row, cell_size)
x_start = tf.add(tf.math.add(crop_x, col_cellsize), random_x)
y_start = tf.add(tf.math.add(crop_y, row_cellsize), random_y)
x_next = tf.math.add(x_start, tile_size)
y_next = tf.math.add(y_start, tile_size)
# cast every value to int so we can take slices of x
x_start = tf.cast(x_start, dtype=tf.int32)
y_start = tf.cast(y_start, dtype=tf.int32)
x_next = tf.cast(x_next, dtype=tf.int32)
y_next = tf.cast(y_next, dtype=tf.int32)
crop = x[y_start:y_next, x_start:x_next, :]
# spatial jittering of crop
crop = self.color_channel_jitter(crop)
# ensure that resulting shape is correct
tf.ensure_shape(crop, shape=(64, 64, 3))
return crop
def create_croppings(self, x: tf.Tensor, y: tf.Tensor, hamming_set: tf.Tensor):
"""
Makes croppings from image
The 3x3 grid is numbered as follows:
0 1 2
3 4 5
6 7 8
:param x:
:param y:
:param hamming_set:
:return:
"""
# retrieve shape of image
y_dim, x_dim = x.shape[:2]
# Have the x & y coordinate of the crop
if x_dim != self.cropSize:
# dimension of x is bigger than cropSize so we can take a square window inside image
crop_x = random.randrange(x_dim - self.cropSize)
crop_y = random.randrange(y_dim - self.cropSize)
else:
crop_x, crop_y = 0, 0
# define variable before mapping
# create lambda function for mapping
one_crop_func = lambda hm_index: self.one_crop(hm_index, crop_x, crop_y, x)
# this mapping takes one element at a time from <hamming_set> and serve it to croppings_func. So for
# croppings_func hm_index is served from hamming_set, the other parameters from <create_croppings> function body
# This map returns a tensor that has the one_crop stacked together in the first dimension
croppings = tf.map_fn(one_crop_func, hamming_set)
# change order of axis (move one_crop dimension from first to last)
x = tf.transpose(croppings, [1, 2, 3, 0])
return x, tf.one_hot(y, self.numClasses)
def add_grayscale(self, x: tf.Tensor, perm_index: tf.Tensor, hamming_set):
"""
        Randomly convert the image to grayscale while keeping three identical channels,
        so the network input layout is preserved.
        :param x: a single image
        :param perm_index: permutation index, passed through unchanged
        :param hamming_set: hamming set row, passed through unchanged
        :return: image (possibly grayscaled), perm_index, hamming_set
        """
        # make it grayscale with probability 0.3
if random.random() < 0.3:
x = 0.21 * x[..., 2] + 0.72 * x[..., 1] + 0.07 * x[..., 0]
# expanding dimension to preserve net layout
x = tf.expand_dims(x, axis=-1)
x = tf.concat([x, x, x], axis=-1)
return x, perm_index, hamming_set
def color_channel_jitter(self, img):
"""
Spatial image jitter, aka movement of color channel in various manners
"""
r_jit = random.randrange(-self.colorJitter, self.colorJitter)
g_jit = random.randrange(-self.colorJitter, self.colorJitter)
b_jit = random.randrange(-self.colorJitter, self.colorJitter)
return tf.stack((
tf.roll(img[:, :, 0], r_jit, axis=0),
tf.roll(img[:, :, 1], g_jit, axis=1),
tf.roll(img[:, :, 2], b_jit, axis=0)
), axis=2)
def parse_path(self, path: tf.Tensor, label: tf.Tensor) -> (tf.Tensor, tf.Tensor, tf.Tensor):
"""
Read image from disk and apply a label to it
:param path: path to one image. This is a tf.Tensor and contains a string
:return:
"""
# read image from disk
img = tf.io.read_file(path)
# decode it as jpeg
img = tf.image.decode_jpeg(img, channels=3)
# data augmentation
img = tf.image.random_flip_left_right(img)
img = tf.image.random_flip_up_down(img)
# cast to tensor with type tf.float32
img = tf.cast(img, dtype=tf.float32)
        # standardize the image with the dataset mean and std
img = tf.math.subtract(img, self.meanTensor)
img = tf.math.divide(img, self.stdTensor)
# create hamming set from label
hamming_set = tf.cast(tf.gather(self.maxHammingSet, label), dtype=tf.float32)
return img, label, hamming_set
def generate_train_set(self):
"""
Generates the actual dataset. It uses all the functions defined above to read images from disk and create croppings.
:param mode: train-val-test
:return: tf.data.Dataset
"""
parse_path_func = lambda x, y: self.parse_path(x, y)
add_grayscale_func = lambda x, y, z: self.add_grayscale(x, y, z)
create_croppings_func = lambda x, y, z: self.create_croppings(x, y, z)
batch_size = self.batchSize
n_el = self.num_train_batch
return (tf.data.Dataset.from_tensor_slices(self.img_generator('train', self.numClasses))
.shuffle(buffer_size=n_el * batch_size)
.map(parse_path_func, num_parallel_calls=AUTOTUNE)
.map(add_grayscale_func, num_parallel_calls=AUTOTUNE) # add grayscale img w/ p<0.3 in train
.map(create_croppings_func, num_parallel_calls=AUTOTUNE) # create actual one_crop
.batch(batch_size) # defined batch_size
.prefetch(AUTOTUNE) # number of batches to be prefetch.
.repeat() # repeats the dataset when it is finished
)
def generate_val_set(self):
"""
Generates the actual dataset. It uses all the functions defined above to read images from disk and create croppings.
:param mode: train-val-test
:return: tf.data.Dataset
"""
parse_path_func = lambda x, y: self.parse_path(x, y)
create_croppings_func = lambda x, y, z: self.create_croppings(x, y, z)
batch_size = self.val_batch_size
n_el = self.num_val_batch
return (tf.data.Dataset.from_tensor_slices(self.img_generator('val', self.numClasses))
.shuffle(buffer_size=n_el * batch_size)
.map(parse_path_func, num_parallel_calls=AUTOTUNE)
.map(create_croppings_func, num_parallel_calls=AUTOTUNE) # create actual one_crop
.batch(batch_size) # defined batch_size
.prefetch(AUTOTUNE) # number of batches to be prefetch.
.repeat() # repeats the dataset when it is finished
)
def generate_test_set(self):
"""
Generates the actual dataset. It uses all the functions defined above to read images from disk and create croppings.
:param mode: train-val-test
:return: tf.data.Dataset
"""
parse_path_func = lambda x, y: self.parse_path(x, y)
create_croppings_func = lambda x, y, z: self.create_croppings(x, y, z)
batch_size = self.batchSize
n_el = self.num_test_batch
return (tf.data.Dataset.from_tensor_slices(self.img_generator('test', self.numClasses))
.shuffle(buffer_size=n_el * batch_size)
.map(parse_path_func, num_parallel_calls=AUTOTUNE)
.map(create_croppings_func, num_parallel_calls=AUTOTUNE) # create actual one_crop
.batch(batch_size) # defined batch_size
.prefetch(AUTOTUNE) # number of batches to be prefetch.
.repeat() # repeats the dataset when it is finished
)
# UNCOMMENT ADDITION AND DIVISION PER MEAN AND STD BEFORE TRY TO SEE IMAGES
if __name__ == '__main__':
import config as conf
from PIL import Image
os.chdir(os.pardir)
with h5py.File(os.path.join('Dataset', conf.resources, conf.hammingFileName + str(conf.hammingSetSize) + '.h5'),
'r') as h5f:
HammingSet = np.array(h5f['max_hamming_set'])
data_reader = CropsGenerator(conf, HammingSet)
iter = data_reader.generate_train_set().make_initializable_iterator()
x, labels = iter.get_next()
with tf.Session() as sess:
sess.run(iter.initializer)
# returns a batch of images
tiles, labels = sess.run([x, labels])
# select only one (choose which in [0, batchSize)
n_image = 4
image = np.array(tiles[n_image], dtype=np.float32)
first_label = np.array(labels[n_image])
# from one_hot to number
lbl = np.where(first_label == np.amax(first_label))[0][0]
# create complete image with pieces (if label is correct then also will be image)
complete = np.zeros((192, 192, 3))
tile_size = data_reader.tileSize
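    # maxHammingSet[lbl] is the tile permutation: the loop below places cropped tile i
    # at grid cell v (row-major on the 3x3 grid).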
for i, v in enumerate(data_reader.maxHammingSet[lbl]):
row = int(v / 3)
col = v % 3
y_start = row * tile_size
x_start = col * tile_size
complete[y_start:y_start + tile_size, x_start:x_start + tile_size] = image[:, :, :, i]
Image.fromarray(np.array(complete, dtype=np.uint8)).show()
|
the-stack_106_32177 | import string
from urllib import urlencode
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
import commonware.log
from oauthlib import oauth1
from oauthlib.common import safe_string_equals
from mkt.api.models import Access, Nonce, Token, REQUEST_TOKEN, ACCESS_TOKEN
from mkt.site.decorators import login_required
from mkt.site.utils import urlparams
DUMMY_CLIENT_KEY = u'DummyOAuthClientKeyString'
DUMMY_REQUEST_TOKEN = u'DummyOAuthRequestToken'
DUMMY_ACCESS_TOKEN = u'DummyOAuthAccessToken'
DUMMY_SECRET = u'DummyOAuthSecret'
log = commonware.log.getLogger('z.api')
def get_request_headers(request):
return {
'Authorization': request.META.get('HTTP_AUTHORIZATION'),
'Content-Type': request.META.get('CONTENT_TYPE', '')
}
class MarketplaceOAuthRequestValidator(oauth1.RequestValidator):
safe_characters = set(string.printable)
nonce_length = (7, 128)
access_token_length = (8, 128)
request_token_length = (8, 128)
verifier_length = (8, 128)
client_key_length = (8, 128)
enforce_ssl = False # SSL enforcement is handled by ops. :-)
def validate_client_key(self, key, request):
request.attempted_key = key
return Access.objects.filter(key=key).exists()
def get_client_secret(self, key, request):
# This method returns a dummy secret on failure so that auth
# success and failure take a codepath with the same run time,
# to prevent timing attacks.
try:
# OAuthlib needs unicode objects, django-aesfield returns a string.
return Access.objects.get(key=key).secret.decode('utf8')
except Access.DoesNotExist:
return DUMMY_SECRET
@property
def dummy_client(self):
return DUMMY_CLIENT_KEY
@property
def dummy_request_token(self):
return DUMMY_REQUEST_TOKEN
@property
def dummy_access_token(self):
return DUMMY_ACCESS_TOKEN
def get_default_realms(self, client_key, request):
return []
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request, request_token=None,
access_token=None):
n, created = Nonce.objects.safer_get_or_create(
defaults={'client_key': client_key},
nonce=nonce, timestamp=timestamp,
request_token=request_token,
access_token=access_token)
return created
def validate_requested_realms(self, client_key, realms, request):
return True
def validate_realms(self, client_key, token, request, uri=None,
realms=None):
return True
def validate_redirect_uri(self, client_key, redirect_uri, request):
return True
def validate_request_token(self, client_key, request_token, request):
# This method must take the same amount of time/db lookups for
# success and failure to prevent timing attacks.
return Token.objects.filter(token_type=REQUEST_TOKEN,
creds__key=client_key,
key=request_token).exists()
def validate_access_token(self, client_key, access_token, request):
# This method must take the same amount of time/db lookups for
# success and failure to prevent timing attacks.
return Token.objects.filter(token_type=ACCESS_TOKEN,
creds__key=client_key,
key=access_token).exists()
def validate_verifier(self, client_key, request_token, verifier, request):
# This method must take the same amount of time/db lookups for
# success and failure to prevent timing attacks.
try:
t = Token.objects.get(key=request_token, token_type=REQUEST_TOKEN)
candidate = t.verifier
except Token.DoesNotExist:
candidate = ''
return safe_string_equals(candidate, verifier)
def get_request_token_secret(self, client_key, request_token, request):
# This method must take the same amount of time/db lookups for
# success and failure to prevent timing attacks.
try:
t = Token.objects.get(key=request_token, creds__key=client_key,
token_type=REQUEST_TOKEN)
return t.secret
except Token.DoesNotExist:
return DUMMY_SECRET
def get_access_token_secret(self, client_key, request_token, request):
# This method must take the same amount of time/db lookups for
# success and failure to prevent timing attacks.
try:
t = Token.objects.get(key=request_token, creds__key=client_key,
token_type=ACCESS_TOKEN)
except Token.DoesNotExist:
return DUMMY_SECRET
return t.secret
validator = MarketplaceOAuthRequestValidator()
server = oauth1.WebApplicationServer(validator)
@csrf_exempt
def access_request(request):
try:
oauth_req = server._create_request(request.build_absolute_uri(),
request.method, request.body,
get_request_headers(request))
valid, oauth_req = server.validate_access_token_request(oauth_req)
except ValueError:
valid = False
if valid:
req_t = Token.objects.get(
token_type=REQUEST_TOKEN,
key=oauth_req.resource_owner_key)
t = Token.generate_new(
token_type=ACCESS_TOKEN,
creds=req_t.creds,
user=req_t.user)
# Clean up as we go.
req_t.delete()
return HttpResponse(
urlencode({'oauth_token': t.key,
'oauth_token_secret': t.secret}),
content_type='application/x-www-form-urlencoded')
else:
log.error('Invalid OAuth request for acquiring access token')
return HttpResponse(status=401)
@csrf_exempt
def token_request(request):
try:
oauth_req = server._create_request(request.build_absolute_uri(),
request.method, request.body,
get_request_headers(request))
valid, oauth_req = server.validate_request_token_request(oauth_req)
except ValueError:
valid = False
if valid:
consumer = Access.objects.get(key=oauth_req.client_key)
t = Token.generate_new(token_type=REQUEST_TOKEN, creds=consumer)
return HttpResponse(
urlencode({'oauth_token': t.key,
'oauth_token_secret': t.secret,
'oauth_callback_confirmed': True}),
content_type='application/x-www-form-urlencoded')
else:
log.error('Invalid OAuth request for acquiring request token')
return HttpResponse(status=401)
@csrf_exempt
@login_required
def authorize(request):
if request.method == 'GET' and 'oauth_token' in request.GET:
try:
t = Token.objects.get(token_type=REQUEST_TOKEN,
key=request.GET['oauth_token'])
except Token.DoesNotExist:
log.error('Invalid OAuth request for obtaining user authorization')
return HttpResponse(status=401)
return render(request, 'developers/oauth_authorize.html',
{'app_name': t.creds.app_name,
'oauth_token': request.GET['oauth_token']})
elif request.method == 'POST':
token = request.POST.get('oauth_token')
try:
t = Token.objects.get(token_type=REQUEST_TOKEN,
key=token)
except Token.DoesNotExist:
return HttpResponse(status=401)
if 'grant' in request.POST:
t.user = request.user
t.save()
return HttpResponseRedirect(
urlparams(t.creds.redirect_uri, oauth_token=token,
oauth_verifier=t.verifier))
elif 'deny' in request.POST:
t.delete()
return HttpResponse(status=200)
else:
log.error('Invalid OAuth request for user access authorization')
return HttpResponse(status=401)
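# Rough flow summary (comment added for clarity, not part of the original module):
# these three views implement a standard OAuth 1.0a three-legged flow. A consumer
# first POSTs to token_request() with its Access credentials to obtain a temporary
# request token, the user then grants or denies that token via authorize(), and the
# consumer finally exchanges the verified request token at access_request() for a
# long-lived access token bound to the user. URL routing is assumed to be defined
# elsewhere; the exact URL names are not shown here.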
|
the-stack_106_32180 | # pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
import json
from pathlib import Path
from typing import Dict
import pytest
import yaml
@pytest.fixture
def port_type() -> str:
return ""
@pytest.fixture
def label_cfg(metadata_file: Path, port_type: str) -> Dict:
ports_type = f"{port_type}s"
with metadata_file.open() as fp:
cfg = yaml.safe_load(fp)
assert ports_type in cfg
return cfg[ports_type]
@pytest.fixture
def validation_folder(validation_dir: Path, port_type: str) -> Path:
return validation_dir / port_type
@pytest.fixture
def validation_cfg(validation_dir: Path, port_type: str) -> Dict:
validation_file = validation_dir / port_type / (f"{port_type}s.json")
if validation_file.exists():
with validation_file.open() as fp:
return json.load(fp)
# it may not exist if only files are required
return None
def _find_key_in_cfg(filename: str, value: Dict) -> str:
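    # Recursively walk the (possibly nested) label configuration and yield the entry
    # matching `filename`: the key itself for "data:" file ports, otherwise the mapped
    # value. Despite the `-> str` annotation, this is used as a generator.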
for k, v in value.items():
if k == filename:
if isinstance(v, dict):
assert "data:" in v["type"]
yield k
else:
yield v
elif isinstance(v, dict):
for result in _find_key_in_cfg(filename, v):
yield result
@pytest.mark.parametrize("port_type", ["input", "output"])
def test_validation_data_follows_definition(
label_cfg: Dict, validation_cfg: Dict, validation_folder: Path
):
for key, value in label_cfg.items():
assert "type" in value
# rationale: files are on their own and other types are in inputs.json
if not "data:" in value["type"]:
# check that keys are available
assert key in validation_cfg
else:
# it's a file and it should be in the folder as well using key as the filename
filename_to_look_for = key
if "fileToKeyMap" in value:
# ...or there is a mapping
assert len(value["fileToKeyMap"]) > 0
for filename, mapped_value in value["fileToKeyMap"].items():
assert mapped_value == key
filename_to_look_for = filename
assert (validation_folder / filename_to_look_for).exists()
else:
assert (validation_folder / filename_to_look_for).exists()
if validation_cfg:
for key, value in validation_cfg.items():
# check the key is defined in the labels
assert key in label_cfg
label2types = {
"number": (float, int),
"integer": int,
"boolean": bool,
"string": str,
}
if not "data:" in label_cfg[key]["type"]:
# check the type is correct
assert isinstance(value, label2types[label_cfg[key]["type"]])
for path in validation_folder.glob("**/*"):
if path.name in ["inputs.json", "outputs.json", ".gitkeep"]:
continue
assert path.is_file()
filename = path.name
# this filename shall be available as a key in the labels somewhere
key = next(_find_key_in_cfg(str(filename), label_cfg))
assert key in label_cfg
|
the-stack_106_32181 | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nsip6(base_resource) :
""" Configuration for ip6 resource. """
def __init__(self) :
self._ipv6address = None
self._scope = None
self._type = None
self._vlan = None
self._nd = None
self._icmp = None
self._vserver = None
self._telnet = None
self._ftp = None
self._gui = None
self._ssh = None
self._snmp = None
self._mgmtaccess = None
self._restrictaccess = None
self._dynamicrouting = None
self._decrementhoplimit = None
self._hostroute = None
self._advertiseondefaultpartition = None
self._networkroute = None
self._tag = None
self._ip6hostrtgw = None
self._metric = None
self._vserverrhilevel = None
self._ospf6lsatype = None
self._ospfarea = None
self._state = None
self._map = None
self._vrid6 = None
self._ownernode = None
self._ownerdownresponse = None
self._td = None
self._ndowner = None
self._mptcpadvertise = None
self._iptype = None
self._curstate = None
self._viprtadv2bsd = None
self._vipvsercount = None
self._vipvserdowncount = None
self._systemtype = None
self._operationalndowner = None
self.___count = None
@property
def ipv6address(self) :
r"""IPv6 address to create on the Citrix ADC.<br/>Minimum length = 1.
"""
try :
return self._ipv6address
except Exception as e:
raise e
@ipv6address.setter
def ipv6address(self, ipv6address) :
r"""IPv6 address to create on the Citrix ADC.<br/>Minimum length = 1
"""
try :
self._ipv6address = ipv6address
except Exception as e:
raise e
@property
def scope(self) :
r"""Scope of the IPv6 address to be created. Cannot be changed after the IP address is created.<br/>Default value: global<br/>Possible values = global, link-local.
"""
try :
return self._scope
except Exception as e:
raise e
@scope.setter
def scope(self, scope) :
r"""Scope of the IPv6 address to be created. Cannot be changed after the IP address is created.<br/>Default value: global<br/>Possible values = global, link-local
"""
try :
self._scope = scope
except Exception as e:
raise e
@property
def type(self) :
r"""Type of IP address to be created on the Citrix ADC. Cannot be changed after the IP address is created.<br/>Default value: SNIP<br/>Possible values = NSIP, VIP, SNIP, GSLBsiteIP, ADNSsvcIP, RADIUSListenersvcIP, CLIP.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
r"""Type of IP address to be created on the Citrix ADC. Cannot be changed after the IP address is created.<br/>Default value: SNIP<br/>Possible values = NSIP, VIP, SNIP, GSLBsiteIP, ADNSsvcIP, RADIUSListenersvcIP, CLIP
"""
try :
self._type = type
except Exception as e:
raise e
@property
def vlan(self) :
r"""The VLAN number.<br/>Default value: 0<br/>Maximum length = 4094.
"""
try :
return self._vlan
except Exception as e:
raise e
@vlan.setter
def vlan(self, vlan) :
r"""The VLAN number.<br/>Default value: 0<br/>Maximum length = 4094
"""
try :
self._vlan = vlan
except Exception as e:
raise e
@property
def nd(self) :
r"""Respond to Neighbor Discovery (ND) requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._nd
except Exception as e:
raise e
@nd.setter
def nd(self, nd) :
r"""Respond to Neighbor Discovery (ND) requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._nd = nd
except Exception as e:
raise e
@property
def icmp(self) :
r"""Respond to ICMP requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._icmp
except Exception as e:
raise e
@icmp.setter
def icmp(self, icmp) :
r"""Respond to ICMP requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._icmp = icmp
except Exception as e:
raise e
@property
def vserver(self) :
r"""Enable or disable the state of all the virtual servers associated with this VIP6 address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._vserver
except Exception as e:
raise e
@vserver.setter
def vserver(self, vserver) :
r"""Enable or disable the state of all the virtual servers associated with this VIP6 address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._vserver = vserver
except Exception as e:
raise e
@property
def telnet(self) :
r"""Allow Telnet access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._telnet
except Exception as e:
raise e
@telnet.setter
def telnet(self, telnet) :
r"""Allow Telnet access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._telnet = telnet
except Exception as e:
raise e
@property
def ftp(self) :
r"""Allow File Transfer Protocol (FTP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._ftp
except Exception as e:
raise e
@ftp.setter
def ftp(self, ftp) :
r"""Allow File Transfer Protocol (FTP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._ftp = ftp
except Exception as e:
raise e
@property
def gui(self) :
r"""Allow graphical user interface (GUI) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, SECUREONLY, DISABLED.
"""
try :
return self._gui
except Exception as e:
raise e
@gui.setter
def gui(self, gui) :
r"""Allow graphical user interface (GUI) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, SECUREONLY, DISABLED
"""
try :
self._gui = gui
except Exception as e:
raise e
@property
def ssh(self) :
r"""Allow secure Shell (SSH) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._ssh
except Exception as e:
raise e
@ssh.setter
def ssh(self, ssh) :
r"""Allow secure Shell (SSH) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._ssh = ssh
except Exception as e:
raise e
@property
def snmp(self) :
r"""Allow Simple Network Management Protocol (SNMP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._snmp
except Exception as e:
raise e
@snmp.setter
def snmp(self, snmp) :
r"""Allow Simple Network Management Protocol (SNMP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._snmp = snmp
except Exception as e:
raise e
@property
def mgmtaccess(self) :
r"""Allow access to management applications on this IP address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._mgmtaccess
except Exception as e:
raise e
@mgmtaccess.setter
def mgmtaccess(self, mgmtaccess) :
r"""Allow access to management applications on this IP address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._mgmtaccess = mgmtaccess
except Exception as e:
raise e
@property
def restrictaccess(self) :
r"""Block access to nonmanagement applications on this IP address. This option is applicable forMIP6s, SNIP6s, and NSIP6s, and is disabled by default. Nonmanagement applications can run on the underlying Citrix ADC Free BSD operating system.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._restrictaccess
except Exception as e:
raise e
@restrictaccess.setter
def restrictaccess(self, restrictaccess) :
r"""Block access to nonmanagement applications on this IP address. This option is applicable forMIP6s, SNIP6s, and NSIP6s, and is disabled by default. Nonmanagement applications can run on the underlying Citrix ADC Free BSD operating system.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._restrictaccess = restrictaccess
except Exception as e:
raise e
@property
def dynamicrouting(self) :
r"""Allow dynamic routing on this IP address. Specific to Subnet IPv6 (SNIP6) address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._dynamicrouting
except Exception as e:
raise e
@dynamicrouting.setter
def dynamicrouting(self, dynamicrouting) :
r"""Allow dynamic routing on this IP address. Specific to Subnet IPv6 (SNIP6) address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._dynamicrouting = dynamicrouting
except Exception as e:
raise e
@property
def decrementhoplimit(self) :
r"""Decrement Hop Limit by 1 when ENABLED.This setting is applicable only for UDP traffic.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._decrementhoplimit
except Exception as e:
raise e
@decrementhoplimit.setter
def decrementhoplimit(self, decrementhoplimit) :
r"""Decrement Hop Limit by 1 when ENABLED.This setting is applicable only for UDP traffic.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._decrementhoplimit = decrementhoplimit
except Exception as e:
raise e
@property
def hostroute(self) :
r"""Option to push the VIP6 to ZebOS routing table for Kernel route redistribution through dynamic routing protocols.<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._hostroute
except Exception as e:
raise e
@hostroute.setter
def hostroute(self, hostroute) :
r"""Option to push the VIP6 to ZebOS routing table for Kernel route redistribution through dynamic routing protocols.<br/>Possible values = ENABLED, DISABLED
"""
try :
self._hostroute = hostroute
except Exception as e:
raise e
@property
def advertiseondefaultpartition(self) :
r"""Advertise VIPs from Shared VLAN on Default Partition.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._advertiseondefaultpartition
except Exception as e:
raise e
@advertiseondefaultpartition.setter
def advertiseondefaultpartition(self, advertiseondefaultpartition) :
r"""Advertise VIPs from Shared VLAN on Default Partition.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._advertiseondefaultpartition = advertiseondefaultpartition
except Exception as e:
raise e
@property
def networkroute(self) :
r"""Option to push the SNIP6 subnet to ZebOS routing table for Kernel route redistribution through dynamic routing protocol.<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._networkroute
except Exception as e:
raise e
@networkroute.setter
def networkroute(self, networkroute) :
r"""Option to push the SNIP6 subnet to ZebOS routing table for Kernel route redistribution through dynamic routing protocol.<br/>Possible values = ENABLED, DISABLED
"""
try :
self._networkroute = networkroute
except Exception as e:
raise e
@property
def tag(self) :
r"""Tag value for the network/host route associated with this IP.<br/>Default value: 0.
"""
try :
return self._tag
except Exception as e:
raise e
@tag.setter
def tag(self, tag) :
r"""Tag value for the network/host route associated with this IP.<br/>Default value: 0
"""
try :
self._tag = tag
except Exception as e:
raise e
@property
def ip6hostrtgw(self) :
r"""IPv6 address of the gateway for the route. If Gateway is not set, VIP uses :: as the gateway.<br/>Default value: 0.
"""
try :
return self._ip6hostrtgw
except Exception as e:
raise e
@ip6hostrtgw.setter
def ip6hostrtgw(self, ip6hostrtgw) :
r"""IPv6 address of the gateway for the route. If Gateway is not set, VIP uses :: as the gateway.<br/>Default value: 0
"""
try :
self._ip6hostrtgw = ip6hostrtgw
except Exception as e:
raise e
@property
def metric(self) :
r"""Integer value to add to or subtract from the cost of the route advertised for the VIP6 address.<br/>Minimum length = -16777215.
"""
try :
return self._metric
except Exception as e:
raise e
@metric.setter
def metric(self, metric) :
r"""Integer value to add to or subtract from the cost of the route advertised for the VIP6 address.<br/>Minimum length = -16777215
"""
try :
self._metric = metric
except Exception as e:
raise e
@property
def vserverrhilevel(self) :
r"""Advertise or do not advertise the route for the Virtual IP (VIP6) address on the basis of the state of the virtual servers associated with that VIP6.
* NONE - Advertise the route for the VIP6 address, irrespective of the state of the virtual servers associated with the address.
* ONE VSERVER - Advertise the route for the VIP6 address if at least one of the associated virtual servers is in UP state.
* ALL VSERVER - Advertise the route for the VIP6 address if all of the associated virtual servers are in UP state.
* VSVR_CNTRLD. Advertise the route for the VIP address according to the RHIstate (RHI STATE) parameter setting on all the associated virtual servers of the VIP address along with their states.
When Vserver RHI Level (RHI) parameter is set to VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate (RHI STATE) settings on the virtual servers associated with the VIP address:
* If you set RHI STATE to PASSIVE on all virtual servers, the Citrix ADC always advertises the route for the VIP address.
* If you set RHI STATE to ACTIVE on all virtual servers, the Citrix ADC advertises the route for the VIP address if at least one of the associated virtual servers is in UP state.
*If you set RHI STATE to ACTIVE on some and PASSIVE on others, the Citrix ADC advertises the route for the VIP address if at least one of the associated virtual servers, whose RHI STATE set to ACTIVE, is in UP state.<br/>Default value: ONE_VSERVER<br/>Possible values = ONE_VSERVER, ALL_VSERVERS, NONE, VSVR_CNTRLD.
"""
try :
return self._vserverrhilevel
except Exception as e:
raise e
@vserverrhilevel.setter
def vserverrhilevel(self, vserverrhilevel) :
r"""Advertise or do not advertise the route for the Virtual IP (VIP6) address on the basis of the state of the virtual servers associated with that VIP6.
* NONE - Advertise the route for the VIP6 address, irrespective of the state of the virtual servers associated with the address.
* ONE VSERVER - Advertise the route for the VIP6 address if at least one of the associated virtual servers is in UP state.
* ALL VSERVER - Advertise the route for the VIP6 address if all of the associated virtual servers are in UP state.
* VSVR_CNTRLD. Advertise the route for the VIP address according to the RHIstate (RHI STATE) parameter setting on all the associated virtual servers of the VIP address along with their states.
When Vserver RHI Level (RHI) parameter is set to VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate (RHI STATE) settings on the virtual servers associated with the VIP address:
* If you set RHI STATE to PASSIVE on all virtual servers, the Citrix ADC always advertises the route for the VIP address.
* If you set RHI STATE to ACTIVE on all virtual servers, the Citrix ADC advertises the route for the VIP address if at least one of the associated virtual servers is in UP state.
*If you set RHI STATE to ACTIVE on some and PASSIVE on others, the Citrix ADC advertises the route for the VIP address if at least one of the associated virtual servers, whose RHI STATE set to ACTIVE, is in UP state.<br/>Default value: ONE_VSERVER<br/>Possible values = ONE_VSERVER, ALL_VSERVERS, NONE, VSVR_CNTRLD
"""
try :
self._vserverrhilevel = vserverrhilevel
except Exception as e:
raise e
@property
def ospf6lsatype(self) :
r"""Type of LSAs to be used by the IPv6 OSPF protocol, running on the Citrix ADC, for advertising the route for the VIP6 address.<br/>Default value: EXTERNAL<br/>Possible values = INTRA_AREA, EXTERNAL.
"""
try :
return self._ospf6lsatype
except Exception as e:
raise e
@ospf6lsatype.setter
def ospf6lsatype(self, ospf6lsatype) :
r"""Type of LSAs to be used by the IPv6 OSPF protocol, running on the Citrix ADC, for advertising the route for the VIP6 address.<br/>Default value: EXTERNAL<br/>Possible values = INTRA_AREA, EXTERNAL
"""
try :
self._ospf6lsatype = ospf6lsatype
except Exception as e:
raise e
@property
def ospfarea(self) :
r"""ID of the area in which the Intra-Area-Prefix LSAs are to be advertised for the VIP6 address by the IPv6 OSPF protocol running on the Citrix ADC. When ospfArea is not set, VIP6 is advertised on all areas.<br/>Default value: -1<br/>Maximum length = 4294967294LU.
"""
try :
return self._ospfarea
except Exception as e:
raise e
@ospfarea.setter
def ospfarea(self, ospfarea) :
r"""ID of the area in which the Intra-Area-Prefix LSAs are to be advertised for the VIP6 address by the IPv6 OSPF protocol running on the Citrix ADC. When ospfArea is not set, VIP6 is advertised on all areas.<br/>Default value: -1<br/>Maximum length = 4294967294LU
"""
try :
self._ospfarea = ospfarea
except Exception as e:
raise e
@property
def state(self) :
r"""Enable or disable the IP address.<br/>Default value: ENABLED<br/>Possible values = DISABLED, ENABLED.
"""
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
r"""Enable or disable the IP address.<br/>Default value: ENABLED<br/>Possible values = DISABLED, ENABLED
"""
try :
self._state = state
except Exception as e:
raise e
@property
def map(self) :
r"""Mapped IPV4 address for the IPV6 address.
"""
try :
return self._map
except Exception as e:
raise e
@map.setter
def map(self, map) :
r"""Mapped IPV4 address for the IPV6 address.
"""
try :
self._map = map
except Exception as e:
raise e
@property
def vrid6(self) :
r"""A positive integer that uniquely identifies a VMAC address for binding to this VIP address. This binding is used to set up Citrix ADCs in an active-active configuration using VRRP.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._vrid6
except Exception as e:
raise e
@vrid6.setter
def vrid6(self, vrid6) :
r"""A positive integer that uniquely identifies a VMAC address for binding to this VIP address. This binding is used to set up Citrix ADCs in an active-active configuration using VRRP.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._vrid6 = vrid6
except Exception as e:
raise e
@property
def ownernode(self) :
r"""ID of the cluster node for which you are adding the IP address. Must be used if you want the IP address to be active only on the specific node. Can be configured only through the cluster IP address. Cannot be changed after the IP address is created.<br/>Default value: 255.
"""
try :
return self._ownernode
except Exception as e:
raise e
@ownernode.setter
def ownernode(self, ownernode) :
r"""ID of the cluster node for which you are adding the IP address. Must be used if you want the IP address to be active only on the specific node. Can be configured only through the cluster IP address. Cannot be changed after the IP address is created.<br/>Default value: 255
"""
try :
self._ownernode = ownernode
except Exception as e:
raise e
@property
def ownerdownresponse(self) :
r"""in cluster system, if the owner node is down, whether should it respond to icmp/arp.<br/>Default value: YES<br/>Possible values = YES, NO.
"""
try :
return self._ownerdownresponse
except Exception as e:
raise e
@ownerdownresponse.setter
def ownerdownresponse(self, ownerdownresponse) :
r"""in cluster system, if the owner node is down, whether should it respond to icmp/arp.<br/>Default value: YES<br/>Possible values = YES, NO
"""
try :
self._ownerdownresponse = ownerdownresponse
except Exception as e:
raise e
@property
def td(self) :
r"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094.
"""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
r"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094
"""
try :
self._td = td
except Exception as e:
raise e
@property
def ndowner(self) :
r"""NdOwner in Cluster for VIPS and Striped SNIPS.<br/>Default value: 255.
"""
try :
return self._ndowner
except Exception as e:
raise e
@ndowner.setter
def ndowner(self, ndowner) :
r"""NdOwner in Cluster for VIPS and Striped SNIPS.<br/>Default value: 255
"""
try :
self._ndowner = ndowner
except Exception as e:
raise e
@property
def mptcpadvertise(self) :
r"""If enabled, this IP will be advertised by Citrix ADC to MPTCP enabled clients as part of ADD_ADDR option.<br/>Default value: NO<br/>Possible values = YES, NO.
"""
try :
return self._mptcpadvertise
except Exception as e:
raise e
@mptcpadvertise.setter
def mptcpadvertise(self, mptcpadvertise) :
r"""If enabled, this IP will be advertised by Citrix ADC to MPTCP enabled clients as part of ADD_ADDR option.<br/>Default value: NO<br/>Possible values = YES, NO
"""
try :
self._mptcpadvertise = mptcpadvertise
except Exception as e:
raise e
@property
def iptype(self) :
r"""The type of the IPv6 address.<br/>Possible values = NSIP, VIP, SNIP, GSLBsiteIP, ADNSsvcIP, RADIUSListenersvcIP, CLIP.
"""
try :
return self._iptype
except Exception as e:
raise e
@property
def curstate(self) :
r"""Current state of this IP.<br/>Default value: ENABLED<br/>Possible values = DISABLED, ENABLED.
"""
try :
return self._curstate
except Exception as e:
raise e
@property
def viprtadv2bsd(self) :
r"""Whether this route is advertised to FreeBSD.
"""
try :
return self._viprtadv2bsd
except Exception as e:
raise e
@property
def vipvsercount(self) :
r"""Number of vservers bound to this VIP.
"""
try :
return self._vipvsercount
except Exception as e:
raise e
@property
def vipvserdowncount(self) :
r"""Number of vservers bound to this VIP, which are down.
"""
try :
return self._vipvserdowncount
except Exception as e:
raise e
@property
def systemtype(self) :
r"""The type of the System. Possible Values: Standalone, HA, Cluster. Used for display purpose.<br/>Possible values = Stand-alone, HA, Cluster.
"""
try :
return self._systemtype
except Exception as e:
raise e
@property
def operationalndowner(self) :
r"""Operational ND6 Owner.
"""
try :
return self._operationalndowner
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nsip6_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nsip6
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.ipv6address is not None :
return str(self.ipv6address)
return None
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
r""" Use this function to create a resource with only add operation specific parameters.
"""
addresource = nsip6()
addresource.ipv6address = resource.ipv6address
addresource.scope = resource.scope
addresource.type = resource.type
addresource.vlan = resource.vlan
addresource.nd = resource.nd
addresource.icmp = resource.icmp
addresource.vserver = resource.vserver
addresource.telnet = resource.telnet
addresource.ftp = resource.ftp
addresource.gui = resource.gui
addresource.ssh = resource.ssh
addresource.snmp = resource.snmp
addresource.mgmtaccess = resource.mgmtaccess
addresource.restrictaccess = resource.restrictaccess
addresource.dynamicrouting = resource.dynamicrouting
addresource.decrementhoplimit = resource.decrementhoplimit
addresource.hostroute = resource.hostroute
addresource.advertiseondefaultpartition = resource.advertiseondefaultpartition
addresource.networkroute = resource.networkroute
addresource.tag = resource.tag
addresource.ip6hostrtgw = resource.ip6hostrtgw
addresource.metric = resource.metric
addresource.vserverrhilevel = resource.vserverrhilevel
addresource.ospf6lsatype = resource.ospf6lsatype
addresource.ospfarea = resource.ospfarea
addresource.state = resource.state
addresource.map = resource.map
addresource.vrid6 = resource.vrid6
addresource.ownernode = resource.ownernode
addresource.ownerdownresponse = resource.ownerdownresponse
addresource.td = resource.td
addresource.ndowner = resource.ndowner
addresource.mptcpadvertise = resource.mptcpadvertise
return addresource
@classmethod
def add(cls, client, resource) :
r""" Use this API to add nsip6.
"""
try :
if type(resource) is not list :
addresource = cls.filter_add_parameters(resource)
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i] = cls.filter_add_parameters(resource[i])
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def filter_delete_parameters(cls, resource) :
r""" Use this function to create a resource with only delete operation specific parameters.
"""
deleteresource = nsip6()
deleteresource.ipv6address = resource.ipv6address
deleteresource.td = resource.td
return deleteresource
@classmethod
def delete(cls, client, resource) :
r""" Use this API to delete nsip6.
"""
try :
if type(resource) is not list :
deleteresource = nsip6()
if type(resource) != type(deleteresource):
deleteresource.ipv6address = resource
else :
deleteresource = cls.filter_delete_parameters(resource)
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].ipv6address = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
							deleteresources[i] = cls.filter_delete_parameters(resource[i])
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def filter_update_parameters(cls, resource) :
r""" Use this function to create a resource with only update operation specific parameters.
"""
updateresource = nsip6()
updateresource.ipv6address = resource.ipv6address
updateresource.td = resource.td
updateresource.nd = resource.nd
updateresource.icmp = resource.icmp
updateresource.vserver = resource.vserver
updateresource.telnet = resource.telnet
updateresource.ftp = resource.ftp
updateresource.gui = resource.gui
updateresource.ssh = resource.ssh
updateresource.snmp = resource.snmp
updateresource.mgmtaccess = resource.mgmtaccess
updateresource.ownerdownresponse = resource.ownerdownresponse
updateresource.restrictaccess = resource.restrictaccess
updateresource.state = resource.state
updateresource.map = resource.map
updateresource.decrementhoplimit = resource.decrementhoplimit
updateresource.dynamicrouting = resource.dynamicrouting
updateresource.hostroute = resource.hostroute
updateresource.advertiseondefaultpartition = resource.advertiseondefaultpartition
updateresource.networkroute = resource.networkroute
updateresource.ip6hostrtgw = resource.ip6hostrtgw
updateresource.metric = resource.metric
updateresource.vserverrhilevel = resource.vserverrhilevel
updateresource.ospf6lsatype = resource.ospf6lsatype
updateresource.ospfarea = resource.ospfarea
updateresource.tag = resource.tag
updateresource.vrid6 = resource.vrid6
updateresource.ndowner = resource.ndowner
updateresource.mptcpadvertise = resource.mptcpadvertise
return updateresource
@classmethod
def update(cls, client, resource) :
r""" Use this API to update nsip6.
"""
try :
if type(resource) is not list :
updateresource = cls.filter_update_parameters(resource)
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_update_parameters(resource[i])
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
r""" Use this API to unset the properties of nsip6 resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = nsip6()
if type(resource) != type(unsetresource):
unsetresource.ipv6address = resource
else :
unsetresource.ipv6address = resource.ipv6address
unsetresource.td = resource.td
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].ipv6address = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].ipv6address = resource[i].ipv6address
unsetresources[i].td = resource[i].td
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the nsip6 resources that are configured on netscaler.
"""
try :
if not name :
obj = nsip6()
response = obj.get_resources(client, option_)
else :
if type(name) is not list :
if type(name) != cls :
raise Exception('Invalid parameter name:{0}'.format(type(name)))
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name)
response = name.get_resource(client, option_)
else :
if name and len(name) > 0 :
if type(name[0]) != cls :
raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
response = [nsip6() for _ in range(len(name))]
for i in range(len(name)) :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name[i])
response[i] = name[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of nsip6 resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nsip6()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the nsip6 resources configured on NetScaler.
"""
try :
obj = nsip6()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of nsip6 resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nsip6()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Iptype:
NSIP = "NSIP"
VIP = "VIP"
SNIP = "SNIP"
GSLBsiteIP = "GSLBsiteIP"
ADNSsvcIP = "ADNSsvcIP"
RADIUSListenersvcIP = "RADIUSListenersvcIP"
CLIP = "CLIP"
class Networkroute:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Ssh:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class State:
DISABLED = "DISABLED"
ENABLED = "ENABLED"
class Ospf6lsatype:
INTRA_AREA = "INTRA_AREA"
EXTERNAL = "EXTERNAL"
class Decrementhoplimit:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Mptcpadvertise:
YES = "YES"
NO = "NO"
class Scope:
GLOBAL = "global"
link_local = "link-local"
class Nd:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Systemtype:
Stand_alone = "Stand-alone"
HA = "HA"
Cluster = "Cluster"
class Gui:
ENABLED = "ENABLED"
SECUREONLY = "SECUREONLY"
DISABLED = "DISABLED"
class Dynamicrouting:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Type:
NSIP = "NSIP"
VIP = "VIP"
SNIP = "SNIP"
GSLBsiteIP = "GSLBsiteIP"
ADNSsvcIP = "ADNSsvcIP"
RADIUSListenersvcIP = "RADIUSListenersvcIP"
CLIP = "CLIP"
class Mgmtaccess:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Hostroute:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Ftp:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Ownerdownresponse:
YES = "YES"
NO = "NO"
class Vserverrhilevel:
ONE_VSERVER = "ONE_VSERVER"
ALL_VSERVERS = "ALL_VSERVERS"
NONE = "NONE"
VSVR_CNTRLD = "VSVR_CNTRLD"
class Icmp:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Vserver:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Advertiseondefaultpartition:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Snmp:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Curstate:
DISABLED = "DISABLED"
ENABLED = "ENABLED"
class Restrictaccess:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Telnet:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class nsip6_response(base_response) :
def __init__(self, length=1) :
self.nsip6 = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nsip6 = [nsip6() for _ in range(length)]
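# Rough usage sketch (added comment, not part of the generated SDK file). Assuming a
# logged-in nitro_service session `client`, an IPv6 address could be configured with:
#
#   resource = nsip6()
#   resource.ipv6address = "2001:db8::10/64"   # hypothetical address
#   resource.type = nsip6.Type.SNIP
#   nsip6.add(client, resource)
#
# get(), get_filtered(), count() and delete() follow the same pattern used by the
# other generated nitro config resources.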
|
the-stack_106_32182 | from kivy_ios.toolchain import HostRecipe, shprint
from os.path import join
import sh
import logging
logger = logging.getLogger(__name__)
arch_mapper = {'x86_64': 'darwin64-x86_64-cc',
'arm64': 'darwin64-arm64-cc'}
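# arch_mapper translates the kivy-ios architecture name into the corresponding
# OpenSSL "Configure" target string passed to the build below.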
class HostOpensslRecipe(HostRecipe):
version = "1.1.1l"
url = "http://www.openssl.org/source/openssl-{version}.tar.gz"
def get_build_env(self):
build_env = self.ctx.env.copy()
self.build_env = build_env
return build_env
def build_arch(self, arch):
build_env = self.get_build_env()
configure = sh.Command(join(self.build_dir, "Configure"))
shprint(configure,
arch_mapper[arch.arch],
_env=build_env)
shprint(sh.make, "clean")
shprint(sh.make, self.ctx.concurrent_make, "build_libs")
def install(self):
arch = self.archs[0]
sh.mkdir('-p', join(self.ctx.dist_dir, 'hostopenssl'))
sh.cp('-r', join(self.get_build_dir(arch), 'include'),
join(self.ctx.dist_dir, 'hostopenssl', 'include'))
sh.mkdir('-p', join(self.ctx.dist_dir, 'hostopenssl', 'lib'))
sh.cp(join(self.get_build_dir(arch), 'libssl.a'),
join(self.ctx.dist_dir, 'hostopenssl', 'lib'))
sh.cp(join(self.get_build_dir(arch), 'libcrypto.a'),
join(self.ctx.dist_dir, 'hostopenssl', 'lib'))
recipe = HostOpensslRecipe()
|
the-stack_106_32184 | import pytest
from django.urls import resolve, reverse
from ecommerce.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
|
the-stack_106_32186 | from operator import itemgetter
cancerlist = ["ACC", "BLCA", "BRCA", "CESC", "CHOL", "COAD", "DLBC", "ESCA", "GBM", "HNSC", "KICH", "KIRC", "KIRP", "LGG", "LIHC", "LUAD", "LUSC", "MESO", "OV", "PAAD", "PCPG", "PRAD", "READ", "SARC", "SKCM", "STAD", "TGCT", "THCA", "THYM", "UCEC", "UCS", "UVM"]
#cancerlist = ["PANCANCER"]
input_file1 = []
input_file2 = []
probe_count = 485577
p_threshold = [0.05, 0.005, 0.0005, 0.0000001]
sample_id = []
cytoact = []
sample_index = []
def GetSample() :
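	# Reads TCGA_methylation_cowork_1.txt and fills the module-level sample_id and
	# cytoact lists (sample barcode and its CytAct score), returning the sample count.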
cytoact_file = open("TCGA_methylation_cowork_1.txt", 'r')
header = cytoact_file.readline().split() # getting header
id_posit = header.index("id") # sample ID positioning
cytoact_posit = header.index("CytAct") # CytAct positioning
cytodata = cytoact_file.readlines() # read data table
cytoact_file.close()
count = 0
global sample_id
global cytoact
for line in cytodata :
line = line.split()
sample_id.append(line[id_posit].replace('_', '')) # sample ID extraction
cytoact.append(float(line[cytoact_posit])) # CytAct extraction
count += 1
return count # Sample number return
sample_number = GetSample()
percentage = [0.01, 0.025, 0.05, 0.1, 0.125, 0.15, 0.175, 0.2, 0.225, 0.25, 0.275, 0.3]
for i in range(0, len(cancerlist)) :
input_tumor = open(cancerlist[i] + ".humanmethylation450.tumor.txt", 'r')
sample_header1 = input_tumor.readline().split() # sample line
input_tumor.readline() # junk line
############################################################################################################################################################################
# make sample index table
del sample_header1[0]; del sample_header1[0]
sample_index = []
sample_binary_table = []
length = len(sample_header1)
for j in range(0, length) :
sample_header1[j] = sample_header1[j][:15].replace('-', '')
if(sample_header1[j] in sample_id) : sample_index.append(sample_id.index(sample_header1[j]))
else : sample_index.append(-1)
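	# sample_binary_table[p][q][s] counts, for p-value threshold p and tail fraction q,
	# how many selected CpG sites place sample s in the skewed tail of the beta values.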
for j in range(len(p_threshold)) :
sample_binary_table.append([])
for k in range(len(percentage)) :
sample_binary_table[j].append([])
for l in range(length) : sample_binary_table[j][k].append(0)
############################################################################################################################################################################
whole_skew = []; whole_skew_index = []
for j in range(len(p_threshold)) :
input_file = open(str(p_threshold[j]) + "." + cancerlist[i] + ".CpGsites.By.TTest.txt", 'r')
input_file.readline() # junk line
whole_skew.append([])
lines = input_file.readlines()
for line in lines : # Derivation of meaningful CpG sites
line = line.split()
t_stat = float(line[1])
whole_skew[j].append(line[0])
whole_skew[j].append("END_POINT")
whole_skew_index.append(0)
for j in range(probe_count) :
line1 = input_tumor.readline().split()
site_id = line1.pop(0)
############################################################################################################################################################################
# getting betavalue for each cpg site
betavalue_row = []
new_length = length
for k in range(0, length) :
if(line1[k] == "NA" or sample_index[k] == -1) :
new_length -= 1
continue
betavalue_row.append([float(line1[k]), k])
betavalue_row.sort(key = itemgetter(0))
############################################################################################################################################################################
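		# For every p-value threshold whose next significant CpG site matches this probe,
		# flag the samples in the extreme tail of the beta-value distribution: the upper
		# tail when the median beta value is <= 0.5, otherwise the lower tail.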
if(new_length > 0) :
for k in range(len(p_threshold)) :
if(whole_skew[k][whole_skew_index[k]] == site_id) :
					betavalue_median = betavalue_row[new_length // 2][0]
					if(new_length % 2 == 0) : betavalue_median = (betavalue_median + betavalue_row[new_length // 2 - 1][0]) / 2
if(betavalue_median <= 0.5) :
for percentage_i in range(len(percentage)) :
threshold = int(new_length * percentage[percentage_i])
for l in range(threshold) : sample_binary_table[k][percentage_i][betavalue_row[new_length - l - 1][1]] += 1
else :
for percentage_i in range(len(percentage)) :
threshold = int(new_length * percentage[percentage_i])
for l in range(threshold) : sample_binary_table[k][percentage_i][betavalue_row[l][1]] += 1
whole_skew_index[k] += 1
if(j % 10000 == 0) :
print(cancerlist[i] + " %d completed." % j)
for j in range(len(p_threshold)) :
for k in range(len(percentage)) :
output_file = open("Pvalue." + str(p_threshold[j]) + ".Percentage." + str(percentage[k]) + "." + cancerlist[i] + ".Negative.Binarization.Summation.Excluding.Invalid.Samples.txt", 'w')
for l in range(length) :
if(sample_index[l] == -1) : continue
printline = sample_header1[l] + "\t%s\n" % str(sample_binary_table[j][k][l])
output_file.write(printline)
|
the-stack_106_32187 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Union
import numpy as np
import torch
from monai.metrics.utils import do_metric_reduction, get_mask_edges, get_surface_distance, ignore_background
from monai.utils import MetricReduction
from .metric import CumulativeIterationMetric
class SurfaceDistanceMetric(CumulativeIterationMetric):
"""
Compute Surface Distance between two tensors. It can support both multi-classes and multi-labels tasks.
It supports both symmetric and asymmetric surface distance calculation.
Input `y_pred` is compared with ground truth `y`.
`y_preds` is expected to have binarized predictions and `y` should be in one-hot format.
You can use suitable transforms in ``monai.transforms.post`` first to achieve binarized values.
`y_preds` and `y` can be a list of channel-first Tensor (CHW[D]) or a batch-first Tensor (BCHW[D]).
Args:
include_background: whether to skip distance computation on the first channel of
the predicted output. Defaults to ``False``.
symmetric: whether to calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
distance_metric: : [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
the metric used to compute surface distance. Defaults to ``"euclidean"``.
reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,
available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction.
get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
Here `not_nans` count the number of not nans for the metric, thus its shape equals to the shape of the metric.
"""
def __init__(
self,
include_background: bool = False,
symmetric: bool = False,
distance_metric: str = "euclidean",
reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
get_not_nans: bool = False,
) -> None:
super().__init__()
self.include_background = include_background
self.distance_metric = distance_metric
self.symmetric = symmetric
self.reduction = reduction
self.get_not_nans = get_not_nans
def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor): # type: ignore
"""
Args:
y_pred: input data to compute, typical segmentation model output.
It must be one-hot format and first dim is batch, example shape: [16, 3, 32, 32]. The values
should be binarized.
y: ground truth to compute the distance. It must be one-hot format and first dim is batch.
The values should be binarized.
Raises:
ValueError: when `y` is not a binarized tensor.
ValueError: when `y_pred` has less than three dimensions.
"""
if not isinstance(y_pred, torch.Tensor) or not isinstance(y, torch.Tensor):
raise ValueError("y_pred and y must be PyTorch Tensor.")
if not torch.all(y_pred.byte() == y_pred):
warnings.warn("y_pred should be a binarized tensor.")
if not torch.all(y.byte() == y):
raise ValueError("y should be a binarized tensor.")
dims = y_pred.ndimension()
if dims < 3:
raise ValueError("y_pred should have at least three dimensions.")
# compute (BxC) for each channel for each batch
return compute_average_surface_distance(
y_pred=y_pred,
y=y,
include_background=self.include_background,
symmetric=self.symmetric,
distance_metric=self.distance_metric,
)
def aggregate(self): # type: ignore
"""
Execute reduction logic for the output of `compute_average_surface_distance`.
"""
data = self.get_buffer()
if not isinstance(data, torch.Tensor):
raise ValueError("the data to aggregate must be PyTorch Tensor.")
# do metric reduction
f, not_nans = do_metric_reduction(data, self.reduction)
return (f, not_nans) if self.get_not_nans else f
def compute_average_surface_distance(
y_pred: Union[np.ndarray, torch.Tensor],
y: Union[np.ndarray, torch.Tensor],
include_background: bool = False,
symmetric: bool = False,
distance_metric: str = "euclidean",
):
"""
This function is used to compute the Average Surface Distance from `y_pred` to `y`
under the default setting.
In addition, if sets ``symmetric = True``, the average symmetric surface distance between
these two inputs will be returned.
The implementation refers to `DeepMind's implementation <https://github.com/deepmind/surface-distance>`_.
Args:
y_pred: input data to compute, typical segmentation model output.
It must be one-hot format and first dim is batch, example shape: [16, 3, 32, 32]. The values
should be binarized.
        y: ground truth to compute the mean distance. It must be one-hot format and first dim is batch.
The values should be binarized.
include_background: whether to skip distance computation on the first channel of
the predicted output. Defaults to ``False``.
symmetric: whether to calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
distance_metric: : [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
the metric used to compute surface distance. Defaults to ``"euclidean"``.
"""
if not include_background:
y_pred, y = ignore_background(y_pred=y_pred, y=y)
if isinstance(y, torch.Tensor):
y = y.float()
if isinstance(y_pred, torch.Tensor):
y_pred = y_pred.float()
if y.shape != y_pred.shape:
raise ValueError("y_pred and y should have same shapes.")
batch_size, n_class = y_pred.shape[:2]
asd = np.empty((batch_size, n_class))
for b, c in np.ndindex(batch_size, n_class):
(edges_pred, edges_gt) = get_mask_edges(y_pred[b, c], y[b, c])
if not np.any(edges_gt):
warnings.warn(f"the ground truth of class {c} is all 0, this may result in nan/inf distance.")
if not np.any(edges_pred):
warnings.warn(f"the prediction of class {c} is all 0, this may result in nan/inf distance.")
surface_distance = get_surface_distance(edges_pred, edges_gt, distance_metric=distance_metric)
if surface_distance.shape == (0,):
avg_surface_distance = np.nan
else:
avg_surface_distance = surface_distance.mean() # type: ignore
if not symmetric:
asd[b, c] = avg_surface_distance
else:
surface_distance_2 = get_surface_distance(edges_gt, edges_pred, distance_metric=distance_metric)
if surface_distance_2.shape == (0,):
avg_surface_distance_2 = np.nan
else:
avg_surface_distance_2 = surface_distance_2.mean() # type: ignore
asd[b, c] = np.mean((avg_surface_distance, avg_surface_distance_2))
return torch.from_numpy(asd)
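# Illustrative usage sketch (not part of the original module): calling the function above
# with tiny made-up one-hot tensors. It assumes `torch` and the helpers used above
# (ignore_background, get_mask_edges, get_surface_distance) are importable as in this module.
if __name__ == "__main__":
    example_pred = torch.zeros(1, 2, 8, 8)
    example_gt = torch.zeros(1, 2, 8, 8)
    example_pred[0, 1, 2:5, 2:5] = 1  # small predicted square for class 1
    example_gt[0, 1, 3:6, 3:6] = 1  # ground-truth square shifted by one voxel
    example_pred[0, 0] = 1 - example_pred[0, 1]  # background channel completes the one-hot
    example_gt[0, 0] = 1 - example_gt[0, 1]
    # returns a (batch, class) tensor of average surface distances
    print(compute_average_surface_distance(example_pred, example_gt, include_background=False))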
|
the-stack_106_32191 | #!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
INSTALLED_APPS = ['tests.django']
ROOT_URLCONF = 'tests.django.urls'
SECRET_KEY = 'commenter_tests_secret_key'
|
the-stack_106_32193 | # name: pyALL
# created: May 2018
# by: [email protected]
# description: python module to read a Kongsberg ALL sonar file
# notes: See main at end of script for example how to use this
# based on ALL Revision R October 2013
# https://raw.githubusercontent.com/ausseabed/pyall/master/pyall/pyall.py
# See readme.md for more details
import ctypes
import math
import pprint
import struct
import os.path
import time
from datetime import datetime
from datetime import timedelta
import numpy as np
def main():
# open the ALL file for reading by creating a new ALLReader class and
# passing in the filename to open.
filename = "C:/development/Python/Sample.all"
r = ALLReader(filename)
pingCount = 0
start_time = time.time() # time the process
# navigation = r.loadNavigation()
# time the process
# print("Load Navigation Duration: %.2fs" % (time.time() - start_time))
# print (navigation)
while r.moreData():
# read a datagram. If we support it, return the datagram type and a
# class for that datagram
# The user then needs to call the read() method for the class to
# undertake a fileread and binary decode. This keeps the read super
# quick.
typeOfDatagram, datagram = r.readDatagram()
print(typeOfDatagram, end="")
rawbytes = r.readDatagramBytes(datagram.offset, datagram.numberOfBytes)
        # here is how we compute the checksum
# print(sum(rawbytes[5:-3]))
if typeOfDatagram == "3":
datagram.read()
print(datagram.data)
continue
if typeOfDatagram == "A":
datagram.read()
# for a in datagram.Attitude:
# print ("%.5f, %.3f, %.3f, %.3f, %.3f" % (r.to_timestamp(r.to_DateTime(a[0], a[1])), a[3], a[4], a[5], a[6]))
continue
if typeOfDatagram == "C":
datagram.read()
continue
if typeOfDatagram == "D":
datagram.read()
nadirBeam = int(datagram.NBeams / 2)
# print (("Nadir Depth: %.3f AcrossTrack %.3f TransducerDepth %.3f Checksum %s" % (datagram.Depth[nadirBeam], datagram.AcrossTrackDistance[nadirBeam], datagram.TransducerDepth, datagram.checksum)))
continue
if typeOfDatagram == "f":
datagram.read()
if typeOfDatagram == "H":
datagram.read()
if typeOfDatagram == "i":
datagram.read()
continue
if typeOfDatagram == "I":
datagram.read()
# print (datagram.installationParameters)
# print ("Lat: %.5f Lon: %.5f" % (datagram.Latitude, datagram.Longitude))
continue
if typeOfDatagram == "n":
datagram.read()
continue
if typeOfDatagram == "N":
datagram.read()
# print ("Raw Travel Times Recorded for %d beams" % datagram.NumReceiveBeams)
continue
if typeOfDatagram == "O":
datagram.read()
continue
if typeOfDatagram == "R":
datagram.read()
continue
if typeOfDatagram == "U":
datagram.read()
continue
if typeOfDatagram == "X":
datagram.read()
nadirBeam = int(datagram.NBeams / 2)
# print (("Nadir Depth: %.3f AcrossTrack %.3f TransducerDepth %.3f" % (datagram.Depth[nadirBeam], datagram.AcrossTrackDistance[nadirBeam], datagram.TransducerDepth)))
pingCount += 1
continue
if typeOfDatagram == "Y":
datagram.read()
continue
if typeOfDatagram == "k":
datagram.read()
continue
# print the processing time. It is handy to keep an eye on processing
# performance.
print(
"Read Duration: %.3f seconds, pingCount %d"
% (time.time() - start_time, pingCount)
)
r.rewind()
print("Complete reading ALL file :-)")
r.close()
class ALLReader:
"""class to read a Kongsberg EM multibeam .all file"""
# `<` means little endian, following note comes from Kongsberg
#
# Please be aware that the following echo sounders: EM 3002, EM 710,
# EM 302, EM 122, EM 2040, EM 2040C and ME70BO use little endian byte
# order.
ALLPacketHeader_fmt = "<LBBHLLHH"
ALLPacketHeader_len = struct.calcsize(ALLPacketHeader_fmt)
ALLPacketHeader_unpack = struct.Struct(ALLPacketHeader_fmt).unpack_from
def __init__(self, ALLfileName):
if not os.path.isfile(ALLfileName):
print("file not found:", ALLfileName)
self.fileName = ALLfileName
self.fileptr = open(ALLfileName, "rb")
self.fileSize = os.path.getsize(ALLfileName)
self.recordDate = ""
self.recordTime = ""
self.clockCounter = None
self.serialNumber = None
self.recordCounter = 0
def __str__(self):
return pprint.pformat(vars(self))
def currentRecordDateTime(self):
"""return a python date object from the current datagram objects raw
date and time fields """
date_object = datetime.strptime(str(self.recordDate), "%Y%m%d") + timedelta(
0, self.recordTime
)
return date_object
def to_DateTime(self, recordDate, recordTime):
"""return a python date object from a split date and time record"""
date_object = datetime.strptime(str(recordDate), "%Y%m%d") + timedelta(
0, recordTime
)
return date_object
# def to_timestamp(self, dateObject):
# '''return a unix timestamp from a python date object'''
# return (dateObject - datetime(1970, 1, 1)).total_seconds()
def close(self):
"""close the current file"""
self.fileptr.close()
def rewind(self):
"""go back to start of file"""
self.fileptr.seek(0, 0)
def currentPtr(self):
"""report where we are in the file reading process"""
return self.fileptr.tell()
def moreData(self):
"""report how many more bytes there are to read from the file"""
return self.fileSize - self.fileptr.tell()
def readDatagramHeader(self):
"""read the common header for any datagram"""
try:
curr = self.fileptr.tell()
data = self.fileptr.read(self.ALLPacketHeader_len)
s = self.ALLPacketHeader_unpack(data)
numberOfBytes = s[0]
STX = s[1]
typeOfDatagram = chr(s[2])
EMModel = s[3]
RecordDate = s[4]
RecordTime = float(s[5] / 1000.0)
ClockCounter = s[6]
SerialNumber = s[7]
self.recordDate = RecordDate
self.recordTime = RecordTime
self.clockCounter = ClockCounter
self.serialNumber = SerialNumber
# now reset file pointer
self.fileptr.seek(curr, 0)
            # we need to add 4 bytes as the message does not contain the
            # 4 bytes used to hold the size of the message. Trap corrupt
            # datagrams at the end of a file; we see this in EM2040 systems.
            if (curr + numberOfBytes + 4) > self.fileSize:
                numberOfBytes = self.fileSize - curr - 4
                typeOfDatagram = "XXX"
            return (
                numberOfBytes + 4,
                STX,
                typeOfDatagram,
                EMModel,
                RecordDate,
                RecordTime,
                ClockCounter,
                SerialNumber,
            )
except struct.error:
return 0, 0, 0, 0, 0, 0, 0, 0
def readDatagramBytes(self, offset, byteCount):
"""read the entire raw bytes for the datagram without changing the
file pointer. this is used for file conditioning"""
curr = self.fileptr.tell()
# move the file pointer to the start of the record so we can read
# from disc
self.fileptr.seek(offset, 0)
data = self.fileptr.read(byteCount)
self.fileptr.seek(curr, 0)
return data
def getRecordCount(self):
"""read through the entire file as fast as possible to get a count of
all records. useful for progress bars so user can see what is
happening"""
count = 0
start = 0
end = 0
self.rewind()
(
numberOfBytes,
STX,
typeOfDatagram,
EMModel,
RecordDate,
RecordTime,
ClockCounter,
SerialNumber,
) = self.readDatagramHeader()
start = to_timestamp(to_DateTime(RecordDate, RecordTime))
self.rewind()
while self.moreData():
(
numberOfBytes,
STX,
typeOfDatagram,
EMModel,
RecordDate,
RecordTime,
ClockCounter,
SerialNumber,
) = self.readDatagramHeader()
self.fileptr.seek(numberOfBytes, 1)
count += 1
self.rewind()
end = to_timestamp(to_DateTime(RecordDate, RecordTime))
return count, start, end
def readDatagram(self):
"""read the datagram header. This permits us to skip datagrams we do
not support"""
(
numberOfBytes,
STX,
typeOfDatagram,
EMModel,
RecordDate,
RecordTime,
ClockCounter,
SerialNumber,
) = self.readDatagramHeader()
self.recordCounter += 1
if typeOfDatagram == "3": # 3_EXTRA PARAMETERS DECIMAL 51
dg = E_EXTRA(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "A": # A ATTITUDE
dg = A_ATTITUDE(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "C": # C Clock
dg = C_CLOCK(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "D": # D DEPTH
dg = D_DEPTH(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "f": # f Raw Range
dg = f_RAWRANGE(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "G": # G Speed Sound at Head
dg = G_SPEEDSOUNDATHEAD(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "h": # h Height, do not confuse with H_Heading!
dg = h_HEIGHT(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "I": # I Installation (Start)
dg = I_INSTALLATION(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "i": # i Installation (Stop)
dg = I_INSTALLATION(self.fileptr, numberOfBytes)
dg.typeOfDatagram = "i" # override with the install stop code
return dg.typeOfDatagram, dg
if typeOfDatagram == "n": # n ATTITUDE
dg = n_ATTITUDE(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "N": # N Angle and Travel Time
dg = N_TRAVELTIME(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "O": # O_QUALITYFACTOR
dg = O_QUALITYFACTOR(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "R": # R_RUNTIME
dg = R_RUNTIME(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "P": # P Position
dg = P_POSITION(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "U": # U Sound Velocity
dg = U_SVP(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "X": # X Depth
dg = X_DEPTH(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "Y": # Y_SeabedImage
dg = Y_SEABEDIMAGE(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "k": # k_WaterColumn
dg = k_WATERCOLUMN(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
if typeOfDatagram == "S": # S_Seabed Image
dg = S_SEABEDIMAGE(self.fileptr, numberOfBytes)
return dg.typeOfDatagram, dg
else:
dg = UNKNOWN_RECORD(self.fileptr, numberOfBytes, typeOfDatagram)
return dg.typeOfDatagram, dg
# self.fileptr.seek(numberOfBytes, 1)
def loadInstallationRecords(self):
"""loads all the installation into lists"""
installStart = None
installStop = None
# initialMode = None
datagram = None
self.rewind()
while self.moreData():
typeOfDatagram, datagram = self.readDatagram()
if typeOfDatagram == "I":
installStart = self.readDatagramBytes(datagram.offset, datagram.numberOfBytes)
datagram.read()
break
if typeOfDatagram == "i":
installStop = self.readDatagramBytes(datagram.offset, datagram.numberOfBytes)
#break
self.rewind()
return installStart, installStop, datagram
def loadCenterFrequency(self):
"""determine the central frequency of the first record in the file"""
centerFrequency = 0
self.rewind()
while self.moreData():
typeOfDatagram, datagram = self.readDatagram()
if typeOfDatagram == "N":
datagram.read()
centerFrequency = datagram.CentreFrequency[0]
break
self.rewind()
return centerFrequency
def loadDepthMode(self):
"""determine the central frequency of the first record in the file"""
initialDepthMode = ""
self.rewind()
while self.moreData():
typeOfDatagram, datagram = self.readDatagram()
if typeOfDatagram == "R":
datagram.read()
initialDepthMode = datagram.DepthMode
break
self.rewind()
return initialDepthMode
def loadNavigation_runtime(self, firstRecordOnly=False):
        '''loads all the navigation into a list
        and all the runtime records into a dict
        '''
navigation = []
runtime = {}
lat = 0
long = 0
selectedPositioningSystem = None
self.rewind()
while self.moreData():
typeOfDatagram, datagram = self.readDatagram()
if (typeOfDatagram == 'P'):
datagram.read()
recDate = self.currentRecordDateTime()
if (selectedPositioningSystem == None):
selectedPositioningSystem = datagram.Descriptor
if (selectedPositioningSystem == datagram.Descriptor):
# for python 2.7
lat = datagram.Latitude
long = datagram.Longitude
navigation.append([to_timestamp(recDate), datagram.Latitude, datagram.Longitude])
# for python 3.4
#navigation.append([recDate.timestamp(), datagram.Latitude, datagram.Longitude])
if firstRecordOnly: #we only want the first record, so reset the file pointer and quit
self.rewind()
return navigation
if (typeOfDatagram == 'R'): # Get First
datagram.read()
recDate = self.currentRecordDateTime().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
#datagram.parameters()
runtime[recDate + ',' + str(datagram.Counter) + ',' + str(lat) + ',' + str(long)] = datagram.parameters()
self.rewind()
return navigation, runtime
def loadNavigation(self, firstRecordOnly=False):
"""loads all the navigation into lists"""
navigation = []
selectedPositioningSystem = None
self.rewind()
while self.moreData():
typeOfDatagram, datagram = self.readDatagram()
if typeOfDatagram == "P":
datagram.read()
recDate = self.currentRecordDateTime()
if selectedPositioningSystem is None:
selectedPositioningSystem = datagram.Descriptor
if selectedPositioningSystem == datagram.Descriptor:
# for python 2.7
navigation.append(
[to_timestamp(recDate), datagram.Latitude, datagram.Longitude]
)
# for python 3.4
# navigation.append(
# [recDate.timestamp(),
# datagram.Latitude,
# datagram.Longitude
# ])
# we only want the first record, so reset the file pointer
# and quit
if firstRecordOnly:
self.rewind()
return navigation
self.rewind()
return navigation
def getDatagramName(self, typeOfDatagram):
"""Convert the datagram type from the code to a user readable string.
Handy for displaying to the user"""
# Multibeam Data
if typeOfDatagram == "D":
return "D_Depth"
if typeOfDatagram == "X":
return "XYZ_Depth"
if typeOfDatagram == "K":
return "K_CentralBeam"
if typeOfDatagram == "F":
return "F_RawRange"
if typeOfDatagram == "f":
return "f_RawRange"
if typeOfDatagram == "N":
return "N_RawRange"
if typeOfDatagram == "S":
return "S_SeabedImage"
if typeOfDatagram == "Y":
return "Y_SeabedImage"
if typeOfDatagram == "k":
return "k_WaterColumn"
if typeOfDatagram == "O":
return "O_QualityFactor"
# ExternalSensors
if typeOfDatagram == "A":
return "A_Attitude"
if typeOfDatagram == "n":
return "network_Attitude"
if typeOfDatagram == "C":
return "C_Clock"
if typeOfDatagram == "h":
return "h_Height"
if typeOfDatagram == "H":
return "H_Heading"
if typeOfDatagram == "P":
return "P_Position"
if typeOfDatagram == "E":
return "E_SingleBeam"
if typeOfDatagram == "T":
return "T_Tide"
# SoundSpeed
if typeOfDatagram == "G":
return "G_SpeedSoundAtHead"
if typeOfDatagram == "U":
return "U_SpeedSoundProfile"
if typeOfDatagram == "W":
return "W_SpeedSOundProfileUsed"
# Multibeam parameters
if typeOfDatagram == "I":
return "I_Installation_Start"
if typeOfDatagram == "i":
return "i_Installation_Stop"
if typeOfDatagram == "R":
return "R_Runtime"
if typeOfDatagram == "J":
return "J_TransducerTilt"
if typeOfDatagram == "3":
return "3_ExtraParameters"
# PU information and status
if typeOfDatagram == "0":
return "0_PU_ID"
if typeOfDatagram == "1":
return "1_PU_Status"
if typeOfDatagram == "B":
return "B_BIST_Result"
class cBeam:
def __init__(self, beamDetail, angle):
self.sortingDirection = beamDetail[0]
self.detectionInfo = beamDetail[1]
self.numberOfSamplesPerBeam = beamDetail[2]
self.centreSampleNumber = beamDetail[3]
self.sector = 0
self.takeOffAngle = angle # used for ARC computation
self.sampleSum = 0 # used for backscatter ARC computation process
self.samples = []
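# Note: set_bit() and isBitSet() are small module-level helpers used by the encoder and
# runtime classes below; their real definitions live elsewhere in this module. A minimal,
# functionally equivalent sketch of the assumed behaviour (set / test one bit of an integer
# flag byte) is shown here for clarity:
def isBitSet(int_type, offset):
    """return True if the bit at 'offset' of 'int_type' is set"""
    return (int_type & (1 << offset)) != 0
def set_bit(int_type, offset):
    """return 'int_type' with the bit at 'offset' set to 1"""
    return int_type | (1 << offset)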
class A_ATTITUDE_ENCODER:
def __init__(self):
self.data = 0
def encode(self, recordsToAdd, counter):
"""Encode a list of attitude records where the format is timestamp,
roll, pitch, heave heading"""
if len(recordsToAdd) == 0:
return
fullDatagram = bytearray()
header_fmt = "=LBBHLLHHH"
header_len = struct.calcsize(header_fmt)
rec_fmt = "HHhhhHB"
rec_len = struct.calcsize(rec_fmt)
footer_fmt = "=BH"
footer_len = struct.calcsize(footer_fmt)
STX = 2
typeOfDatagram = 65
model = 2045
systemDescriptor = 0
# set heading is ENABLED (go figure!)
systemDescriptor = set_bit(systemDescriptor, 0)
serialNumber = 999
numEntries = len(recordsToAdd)
fullDatagramByteCount = header_len + (rec_len * len(recordsToAdd)) + footer_len
# we need to know the first record timestamp as all observations are
# milliseconds from that time
firstRecordTimestamp = float(recordsToAdd[0][0])
firstRecordDate = from_timestamp(firstRecordTimestamp)
recordDate = int(dateToKongsbergDate(firstRecordDate))
recordTime = int(dateToSecondsSinceMidnight(firstRecordDate) * 1000)
# we need to deduct 4 bytes as the field does not account for the
# 4-byte message length data which precedes the message
try:
header = struct.pack(
header_fmt,
fullDatagramByteCount - 4,
STX,
typeOfDatagram,
model,
recordDate,
recordTime,
counter,
serialNumber,
numEntries,
)
except Exception as ex:
print("error encoding attitude")
# header = struct.pack(
# header_fmt, fullDatagramByteCount-4, STX, typeOfDatagram,
# model, recordDate, recordTime, counter, serialNumber,
# numEntries
# )
fullDatagram = fullDatagram + header
        # now pack every record from the list
for record in recordsToAdd:
# compute the millisecond offset of the record from the first
# record in the datagram
timeMillisecs = round((float(record[0]) - firstRecordTimestamp) * 1000)
sensorStatus = 0
roll = float(record[1])
pitch = float(record[2])
heave = float(record[3])
heading = float(record[4])
try:
bodyRecord = struct.pack(
rec_fmt,
timeMillisecs,
sensorStatus,
int(roll * 100),
int(pitch * 100),
int(heave * 100),
int(heading * 100),
systemDescriptor,
)
except Exception as ex:
print("error encoding attitude")
bodyRecord = struct.pack(
rec_fmt,
timeMillisecs,
sensorStatus,
int(roll * 100),
int(pitch * 100),
int(heave * 100),
int(heading * 100),
systemDescriptor,
)
fullDatagram = fullDatagram + bodyRecord
# now do the footer
# # set roll is DISABLED
# systemDescriptor = set_bit(systemDescriptor, 1)
# # set pitch is DISABLED
# systemDescriptor = set_bit(systemDescriptor, 2)
# # set heave is DISABLED
# systemDescriptor = set_bit(systemDescriptor, 3)
# # set SENSOR as system 2
# systemDescriptor = set_bit(systemDescriptor, 4)
# systemDescriptor = 30
ETX = 3
checksum = sum(fullDatagram[5:]) % 65536
footer = struct.pack("=BH", ETX, checksum)
fullDatagram = fullDatagram + footer
# TEST THE CRC CODE pkpk
# c = CRC16()
# chk = c.calculate(fullDatagram)
return fullDatagram
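# Illustrative sketch (assumption, not original code): encoding two made-up attitude
# observations with A_ATTITUDE_ENCODER and appending the datagram to an open binary file.
# The encoder relies on the module helpers (from_timestamp, dateToKongsbergDate,
# dateToSecondsSinceMidnight) referenced above.
def exampleWriteAttitude(outFilePtr):
    encoder = A_ATTITUDE_ENCODER()
    now = time.time()
    recordsToAdd = [
        # timestamp, roll, pitch, heave, heading
        [now, 0.5, -0.3, 0.1, 123.4],
        [now + 0.1, 0.6, -0.2, 0.1, 123.5],
    ]
    datagram = encoder.encode(recordsToAdd, counter=1)
    outFilePtr.write(datagram)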
class A_ATTITUDE:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "A"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.data = ""
self.fileptr.seek(numberOfBytes, 1)
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.NumberEntries = s[8]
rec_fmt = "=HHhhhH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
# we need to store all the attitude data in a list
self.Attitude = [0 for i in range(self.NumberEntries)]
i = 0
while i < self.NumberEntries:
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
# time,status,roll,pitch,heave,heading
self.Attitude[i] = [
self.RecordDate,
self.Time + float(s[0] / 1000.0),
s[1],
s[2] / 100.0,
s[3] / 100.0,
s[4] / 100.0,
s[5] / 100.0,
]
i = i + 1
rec_fmt = "=BBH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
self.systemDescriptor = s[0]
self.ETX = s[1]
self.checksum = s[2]
class C_CLOCK:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "C"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.data = ""
self.fileptr.seek(numberOfBytes, 1)
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHLLBBH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
# bytesRead = rec_len
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.time = float(s[5] / 1000.0)
self.ClockCounter = s[6]
self.SerialNumber = s[7]
self.ExternalDate = s[8]
self.ExternalTime = s[9] / 1000.0
self.PPS = s[10]
self.ETX = s[11]
self.checksum = s[12]
def __str__(self):
if self.PPS == 0:
ppsInUse = "PPS NOT in use"
else:
ppsInUse = "PPS in use"
s = "%d,%d,%.3f,%.3f,%.3f,%s" % (
self.RecordDate,
self.ExternalDate,
self.time,
self.ExternalTime,
self.time - self.ExternalTime,
ppsInUse,
)
return s
class D_DEPTH:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "D"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.data = ""
self.fileptr.seek(numberOfBytes, 1)
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHHHHBBBBH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.Heading = float(s[8] / float(100))
self.SoundSpeedAtTransducer = float(s[9] / float(10))
self.TransducerDepth = float(s[10] / float(100))
self.MaxBeams = s[11]
self.NBeams = s[12]
self.ZResolution = float(s[13] / float(100))
self.XYResolution = float(s[14] / float(100))
self.SampleFrequency = s[15]
self.Depth = [0 for i in range(self.NBeams)]
self.AcrossTrackDistance = [0 for i in range(self.NBeams)]
self.AlongTrackDistance = [0 for i in range(self.NBeams)]
self.BeamDepressionAngle = [0 for i in range(self.NBeams)]
self.BeamAzimuthAngle = [0 for i in range(self.NBeams)]
self.Range = [0 for i in range(self.NBeams)]
self.QualityFactor = [0 for i in range(self.NBeams)]
self.LengthOfDetectionWindow = [0 for i in range(self.NBeams)]
self.Reflectivity = [0 for i in range(self.NBeams)]
self.BeamNumber = [0 for i in range(self.NBeams)]
# now read the variable part of the Record
if self.EMModel < 700:
rec_fmt = "=H3h2H2BbB"
else:
rec_fmt = "=4h2H2BbB"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
i = 0
while i < self.NBeams:
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
self.Depth[i] = float(s[0] / float(100))
self.AcrossTrackDistance[i] = float(s[1] / float(100))
self.AlongTrackDistance[i] = float(s[2] / float(100))
self.BeamDepressionAngle[i] = float(s[3] / float(100))
self.BeamAzimuthAngle[i] = float(s[4] / float(100))
self.Range[i] = float(s[5] / float(100))
self.QualityFactor[i] = s[6]
self.LengthOfDetectionWindow[i] = s[7]
self.Reflectivity[i] = float(s[8] / float(100))
self.BeamNumber[i] = s[9]
# now do some sanity checks. We have examples where the Depth
# and Across track values are NaN
if math.isnan(self.Depth[i]):
self.Depth[i] = 0
if math.isnan(self.AcrossTrackDistance[i]):
self.AcrossTrackDistance[i] = 0
if math.isnan(self.AlongTrackDistance[i]):
self.AlongTrackDistance[i] = 0
i = i + 1
rec_fmt = "=bBH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
self.RangeMultiplier = s[0]
self.ETX = s[1]
self.checksum = s[2]
def encode(self):
"""Encode a Depth D datagram record"""
header_fmt = "=LBBHLLHHHHHBBBBH"
header_len = struct.calcsize(header_fmt)
fullDatagram = bytearray()
# now read the variable part of the Record
if self.EMModel < 700:
rec_fmt = "=H3h2H2BbB"
else:
rec_fmt = "=4h2H2BbB"
rec_len = struct.calcsize(rec_fmt)
footer_fmt = "=BBH"
footer_len = struct.calcsize(footer_fmt)
fullDatagramByteCount = header_len + (rec_len * self.NBeams) + footer_len
# pack the header
recordTime = int(dateToSecondsSinceMidnight(from_timestamp(self.Time)) * 1000)
header = struct.pack(
header_fmt,
fullDatagramByteCount - 4,
self.STX,
ord(self.typeOfDatagram),
self.EMModel,
self.RecordDate,
recordTime,
int(self.Counter),
int(self.SerialNumber),
int(self.Heading * 100),
int(self.SoundSpeedAtTransducer * 10),
int(self.TransducerDepth * 100),
int(self.MaxBeams),
int(self.NBeams),
int(self.ZResolution * 100),
int(self.XYResolution * 100),
int(self.SampleFrequency),
)
fullDatagram = fullDatagram + header
header_fmt = "=LBBHLLHHHHHBBBBH"
# pack the beam summary info
for i in range(self.NBeams):
bodyRecord = struct.pack(
rec_fmt,
int(self.Depth[i] * 100),
int(self.AcrossTrackDistance[i] * 100),
int(self.AlongTrackDistance[i] * 100),
int(self.BeamDepressionAngle[i] * 100),
int(self.BeamAzimuthAngle[i] * 100),
int(self.Range[i] * 100),
self.QualityFactor[i],
self.LengthOfDetectionWindow[i],
int(self.Reflectivity[i] * 100),
self.BeamNumber[i],
)
fullDatagram = fullDatagram + bodyRecord
tmp = struct.pack("=b", self.RangeMultiplier)
fullDatagram = fullDatagram + tmp
# now pack the footer
# systemDescriptor = 1
ETX = 3
checksum = sum(fullDatagram[5:]) % 65536
footer = struct.pack("=BH", ETX, checksum)
fullDatagram = fullDatagram + footer
return fullDatagram
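# Illustrative sketch (assumption, not original code): a decode / re-encode round trip for
# D depth datagrams, e.g. as part of a file conditioning workflow. Unsupported records are
# copied verbatim with readDatagramBytes(); D records are decoded, optionally edited, and
# re-encoded with the encode() method above.
def exampleConditionDepthDatagrams(sourceFilename, destinationFilename):
    reader = ALLReader(sourceFilename)
    with open(destinationFilename, "wb") as out:
        while reader.moreData():
            typeOfDatagram, datagram = reader.readDatagram()
            if typeOfDatagram == "D":
                datagram.read()
                # (edits to datagram.Depth / datagram.AcrossTrackDistance would go here)
                out.write(datagram.encode())
            else:
                out.write(reader.readDatagramBytes(datagram.offset, datagram.numberOfBytes))
    reader.close()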
class E_EXTRA:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "3"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.ExtraData = ""
self.fileptr.seek(numberOfBytes, 1)
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.ContentIdentifier = s[8]
# now read the variable position part of the Record
if self.numberOfBytes % 2 != 0:
bytesToRead = self.numberOfBytes - rec_len - 5 # 'sBBH'
else:
bytesToRead = self.numberOfBytes - rec_len - 4 # 'sBH'
# now read the block of data whatever it may contain
self.data = self.fileptr.read(bytesToRead)
# # now spare byte only if necessary
# if self.numberOfBytes % 2 != 0:
# self.fileptr.read(1)
# read an empty byte
self.fileptr.read(1)
# now read the footer
self.ETX, self.checksum = readFooter(self.numberOfBytes, self.fileptr)
###############################################################################
class f_RAWRANGE:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "f"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.data = ""
self.fileptr.seek(numberOfBytes, 1)
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHH HHLl4H"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
bytesRead = rec_len
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.PingCounter = s[6]
self.SerialNumber = s[7]
self.NumTransmitSector = s[8]
self.NumReceiveBeams = s[9]
self.SampleFrequency = float(s[10] / 100)
self.ROVDepth = s[11]
self.SoundSpeedAtTransducer = s[12] / 10
self.MaxBeams = s[13]
self.Spare1 = s[14]
self.Spare2 = s[15]
self.TiltAngle = [0 for i in range(self.NumTransmitSector)]
self.FocusRange = [0 for i in range(self.NumTransmitSector)]
self.SignalLength = [0 for i in range(self.NumTransmitSector)]
self.SectorTransmitDelay = [0 for i in range(self.NumTransmitSector)]
self.CentreFrequency = [0 for i in range(self.NumTransmitSector)]
self.MeanAbsorption = [0 for i in range(self.NumTransmitSector)]
self.SignalWaveformID = [0 for i in range(self.NumTransmitSector)]
self.TransmitSectorNumberTX = [0 for i in range(self.NumTransmitSector)]
self.SignalBandwidth = [0 for i in range(self.NumTransmitSector)]
self.BeamPointingAngle = [0 for i in range(self.NumReceiveBeams)]
self.TransmitSectorNumber = [0 for i in range(self.NumReceiveBeams)]
self.DetectionInfo = [0 for i in range(self.NumReceiveBeams)]
self.DetectionWindow = [0 for i in range(self.NumReceiveBeams)]
self.QualityFactor = [0 for i in range(self.NumReceiveBeams)]
self.DCorr = [0 for i in range(self.NumReceiveBeams)]
self.TwoWayTravelTime = [0 for i in range(self.NumReceiveBeams)]
self.Reflectivity = [0 for i in range(self.NumReceiveBeams)]
self.RealtimeCleaningInformation = [0 for i in range(self.NumReceiveBeams)]
self.Spare = [0 for i in range(self.NumReceiveBeams)]
self.BeamNumber = [0 for i in range(self.NumReceiveBeams)]
# # now read the variable part of the Transmit Record
rec_fmt = "=hHLLLHBB"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
for i in range(self.NumTransmitSector):
data = self.fileptr.read(rec_len)
bytesRead += rec_len
s = rec_unpack(data)
self.TiltAngle[i] = float(s[0]) / 100.0
self.FocusRange[i] = s[1] / 10
self.SignalLength[i] = s[2]
self.SectorTransmitDelay[i] = s[3]
self.CentreFrequency[i] = s[4]
self.SignalBandwidth[i] = s[5]
self.SignalWaveformID[i] = s[6]
self.TransmitSectorNumberTX[i] = s[7]
        # now read the variable part of the receive record
rx_rec_fmt = "=hHBbBBhH"
rx_rec_len = struct.calcsize(rx_rec_fmt)
rx_rec_unpack = struct.Struct(rx_rec_fmt).unpack
for i in range(self.NumReceiveBeams):
data = self.fileptr.read(rx_rec_len)
rx_s = rx_rec_unpack(data)
bytesRead += rx_rec_len
self.BeamPointingAngle[i] = float(rx_s[0]) / 100.0
self.TwoWayTravelTime[i] = float(rx_s[1]) / (4 * self.SampleFrequency)
self.TransmitSectorNumber[i] = rx_s[2]
self.Reflectivity[i] = rx_s[3] / 2.0
self.QualityFactor[i] = rx_s[4]
self.DetectionWindow[i] = rx_s[5]
self.BeamNumber[i] = rx_s[6]
rec_fmt = "=BBH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
self.ETX = s[1]
self.checksum = s[2]
def encode(self):
"""Encode a Depth f datagram record"""
systemDescriptor = 1
header_fmt = "=LBBHLLHH HHLl4H"
header_len = struct.calcsize(header_fmt)
fullDatagram = bytearray()
# # now read the variable part of the Transmit Record
rec_fmt = "=hHLLLHBB"
rec_len = struct.calcsize(rec_fmt)
        # now read the variable part of the receive record
rx_rec_fmt = "=hHBbBBhHB"
rx_rec_len = struct.calcsize(rx_rec_fmt)
footer_fmt = "=BH"
footer_len = struct.calcsize(footer_fmt)
fullDatagramByteCount = (
header_len
+ (rec_len * self.NumTransmitSector)
+ (rx_rec_len * self.NumReceiveBeams)
+ footer_len
)
# pack the header
recordTime = int(dateToSecondsSinceMidnight(from_timestamp(self.Time)) * 1000)
header = struct.pack(
header_fmt,
fullDatagramByteCount - 4,
self.STX,
ord(self.typeOfDatagram),
self.EMModel,
self.RecordDate,
recordTime,
self.PingCounter,
self.SerialNumber,
self.NumTransmitSector,
self.NumReceiveBeams,
int(self.SampleFrequency * 100),
self.ROVDepth,
int(self.SoundSpeedAtTransducer * 10),
self.MaxBeams,
self.Spare1,
self.Spare2,
)
fullDatagram = fullDatagram + header
for i in range(self.NumTransmitSector):
sectorRecord = struct.pack(
rec_fmt,
int(self.TiltAngle[i] * 100),
int(self.FocusRange[i] * 10),
self.SignalLength[i],
self.SectorTransmitDelay[i],
self.CentreFrequency[i],
self.SignalBandwidth[i],
self.SignalWaveformID[i],
self.TransmitSectorNumberTX[i],
)
fullDatagram = fullDatagram + sectorRecord
# pack the beam summary info
for i in range(self.NumReceiveBeams):
bodyRecord = struct.pack(
rx_rec_fmt,
int(self.BeamPointingAngle[i] * 100.0),
int(self.TwoWayTravelTime[i] * (4 * self.SampleFrequency)),
self.TransmitSectorNumber[i],
int(self.Reflectivity[i] * 2.0),
self.QualityFactor[i],
self.DetectionWindow[i],
self.BeamNumber[i],
self.Spare1,
systemDescriptor,
)
fullDatagram = fullDatagram + bodyRecord
# now pack the footer
ETX = 3
checksum = sum(fullDatagram[5:]) % 65536
footer = struct.pack("=BH", ETX, checksum)
fullDatagram = fullDatagram + footer
return fullDatagram
class G_SPEEDSOUNDATHEAD:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "G"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.fileptr.seek(numberOfBytes, 1)
self.data = ""
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHH" + "H"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
bytesRead = rec_len
s = rec_unpack(self.fileptr.read(rec_len))
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.NumberOfEntries = s[8]
self.SoundSpeedEntries = []
# now read the variable part of the sound speed record
ss_entry_fmt = "=HH"
ss_entry_len = struct.calcsize(ss_entry_fmt)
ss_entry_unpack = struct.Struct(ss_entry_fmt).unpack
for i in range(self.NumberOfEntries):
data = self.fileptr.read(ss_entry_len)
ss_entry_s = ss_entry_unpack(data)
bytesRead += ss_entry_len
timeInSecondsSinceRecordStart = ss_entry_s[0]
# Sound speed in dm/s (incl. offset)
soundSpeed = ss_entry_s[1]
self.SoundSpeedEntries.append([timeInSecondsSinceRecordStart, soundSpeed])
spare_fmt = "=B"
spare_len = struct.calcsize(spare_fmt)
spare_unpack = struct.Struct(spare_fmt).unpack
spare_data = self.fileptr.read(spare_len)
spare_values = spare_unpack(spare_data)
assert spare_values[0] == 0 # as stated on kongsberg doc
# now read the footer
self.ETX, self.checksum = readFooter(self.numberOfBytes, self.fileptr)
def __repr__(self):
entriesstr = ""
for entry in self.SoundSpeedEntries:
entriesstr += " {}, {}\n".format(*entry)
return (
"STX {} \n"
"typeOfDatagram {} \n"
"NumberOfEntries {} \n"
"Sound speed entries: \n{} \n"
.format(self.STX, self.typeOfDatagram, self.NumberOfEntries, entriesstr)
)
class h_HEIGHT:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "h"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.fileptr.seek(numberOfBytes, 1)
self.data = ""
self.Height = None
self.HeightType = None
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHlB"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(self.fileptr.read(rec_len))
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.Height = float(s[8] / float(100))
self.HeightType = s[9]
# now read the footer
self.ETX, self.checksum = readFooter(self.numberOfBytes, self.fileptr)
def __repr__(self):
return (
"STX {} \n"
"typeOfDatagram {} \n"
"Height {} \n"
"HeightType {} \n"
.format(self.STX, self.typeOfDatagram, self.Height, self.HeightType)
)
class h_HEIGHT_ENCODER:
def __init__(self):
self.data = 0
def encode(self, height, recordDate, recordTime, counter):
"""Encode a Height datagram record"""
rec_fmt = "=LBBHLLHHlB"
rec_len = struct.calcsize(rec_fmt)
# 0 = the height of the waterline at the vertical datum (from KM #
# datagram manual)
heightType = 0
serialNumber = 999
STX = 2
typeOfDatagram = "h"
checksum = 0
        # needs to be a sensible value so the record is valid. Maybe it would be
        # better to pass this from above
model = 2045
try:
fullDatagram = struct.pack(
rec_fmt,
rec_len - 4,
STX,
ord(typeOfDatagram),
model,
int(recordDate),
int(recordTime),
counter,
serialNumber,
int(height * 100),
int(heightType),
)
ETX = 3
checksum = sum(fullDatagram[5:]) % 65536
footer = struct.pack("=BH", ETX, checksum)
fullDatagram = fullDatagram + footer
except:
print("error encoding height field")
# header = struct.pack(rec_fmt, rec_len-4, STX, ord(typeOfDatagram), model, int(recordDate), int(recordTime), counter, serialNumber, int(height * 100), int(heightType), ETX, checksum)
return fullDatagram
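# Illustrative sketch (assumption, not original code): building a height datagram from a
# python datetime with h_HEIGHT_ENCODER. dateToKongsbergDate and dateToSecondsSinceMidnight
# are module helpers referenced elsewhere in this file.
def exampleEncodeHeight(height, when, counter=0):
    encoder = h_HEIGHT_ENCODER()
    recordDate = dateToKongsbergDate(when)
    recordTime = dateToSecondsSinceMidnight(when) * 1000  # milliseconds since midnight
    return encoder.encode(height, recordDate, recordTime, counter)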
class I_INSTALLATION:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = 'I' # assign the KM code for this datagram type
self.offset = fileptr.tell() # remember where this packet resides in the file so we can return if needed
        self.numberOfBytes = numberOfBytes # remember how many bytes this packet contains. This includes the first 4 bytes representing the number of bytes in the datagram
self.fileptr = fileptr # remember the file pointer so we do not need to pass from the host process
self.fileptr.seek(numberOfBytes, 1) # move the file pointer to the end of the record so we can skip as the default actions
self.data = ""
def read(self):
# move the file pointer to the start of the record so we can read
# from disc
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLL3H"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
# read the record from disc
bytesRead = rec_len
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.SurveyLineNumber = s[6]
self.SerialNumber = s[7]
self.SecondarySerialNumber = s[8]
totalAsciiBytes = (
self.numberOfBytes - rec_len
) # we do not need to read the header twice
data = self.fileptr.read(totalAsciiBytes) # read the record from disc
bytesRead = bytesRead + totalAsciiBytes
parameters = data.decode("utf-8", errors="ignore").split(",")
self.installationParameters = {}
for p in parameters:
parts = p.split("=")
# print (parts)
if len(parts) > 1:
self.installationParameters[parts[0]] = parts[1].strip()
# read any trailing bytes. We have seen the need for this with some
# .all files.
if bytesRead < self.numberOfBytes:
self.fileptr.read(int(self.numberOfBytes - bytesRead))
class n_ATTITUDE:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "n"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.data = ""
self.fileptr.seek(numberOfBytes, 1)
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHHbB"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.NumberEntries = s[8]
self.SystemDescriptor = s[9]
rec_fmt = "=HhhhHB"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
# we need to store all the attitude data in a list
self.Attitude = [0 for i in range(self.NumberEntries)]
i = 0
while i < self.NumberEntries:
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
inputTelegramSize = s[5]
data = self.fileptr.read(inputTelegramSize)
self.Attitude[i] = [
self.RecordDate,
self.Time + s[0] / 1000,
s[1],
s[2] / 100.0,
s[3] / 100.0,
s[4] / 100.0,
s[5],
data,
]
i = i + 1
# # now spare byte only if necessary
# if self.numberOfBytes % 2 != 0:
# self.fileptr.read(1)
# read an empty byte
self.fileptr.read(1)
# now read the footer
self.ETX, self.checksum = readFooter(self.numberOfBytes, self.fileptr)
class N_TRAVELTIME:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "N"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.data = ""
self.fileptr.seek(numberOfBytes, 1)
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHHHHHfL"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
bytesRead = rec_len
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.SoundSpeedAtTransducer = s[8]
self.NumTransmitSector = s[9]
self.NumReceiveBeams = s[10]
self.NumValidDetect = s[11]
self.SampleFrequency = float(s[12])
self.DScale = s[13]
self.TiltAngle = [0 for i in range(self.NumTransmitSector)]
self.FocusRange = [0 for i in range(self.NumTransmitSector)]
self.SignalLength = [0 for i in range(self.NumTransmitSector)]
self.SectorTransmitDelay = [0 for i in range(self.NumTransmitSector)]
self.CentreFrequency = [0 for i in range(self.NumTransmitSector)]
self.MeanAbsorption = [0 for i in range(self.NumTransmitSector)]
self.SignalWaveformID = [0 for i in range(self.NumTransmitSector)]
self.TransmitSectorNumberTX = [0 for i in range(self.NumTransmitSector)]
self.SignalBandwidth = [0 for i in range(self.NumTransmitSector)]
self.BeamPointingAngle = [0 for i in range(self.NumReceiveBeams)]
self.TransmitSectorNumber = [0 for i in range(self.NumReceiveBeams)]
self.DetectionInfo = [0 for i in range(self.NumReceiveBeams)]
self.DetectionWindow = [0 for i in range(self.NumReceiveBeams)]
self.QualityFactor = [0 for i in range(self.NumReceiveBeams)]
self.DCorr = [0 for i in range(self.NumReceiveBeams)]
self.TwoWayTravelTime = [0 for i in range(self.NumReceiveBeams)]
self.Reflectivity = [0 for i in range(self.NumReceiveBeams)]
self.RealtimeCleaningInformation = [0 for i in range(self.NumReceiveBeams)]
self.Spare = [0 for i in range(self.NumReceiveBeams)]
# # now read the variable part of the Transmit Record
rec_fmt = "=hHfffHBBf"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
for i in range(self.NumTransmitSector):
data = self.fileptr.read(rec_len)
bytesRead += rec_len
s = rec_unpack(data)
self.TiltAngle[i] = float(s[0]) / float(100)
self.FocusRange[i] = s[1]
self.SignalLength[i] = float(s[2])
self.SectorTransmitDelay[i] = float(s[3])
self.CentreFrequency[i] = float(s[4])
self.MeanAbsorption[i] = s[5]
self.SignalWaveformID[i] = s[6]
self.TransmitSectorNumberTX[i] = s[7]
self.SignalBandwidth[i] = float(s[8])
        # now read the variable part of the receive record
rx_rec_fmt = "=hBBHBbfhbB"
rx_rec_len = struct.calcsize(rx_rec_fmt)
rx_rec_unpack = struct.Struct(rx_rec_fmt).unpack
for i in range(self.NumReceiveBeams):
data = self.fileptr.read(rx_rec_len)
rx_s = rx_rec_unpack(data)
bytesRead += rx_rec_len
self.BeamPointingAngle[i] = float(rx_s[0]) / float(100)
self.TransmitSectorNumber[i] = rx_s[1]
self.DetectionInfo[i] = rx_s[2]
self.DetectionWindow[i] = rx_s[3]
self.QualityFactor[i] = rx_s[4]
self.DCorr[i] = rx_s[5]
self.TwoWayTravelTime[i] = float(rx_s[6])
self.Reflectivity[i] = rx_s[7]
self.RealtimeCleaningInformation[i] = rx_s[8]
self.Spare[i] = rx_s[9]
rec_fmt = "=BBH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
self.ETX = s[1]
self.checksum = s[2]
class O_QUALITYFACTOR:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "O"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.data = ""
self.fileptr.seek(numberOfBytes, 1)
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHHBB"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(self.fileptr.read(rec_len))
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.NBeams = s[8]
self.NParPerBeam = s[9]
self.Spare = s[10]
self.QualityFactor = [0 for i in range(self.NBeams)]
rec_fmt = "=" + str(self.NParPerBeam) + "f"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
i = 0
while i < self.NBeams:
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
self.QualityFactor[i] = float(s[0])
i = i + 1
rec_fmt = "=bBH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
self.RangeMultiplier = s[0]
self.ETX = s[1]
self.checksum = s[2]
def encode(self):
"""Encode an O_QUALITYFACTOR datagram record"""
header_fmt = "=LBBHLLHHHBB"
header_len = struct.calcsize(header_fmt)
fullDatagram = bytearray()
# now read the variable part of the Record
rec_fmt = "=" + str(self.NParPerBeam) + "f"
rec_len = struct.calcsize(rec_fmt)
# rec_unpack = struct.Struct(rec_fmt).unpack
footer_fmt = "=BBH"
footer_len = struct.calcsize(footer_fmt)
fullDatagramByteCount = (
header_len + (rec_len * self.NBeams * self.NParPerBeam) + footer_len
)
# pack the header
recordTime = int(dateToSecondsSinceMidnight(from_timestamp(self.Time)) * 1000)
header = struct.pack(
header_fmt,
fullDatagramByteCount - 4,
self.STX,
ord(self.typeOfDatagram),
self.EMModel,
self.RecordDate,
recordTime,
int(self.Counter),
int(self.SerialNumber),
int(self.NBeams),
int(self.NParPerBeam),
int(self.Spare),
)
fullDatagram = fullDatagram + header
# pack the beam summary info
for i in range(self.NBeams):
# for j in range (self.NParPerBeam):
# for now pack the same value. If we see any .all files with more
# than 1, we can test and fix this. pkpk
bodyRecord = struct.pack(rec_fmt, float(self.QualityFactor[i]))
fullDatagram = fullDatagram + bodyRecord
# now pack the footer
# systemDescriptor = 1
ETX = 3
checksum = sum(fullDatagram[5:]) % 65536
footer = struct.pack(footer_fmt, 0, ETX, checksum)
fullDatagram = fullDatagram + footer
return fullDatagram
class P_POSITION:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "P" # assign the KM code for this datagram type
# remember where this packet resides in the file so we can return if
# needed
self.offset = fileptr.tell()
self.numberOfBytes = (
numberOfBytes # remember how many bytes this packet contains
)
# remember the file pointer so we do not need to pass from the host
# process
self.fileptr = fileptr
# move the file pointer to the end of the record so we can skip as
# the default actions
self.fileptr.seek(numberOfBytes, 1)
self.data = ""
def read(self):
# move the file pointer to the start of the record so we can read
# from disc
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHll4HBB"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
# bytesRead = rec_len
s = rec_unpack(self.fileptr.read(rec_len))
self.numberOfBytes = s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.Latitude = float(s[8] / float(20000000))
self.Longitude = float(s[9] / float(10000000))
self.Quality = float(s[10] / float(100))
self.SpeedOverGround = float(s[11] / float(100))
self.CourseOverGround = float(s[12] / float(100))
self.Heading = float(s[13] / float(100))
self.Descriptor = s[14]
self.NBytesDatagram = s[15]
# now spare byte only if necessary
if (rec_len + self.NBytesDatagram + 3) % 2 != 0:
self.NBytesDatagram += 1
# now read the block of data whatever it may contain
self.data = self.fileptr.read(self.NBytesDatagram)
# # now spare byte only if necessary
# if (rec_len + self.NBytesDatagram + 3) % 2 != 0:
# self.fileptr.read(1)
self.ETX, self.checksum = readFooter(self.numberOfBytes, self.fileptr)
def readFooter(numberOfBytes, fileptr):
rec_fmt = "=BH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(fileptr.read(rec_len))
ETX = s[0]
checksum = s[1]
# self.DatagramAsReceived = s[0].decode('utf-8').rstrip('\x00')
# if numberOfBytes % 2 == 0:
# # skip the spare byte
# ETX = s[2]
# checksum = s[3]
# else:
# ETX = s[1]
# checksum = s[2]
# #read any trailing bytes. We have seen the need for this with some .all files.
# if bytesRead < self.numberOfBytes:
# self.fileptr.read(int(self.numberOfBytes - bytesRead))
return ETX, checksum
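# Illustrative sketch (assumption, not original code): verifying the checksum of a raw
# datagram as returned by ALLReader.readDatagramBytes(). The Kongsberg checksum is the sum
# of the bytes between (but excluding) STX and ETX modulo 65536; the raw buffer starts with
# the 4 byte length field followed by STX, and ends with ETX plus a 2 byte checksum.
def exampleChecksumOk(rawbytes):
    expected = struct.unpack("<H", rawbytes[-2:])[0]  # stored checksum, little endian
    return (sum(rawbytes[5:-3]) % 65536) == expected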
class P_POSITION_ENCODER:
def __init__(self):
self.data = 0
def encode(
self,
recordDate,
recordTime,
counter,
latitude,
longitude,
quality,
speedOverGround,
courseOverGround,
heading,
descriptor,
nBytesDatagram,
data,
):
"""Encode a Position datagram record"""
rec_fmt = "=LBBHLLHHll4HBB"
rec_len = struct.calcsize(rec_fmt)
# heightType = 0 #0 = the height of the waterline at the vertical datum (from KM datagram manual)
serialNumber = 999
STX = 2
typeOfDatagram = "P"
checksum = 0
        # needs to be a sensible value so the record is valid. Maybe it would be
        # better to pass this from above
model = 2045
# for now dont write out the raw position string. I am not sure if
# this helps or not. It can be included if we feel it adds value
# over confusion
data = ""
# try:
# fullDatagram = struct.pack(rec_fmt, rec_len-4, STX, ord(typeOfDatagram), model, int(recordDate), int(recordTime), counter, serialNumber, int(height * 100), int(heightType))
recordLength = (
rec_len - 4 + len(data) + 3
) # remove 4 bytes from header and add 3 more for footer
fullDatagram = struct.pack(
rec_fmt,
recordLength,
STX,
ord(typeOfDatagram),
model,
int(recordDate),
int(recordTime),
int(counter),
int(serialNumber),
int(latitude * float(20000000)),
int(longitude * float(10000000)),
int(quality * 100),
int(speedOverGround * float(100)),
int(courseOverGround * float(100)),
int(heading * float(100)),
int(descriptor),
int(len(data)),
)
# now add the raw bytes, typically NMEA GGA string
fullDatagram = fullDatagram + data.encode("ascii")
ETX = 3
checksum = sum(fullDatagram[5:]) % 65536
footer = struct.pack("=BH", ETX, checksum)
fullDatagram = fullDatagram + footer
return fullDatagram
# except:
# print ("error encoding POSITION Record")
# return
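# Illustrative sketch (assumption, not original code): encoding a single made-up position
# fix with P_POSITION_ENCODER. recordDate is in Kongsberg YYYYMMDD form, recordTime is in
# milliseconds since midnight and descriptor identifies the positioning system.
def exampleEncodePosition():
    encoder = P_POSITION_ENCODER()
    return encoder.encode(
        recordDate=20180501,
        recordTime=43200000,  # 12:00:00.000
        counter=1,
        latitude=-27.5,
        longitude=153.0,
        quality=1.5,
        speedOverGround=4.2,
        courseOverGround=90.0,
        heading=91.0,
        descriptor=1,
        nBytesDatagram=0,
        data="",
    )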
###############################################################################
class R_RUNTIME:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "R" # assign the KM code for this datagram type
# remember where this packet resides in the file so we can return if
# needed
self.offset = fileptr.tell()
self.numberOfBytes = (
numberOfBytes # remember how many bytes this packet contains
)
# remember the file pointer so we do not need to pass from the host
# process
self.fileptr = fileptr
# move the file pointer to the end of the record so we can skip as
# the default actions
self.fileptr.seek(numberOfBytes, 1)
self.data = ""
def read(self):
# move the file pointer to the start of the record so we can read
# from disc
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHBBBBBBHHHHHbBBBBBHBBBBHHBBH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = s[5] / 1000
self.Counter = s[6]
self.SerialNumber = s[7]
self.operatorStationStatus = s[8]
self.processingUnitStatus = s[9]
self.BSPStatus = s[10]
self.sonarHeadStatus = s[11]
self.mode = s[12]
self.filterIdentifier = s[13]
self.minimumDepth = s[14]
self.maximumDepth = s[15]
self.absorptionCoefficient = s[16] / 100
self.transmitPulseLength = s[17]
self.transmitBeamWidth = s[18]*0.1
self.transmitPower = s[19]
self.receiveBeamWidth = s[20]*0.1
self.receiveBandwidth = s[21]
self.mode2 = s[22]
self.tvg = s[23]
self.SpeedSound = s[24]
self.maximumPortWidth = s[25]
self.beamSpacing = s[26]
self.maximumPortCoverageDegrees = s[27]
self.yawMode = s[28]
# self.yawAndPitchStabilisationMode= s[28]
self.maximumStbdCoverageDegrees = s[29]
self.maximumStbdWidth = s[30]
self.transmitAAlongTilt = s[31]
self.filterIdentifier2 = s[32]
self.ETX = s[33]
self.checksum = s[34]
self.beamSpacingString = "Determined by beamwidth"
if isBitSet(self.beamSpacing, 0):
self.beamSpacingString = "Equidistant"
if isBitSet(self.beamSpacing, 1):
self.beamSpacingString = "Equiangular"
if isBitSet(self.beamSpacing, 0) and isBitSet(self.beamSpacing, 1):
self.beamSpacingString = "High density equidistant"
if isBitSet(self.beamSpacing, 7):
self.beamSpacingString = self.beamSpacingString + "+Two Heads"
self.yawAndPitchStabilisationMode = "Yaw stabilised OFF"
if isBitSet(self.yawMode, 0):
self.yawAndPitchStabilisationMode = "Yaw stabilised ON"
if isBitSet(self.yawMode, 1):
self.yawAndPitchStabilisationMode = "Yaw stabilised ON"
if isBitSet(self.yawMode, 1) and isBitSet(self.yawMode, 0):
self.yawAndPitchStabilisationMode = "Yaw stabilised ON (manual)"
        if ((not isBitSet(self.yawMode, 2)) & (not isBitSet(self.yawMode, 3))):
self.yawAndPitchStabilisationMode += "+Heading filter hard"
if (isBitSet(self.yawMode, 2)):
self.yawAndPitchStabilisationMode += "+Heading filter medium"
if (isBitSet(self.yawMode, 3)):
self.yawAndPitchStabilisationMode += "+Heading filter weak"
if (isBitSet(self.yawMode, 7)):
self.yawAndPitchStabilisationMode += "+Pitch stabilised ON"
self.DepthMode = "Very Shallow"
if isBitSet(self.mode, 0):
self.DepthMode = "Shallow"
if isBitSet(self.mode, 1):
self.DepthMode = "Medium"
if isBitSet(self.mode, 0) & (isBitSet(self.mode, 1)):
self.DepthMode = "Deep"
if isBitSet(self.mode, 2):
self.DepthMode = "Very Deep"
if isBitSet(self.mode, 0) & (isBitSet(self.mode, 2)):
self.DepthMode = "Extra Deep"
if str(self.EMModel) in 'EM2040, EM2045':
self.DepthMode = "200kHz"
if (isBitSet(self.mode, 0)):
self.DepthMode = "300kHz"
if (isBitSet(self.mode, 1)):
self.DepthMode = "400kHz"
if str(self.EMModel) == "EM2040C":
parameter = int(format(self.mode, '08b')[3:],2)
self.DepthMode = str(180 + (parameter * 10)) + "kHz"
self.RXarrayuse = np.nan
if str(self.EMModel) in 'EM2040, EM2045':
self.RXarrayuse = "Off (RX inactive)"
if isBitSet(self.mode2, 0):
self.RXarrayuse = "RX1 (port) active"
if isBitSet(self.mode2, 1):
self.RXarrayuse = "RX2 (starboard) active"
            if isBitSet(self.mode2, 0) & (isBitSet(self.mode2, 1)):
self.RXarrayuse = "Both RX units active"
self.Sonarheaduse = np.nan
if str(self.EMModel) == 'EM2040C':
self.Sonarheaduse = "Off (Both inactive)"
if isBitSet(self.mode2, 0):
self.Sonarheaduse = "SH1 (port) active"
if isBitSet(self.mode2, 1):
self.Sonarheaduse = "SH2 (starboard) active"
            if isBitSet(self.mode2, 0) & (isBitSet(self.mode2, 1)):
self.Sonarheaduse = "Both active"
self.Pulselength = np.nan
if str(self.EMModel) in 'EM2040, EM2045':
self.Pulselength = "Short CW"
if isBitSet(self.mode2, 2):
self.Pulselength = "Medium CW"
if isBitSet(self.mode2, 3):
self.Pulselength = "Long CW"
            if isBitSet(self.mode2, 3) & (isBitSet(self.mode2, 2)):
self.Pulselength = "FM"
#if ((not isBitSet(self.filterIdentifier, 4)) & (not isBitSet(self.filterIdentifier, 7))):
if str(self.EMModel) == 'EM2040C':
self.Pulselength = "Very Short CW"
if isBitSet(self.mode2, 4):
self.Pulselength = "Short CW"
if isBitSet(self.mode2, 5):
self.Pulselength = "Medium CW"
            if isBitSet(self.mode2, 5) & (isBitSet(self.mode2, 4)):
self.Pulselength = "Long CW"
if isBitSet(self.mode2, 6):
self.Pulselength = "Very Long CW"
            if isBitSet(self.mode2, 6) & (isBitSet(self.mode2, 4)):
self.Pulselength = "Extra Long CW"
self.RXfixedgain = np.nan
if str(self.EMModel) not in 'EM2040, EM2045, EM2040C':
self.RXfixedgain = self.mode2
self.TXPulseForm = "CW"
if isBitSet(self.mode, 4):
self.TXPulseForm = "Mixed"
if isBitSet(self.mode, 5):
self.TXPulseForm = "FM"
self.dualSwathMode = "Off"
if isBitSet(self.mode, 6):
self.dualSwathMode = "Fixed"
if isBitSet(self.mode, 7):
self.dualSwathMode = "Dynamic"
self.sourceOfSpeedSound = "From realtime sensor"
if isBitSet(self.SpeedSound, 0):
self.sourceOfSpeedSound = "Manually entered by operator"
if isBitSet(self.SpeedSound, 1):
self.sourceOfSpeedSound = "Interpolated from currently used SSP"
if isBitSet(self.SpeedSound, 0) and isBitSet(self.SpeedSound, 1):
self.sourceOfSpeedSound = "Calculated by ME70BO TRU"
if isBitSet(self.SpeedSound, 4):
self.sourceOfSpeedSound += "Extra Detections ON"
if isBitSet(self.SpeedSound, 5):
self.sourceOfSpeedSound += "Sonar Mode ON"
if isBitSet(self.SpeedSound, 6):
self.sourceOfSpeedSound += "Passive Mode ON"
if isBitSet(self.SpeedSound, 7):
self.sourceOfSpeedSound += "3D Scanning ON"
self.filterSetting = "Spike Filter Off"
if (isBitSet(self.filterIdentifier, 0)):
self.filterSetting = "Spike Filter Weak"
if (isBitSet(self.filterIdentifier, 1)):
self.filterSetting = "Spike Filter Medium"
if (isBitSet(self.filterIdentifier, 0) & (isBitSet(self.filterIdentifier, 1))):
self.filterSetting = "Spike Filter Strong"
if (isBitSet(self.filterIdentifier, 2)):
self.filterSetting += "+Slope ON"
if (isBitSet(self.filterIdentifier, 3)):
self.filterSetting += "+Sector Tracking ON"
if ((not isBitSet(self.filterIdentifier, 4)) & (not isBitSet(self.filterIdentifier, 7))):
self.filterSetting += "+Range Gates Normal"
if ((isBitSet(self.filterIdentifier, 4)) & (not isBitSet(self.filterIdentifier, 7))):
self.filterSetting += "+Range Gates Large"
if ((not isBitSet(self.filterIdentifier, 4)) & (isBitSet(self.filterIdentifier, 7))):
self.filterSetting += "+Range Gates Small"
if (isBitSet(self.filterIdentifier, 5)):
self.filterSetting += "+Aeration Filter ON"
if (isBitSet(self.filterIdentifier, 6)):
self.filterSetting += "+Interference Filter ON"
self.Penetrationfilter = np.nan
if ((not isBitSet(self.filterIdentifier2, 0)) & (not isBitSet(self.filterIdentifier2, 1))):
self.Penetrationfilter = "Off"
if (isBitSet(self.filterIdentifier2, 0)):
self.Penetrationfilter = "Weak"
if (isBitSet(self.filterIdentifier2, 1)):
self.Penetrationfilter = "Medium"
if (isBitSet(self.filterIdentifier2, 0) & (isBitSet(self.filterIdentifier2, 1))):
self.Penetrationfilter = "Strong"
self.Detectmode = np.nan
if ((not isBitSet(self.filterIdentifier2, 2)) & (not isBitSet(self.filterIdentifier2, 3))):
self.Detectmode = "Normal"
if (isBitSet(self.filterIdentifier2, 2)):
self.Detectmode = "Waterway"
if (isBitSet(self.filterIdentifier2, 3)):
self.Detectmode = "Tracking"
if (isBitSet(self.filterIdentifier2, 2) & (isBitSet(self.filterIdentifier2, 3))):
self.Detectmode = "Minimum depth"
self.Phaseramp = np.nan
if ((not isBitSet(self.filterIdentifier2, 4)) & (not isBitSet(self.filterIdentifier2, 5))):
self.Phaseramp = "Short"
if (isBitSet(self.filterIdentifier2, 4)):
self.Phaseramp = "Normal"
if (isBitSet(self.filterIdentifier2, 5)):
self.Phaseramp = "Long"
self.SpecialTVG = np.nan
if (not isBitSet(self.filterIdentifier2, 6)):
self.SpecialTVG = "Normal"
if (isBitSet(self.filterIdentifier2, 6)):
self.SpecialTVG = "Special"
self.Specialampdetect = np.nan
if (not isBitSet(self.filterIdentifier2, 7)):
self.Specialampdetect = "Normal"
if (isBitSet(self.filterIdentifier2, 7)):
self.Specialampdetect = "Special"
if str(self.EMModel) == 'EM3002':
if (isBitSet(self.filterIdentifier2, 7)):
self.Specialampdetect = "Soft sediments"
self.HiLo = np.nan
if str(self.EMModel) == 'EM1002':
self.HiLo = self.filterIdentifier2
def header(self):
header = ""
header += "typeOfDatagram,"
header += "EMModel,"
header += "RecordDate,"
header += "Time,"
header += "Counter,"
header += "SerialNumber,"
header += "operatorStationStatus,"
header += "processingUnitStatus,"
header += "BSPStatus,"
header += "sonarHeadStatus,"
header += "mode,"
header += "dualSwathMode,"
header += "TXPulseForm,"
header += "filterIdentifier,"
header += "filterSetting,"
header += "minimumDepth,"
header += "maximumDepth,"
header += "absorptionCoefficient,"
header += "transmitPulseLength,"
header += "transmitBeamWidth,"
header += "transmitPower,"
header += "receiveBeamWidth,"
header += "receiveBandwidth,"
header += "mode2,"
header += "tvg,"
header += "sourceOfSpeedSound,"
header += "maximumPortWidth,"
header += "beamSpacing,"
header += "maximumPortCoverageDegrees,"
header += "yawMode,"
header += "yawAndPitchStabilisationMode,"
header += "maximumStbdCoverageDegrees,"
header += "maximumStbdWidth,"
header += "transmitAAlongTilt,"
header += "filterIdentifier2,"
header += "DepthMode,"
header += "RXarrayuse,"
header += "Sonarheaduse,"
header += "Pulselength,"
header += "RXfixedgain,"
header += "Penetrationfilter,"
header += "Detectmode,"
header += "Phaseramp,"
header += "SpecialTVG,"
header += "Specialampdetect,"
header += "HiLofrequencyabsorptioncoefficientratio"
return header
def parameters(self):
"""this function returns the runtime record in a human readmable
format. there are 2 strings returned, teh header which changes
with every record and the paramters which only change when the
user changes a setting. this means we can reduce duplicate
records by testing the parameters string for changes"""
s = "%s,%d," % (self.operatorStationStatus, self.processingUnitStatus)
s += "%d,%d," % (self.BSPStatus, self.sonarHeadStatus)
s += "%d,%s,%s,%d,%s," % (
self.mode,
self.dualSwathMode,
self.TXPulseForm,
self.filterIdentifier,
self.filterSetting,
)
s += "%.3f,%.3f," % (self.minimumDepth, self.maximumDepth)
s += "%.3f,%.3f," % (self.absorptionCoefficient, self.transmitPulseLength)
s += "%.3f,%.3f," % (self.transmitBeamWidth, self.transmitPower)
s += "%.3f,%.3f," % (self.receiveBeamWidth, self.receiveBandwidth)
s += "%d,%.3f," % (self.mode2, self.tvg)
s += "%s,%d," % (self.sourceOfSpeedSound, self.maximumPortWidth)
s += "%s,%d," % (self.beamSpacingString, self.maximumPortCoverageDegrees)
s += "%s,%s,%d," % (
self.yawMode,
self.yawAndPitchStabilisationMode,
self.maximumStbdCoverageDegrees,
)
s += "%d,%d," % (self.maximumStbdWidth, self.transmitAAlongTilt)
s += "%s,%s,%s," % (self.filterIdentifier2, self.DepthMode, self.RXarrayuse)
s += "%s,%s,%s," % (self.Sonarheaduse, self.Pulselength, self.RXfixedgain)
s += "%s,%s,%s," % (self.Penetrationfilter, self.Detectmode, self.Phaseramp)
s += "%s,%s,%s" % (self.SpecialTVG, self.Specialampdetect, self.HiLo)
return s
def __str__(self):
"""this function returns the runtime record in a human readmable
format. there are 2 strings returned, teh header which changes
with every record and the paramters which only change when the
user changes a setting. this means we can reduce duplicate
records by testing the parameters string for changes"""
s = "%s,%d," % (self.typeOfDatagram, self.EMModel)
s += "%s,%.3f," % (self.RecordDate, self.Time)
s += "%d,%d," % (self.Counter, self.SerialNumber)
s += "%s,%d," % (self.operatorStationStatus, self.processingUnitStatus)
s += "%d,%d," % (self.BSPStatus, self.sonarHeadStatus)
s += "%d,%s,%s,%d,%s," % (
self.mode,
self.dualSwathMode,
self.TXPulseForm,
self.filterIdentifier,
self.filterSetting,
)
s += "%.3f,%.3f," % (self.minimumDepth, self.maximumDepth)
s += "%.3f,%.3f," % (self.absorptionCoefficient, self.transmitPulseLength)
s += "%.3f,%.3f," % (self.transmitBeamWidth, self.transmitPower)
s += "%.3f,%.3f," % (self.receiveBeamWidth, self.receiveBandwidth)
s += "%d,%.3f," % (self.mode2, self.tvg)
s += "%d,%d," % (self.sourceOfSpeedSound, self.maximumPortWidth)
s += "%.3f,%d," % (self.beamSpacingString, self.maximumPortCoverageDegrees)
s += "%s,%s,%d," % (
self.yawMode,
self.yawAndPitchStabilisationMode,
self.maximumStbdCoverageDegrees,
)
s += "%d,%d," % (self.maximumStbdWidth, self.transmitAAlongTilt)
s += "%s,%s,%s," % (self.filterIdentifier2, self.DepthMode, self.RXarrayuse)
s += "%s,%s,%s," % (self.Sonarheaduse, self.Pulselength, self.RXfixedgain)
s += "%s,%s,%s," % (self.Penetrationfilter, self.Detectmode, self.Phaseramp)
s += "%s,%s,%s" % (self.SpecialTVG, self.Specialampdetect, self.HiLo)
return s
# return pprint.pformat(vars(self))
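# Editorial note (illustrative sketch, not part of the original module): the
# parameters() string is designed so duplicate runtime records can be
# suppressed. The generator below is hypothetical and only demonstrates that
# pattern for an iterable of RUNTIME_PARAMETERS records.
def _example_dedup_runtime(runtime_records):
    """Yield only runtime records whose settings differ from the previous record."""
    previous = None
    for record in runtime_records:
        current = record.parameters()
        if current != previous:
            previous = current
            yield record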
class UNKNOWN_RECORD:
"""used as a convenience tool for datagrams we have no bespoke classes.
Better to make a bespoke class"""
def __init__(self, fileptr, numberOfBytes, typeOfDatagram):
self.typeOfDatagram = typeOfDatagram
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.fileptr.seek(numberOfBytes, 1)
self.data = ""
def read(self):
self.data = self.fileptr.read(self.numberOfBytes)
class U_SVP:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "U"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.fileptr.seek(numberOfBytes, 1)
self.data = []
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHLLHH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(self.fileptr.read(rec_len))
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.ProfileDate = s[8]
self.ProfileTime = s[9]
self.NEntries = s[10]
self.DepthResolution = s[11]
rec_fmt = "=LL"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
# i = 0
for i in range(self.NEntries):
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
self.data.append(
[float(s[0]) / float(100 / self.DepthResolution), float(s[1] / 10)]
)
# read an empty byte
self.fileptr.read(1)
# now read the footer
self.ETX, self.checksum = readFooter(self.numberOfBytes, self.fileptr)
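# Editorial note (illustrative sketch, not part of the original module): after
# U_SVP.read() each entry of `data` is a [depth, sound_speed] pair, with depth
# scaled by the DepthResolution logic above and sound speed in metres/second.
# The helper name below is hypothetical.
def _example_svp_pairs(svp_record):
    """Return (depths, sound_speeds) as two lists for a read U_SVP record."""
    svp_record.read()
    depths = [entry[0] for entry in svp_record.data]
    speeds = [entry[1] for entry in svp_record.data]
    return depths, speeds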
class X_DEPTH:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "X"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.fileptr.seek(numberOfBytes, 1)
self.data = ""
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLL4Hf2Hf4B"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = s[5] / 1000.0
self.Counter = s[6]
self.SerialNumber = s[7]
self.Heading = s[8] / 100.0
self.SoundSpeedAtTransducer = s[9] / 10.0
self.TransducerDepth = s[10]
self.NBeams = s[11]
self.NValidDetections = s[12]
self.SampleFrequency = s[13]
self.ScanningInfo = s[14]
self.spare1 = s[15]
self.spare2 = s[16]
self.spare3 = s[17]
self.Depth = [0 for i in range(self.NBeams)]
self.AcrossTrackDistance = [0 for i in range(self.NBeams)]
self.AlongTrackDistance = [0 for i in range(self.NBeams)]
self.DetectionWindowsLength = [0 for i in range(self.NBeams)]
self.QualityFactor = [0 for i in range(self.NBeams)]
self.BeamIncidenceAngleAdjustment = [0 for i in range(self.NBeams)]
self.DetectionInformation = [0 for i in range(self.NBeams)]
self.RealtimeCleaningInformation = [0 for i in range(self.NBeams)]
self.Reflectivity = [0 for i in range(self.NBeams)]
# # now read the variable part of the Record
rec_fmt = "=fffHBBBbh"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
for i in range(self.NBeams):
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
self.Depth[i] = s[0]
self.AcrossTrackDistance[i] = s[1]
self.AlongTrackDistance[i] = s[2]
self.DetectionWindowsLength[i] = s[3]
self.QualityFactor[i] = s[4]
self.BeamIncidenceAngleAdjustment[i] = s[5] / 10.0
self.DetectionInformation[i] = s[6]
self.RealtimeCleaningInformation[i] = s[7]
self.Reflectivity[i] = s[8] / 10.0
# now do some sanity checks. We have examples where the Depth
# and Across track values are NaN
if math.isnan(self.Depth[i]):
self.Depth[i] = 0
if math.isnan(self.AcrossTrackDistance[i]):
self.AcrossTrackDistance[i] = 0
if math.isnan(self.AlongTrackDistance[i]):
self.AlongTrackDistance[i] = 0
rec_fmt = "=BBH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
data = self.fileptr.read(rec_len)
s = rec_unpack(data)
self.ETX = s[1]
self.checksum = s[2]
def encode(self):
"""Encode a Depth XYZ datagram record"""
header_fmt = "=LBBHLL4Hf2Hf4B"
header_len = struct.calcsize(header_fmt)
fullDatagram = bytearray()
rec_fmt = "=fffHBBBbh"
rec_len = struct.calcsize(rec_fmt)
footer_fmt = "=BBH"
footer_len = struct.calcsize(footer_fmt)
fullDatagramByteCount = header_len + (rec_len * self.NBeams) + footer_len
# pack the header
recordTime = int(dateToSecondsSinceMidnight(from_timestamp(self.Time)) * 1000)
header = struct.pack(
header_fmt,
fullDatagramByteCount - 4,
self.STX,
ord(self.typeOfDatagram),
self.EMModel,
self.RecordDate,
recordTime,
self.Counter,
self.SerialNumber,
int(self.Heading * 100),
int(self.SoundSpeedAtTransducer * 10),
self.TransducerDepth,
self.NBeams,
self.NValidDetections,
self.SampleFrequency,
self.ScanningInfo,
self.spare1,
self.spare2,
self.spare3,
)
fullDatagram = fullDatagram + header
# pack the beam summary info
for i in range(self.NBeams):
bodyRecord = struct.pack(
rec_fmt,
self.Depth[i],
self.AcrossTrackDistance[i],
self.AlongTrackDistance[i],
self.DetectionWindowsLength[i],
self.QualityFactor[i],
int(self.BeamIncidenceAngleAdjustment[i] * 10),
self.DetectionInformation[i],
self.RealtimeCleaningInformation[i],
int(self.Reflectivity[i] * 10),
)
fullDatagram = fullDatagram + bodyRecord
systemDescriptor = 1
tmp = struct.pack("=B", systemDescriptor)
fullDatagram = fullDatagram + tmp
# now pack the footer
ETX = 3
checksum = 0
footer = struct.pack("=BH", ETX, checksum)
fullDatagram = fullDatagram + footer
return fullDatagram
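# Editorial note (illustrative sketch, not part of the original module): after
# X_DEPTH.read() the per-beam lists are index-aligned, so a simple
# (across-track distance, depth) profile can be built as shown below. The
# helper name is hypothetical.
def _example_depth_profile(xyz_record):
    """Return a list of (across-track distance, depth) pairs for a read X_DEPTH record."""
    xyz_record.read()
    return list(zip(xyz_record.AcrossTrackDistance, xyz_record.Depth))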
class Y_SEABEDIMAGE:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "Y"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.fileptr.seek(numberOfBytes, 1)
self.data = ""
self.ARC = {}
self.BeamPointingAngle = []
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHfHhhHHH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.SampleFrequency = s[8]
self.RangeToNormalIncidence = s[9]
self.NormalIncidence = float(s[10] * 0.1) # [dB]
self.ObliqueBS = float(s[11] * 0.1) # [dB]
self.TxBeamWidth = float(s[12] * 0.1) # [deg]
self.TVGCrossOver = float(s[13] * 0.1) # [deg]
self.NumBeams = s[14]
self.beams = []
self.numSamples = 0
self.samples = []
rec_fmt = "=bBHH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
self.numSamples = 0
for i in range(self.NumBeams):
s = rec_unpack(self.fileptr.read(rec_len))
b = cBeam(s, 0)
self.numSamples = self.numSamples + b.numberOfSamplesPerBeam
self.beams.append(b)
rec_fmt = "=" + str(self.numSamples) + "h"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
self.samples = rec_unpack(self.fileptr.read(rec_len))
# allocate the samples to the correct beams so it is easier to use
sampleIDX = 0
for b in self.beams:
b.samples = self.samples[sampleIDX : sampleIDX + b.numberOfSamplesPerBeam]
sampleIDX = sampleIDX + b.numberOfSamplesPerBeam
# read an empty byte
self.fileptr.read(1)
# now read the footer
self.ETX, self.checksum = readFooter(self.numberOfBytes, self.fileptr)
###############################################################################
def encode(self):
"""Encode a seabed image datagram record"""
header_fmt = "=LBBHLLHHfHhhHHH"
header_len = struct.calcsize(header_fmt)
fullDatagram = bytearray()
rec_fmt = "=bBHH"
rec_len = struct.calcsize(rec_fmt)
sample_fmt = "=" + str(self.numSamples) + "h"
sample_len = struct.calcsize(sample_fmt)
footer_fmt = "=BBH"
footer_len = struct.calcsize(footer_fmt)
fullDatagramByteCount = (
header_len + (rec_len * self.NumBeams) + sample_len + footer_len
)
# pack the header
recordTime = int(dateToSecondsSinceMidnight(from_timestamp(self.Time)) * 1000)
header = struct.pack(
header_fmt,
fullDatagramByteCount - 4,
self.STX,
ord(self.typeOfDatagram),
self.EMModel,
self.RecordDate,
recordTime,
self.Counter,
self.SerialNumber,
self.SampleFrequency,
self.RangeToNormalIncidence,
self.NormalIncidence,
self.ObliqueBS,
self.TxBeamWidth,
self.TVGCrossOver,
self.NumBeams,
)
fullDatagram = fullDatagram + header
# pack the beam summary info
s = []
for i, b in enumerate(self.beams):
bodyRecord = struct.pack(
rec_fmt,
b.sortingDirection,
b.detectionInfo,
b.numberOfSamplesPerBeam,
b.centreSampleNumber,
)
fullDatagram = fullDatagram + bodyRecord
# using the takeoffangle, we need to look up the correction from
# the ARC and apply it to the samples.
a = round(self.BeamPointingAngle[i], 0)
correction = self.ARC[a]
for sample in b.samples:
s.append(int(sample + correction))
sampleRecord = struct.pack(sample_fmt, *s)
fullDatagram = fullDatagram + sampleRecord
systemDescriptor = 1
tmp = struct.pack("=B", systemDescriptor)
fullDatagram = fullDatagram + tmp
# now pack the footer
ETX = 3
checksum = 0
footer = struct.pack("=BH", ETX, checksum)
fullDatagram = fullDatagram + footer
return fullDatagram
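# Editorial note (illustrative sketch, not part of the original module):
# Y_SEABEDIMAGE.encode() expects the caller to have populated self.ARC (a dict
# mapping rounded beam take-off angle in degrees to a backscatter correction in
# dB) and self.BeamPointingAngle (one angle per beam) beforehand. The helper
# below is hypothetical and only shows that wiring.
def _example_apply_arc(seabed_record, beam_angles, arc_table):
    """Attach per-beam angles and an ARC table to a read Y_SEABEDIMAGE record, then re-encode it."""
    seabed_record.BeamPointingAngle = list(beam_angles)   # one take-off angle per beam
    seabed_record.ARC = dict(arc_table)                   # rounded angle -> correction (dB)
    return seabed_record.encode()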
class k_WATERCOLUMN:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "k"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.fileptr.seek(numberOfBytes, 1)
self.data = ""
self.ARC = {}
self.BeamPointingAngle = []
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=LBBHLLHHHHHHHHLhBbBxxx"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
# Number of datagrams to complete the diagram
self.NumberOfDatagrams = s[8]
self.DatagramNumbers = s[9] # Current datagram index
self.NumTransmitSector = s[10] # 1 to 20 (Ntx)
self.NumReceiveBeamsTotal = s[11] # Nrx for all datagrams
self.NumReceiveBeams = s[12] # Nrx current datagram
self.SoundSpeed = s[13] / 10.0
self.SampleFrequency = s[14] / 100.0
self.TransmitHeave = s[15] / 100.0
self.TVGFunction = s[16]
self.TVGOffset = s[17]
self.ScanningInfo = s[18]
# TX record
rec_fmt = "=hHBx"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
self.TX_TiltAngle = []
self.TX_CenterFrequency = []
self.TX_SectorNumber = []
for i in range(self.NumTransmitSector):
data = self.fileptr.read(rec_len)
tx = rec_unpack(data)
self.TX_TiltAngle.append(tx[0])
self.TX_CenterFrequency.append(tx[1])
self.TX_SectorNumber.append(tx[2])
# read an empty byte
# self.fileptr.read(1)
# RX record
rx_rec_fmt = "=hHHHBB"
rx_rec_len = struct.calcsize(rx_rec_fmt)
rx_rec_unpack = struct.Struct(rx_rec_fmt).unpack
self.RX_PointingAngle = []
self.RX_StartRange = []
self.RX_NumSamples = []
self.RX_DetectedRange = []
self.RX_TransmitSector = []
self.RX_BeamNumber = []
self.RX_Samples = [] # List of lists
tmp = []
for i in range(self.NumReceiveBeams):
data = self.fileptr.read(rx_rec_len)
rx = rx_rec_unpack(data)
tmp.append(rx[3])
self.RX_PointingAngle.append(rx[0] / 100.0)
self.RX_StartRange.append(rx[1])
self.RX_NumSamples.append(rx[2])
self.RX_DetectedRange.append(rx[3])
self.RX_TransmitSector.append(rx[4])
self.RX_BeamNumber.append(rx[5])
rxs_rec_fmt = "={}b".format(rx[2])
rxs_rec_len = struct.calcsize(rxs_rec_fmt)
rxs_rec_unpack = struct.Struct(rxs_rec_fmt).unpack
data = self.fileptr.read(rxs_rec_len)
rxs = rxs_rec_unpack(data)
self.RX_Samples.append(rxs)
# read an empty byte
self.fileptr.read(1)
# now read the footer
self.ETX, self.checksum = readFooter(self.numberOfBytes, self.fileptr)
def encode(self):
"""Encode a water column datagram record"""
raise NotImplementedError("Not implemented")
def dr_meters(self):
return [
self.SoundSpeed * dr / (self.SampleFrequency * 2)
for dr in self.RX_DetectedRange
]
def r_max(self):
return self.SoundSpeed * max(self.RX_DetectedRange) / (self.SampleFrequency * 2)
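# Editorial note (illustrative sketch, not part of the original module): the
# range conversion used by dr_meters()/r_max() is range_m = c * n / (2 * Fs).
# For example, with SoundSpeed c = 1500 m/s, SampleFrequency Fs = 15000 Hz and
# a detected range of 2000 samples, the slant range is 1500 * 2000 / 30000 = 100 m.
# The helper name below is hypothetical.
def _example_watercolumn_ranges(wc):
    """Return (per-beam detected ranges in metres, maximum range in metres) for a read k_WATERCOLUMN record."""
    wc.read()
    return wc.dr_meters(), wc.r_max()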
class S_SEABEDIMAGE:
def __init__(self, fileptr, numberOfBytes):
self.typeOfDatagram = "S"
self.offset = fileptr.tell()
self.numberOfBytes = numberOfBytes
self.fileptr = fileptr
self.fileptr.seek(numberOfBytes, 1)
self.data = ""
self.ARC = {}
self.BeamPointingAngle = []
def read(self):
self.fileptr.seek(self.offset, 0)
rec_fmt = "=L2BH2L7H2bH2B"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack_from
s = rec_unpack(self.fileptr.read(rec_len))
# self.numberOfBytes= s[0]
self.STX = s[1]
self.typeOfDatagram = chr(s[2])
self.EMModel = s[3]
self.RecordDate = s[4]
self.Time = float(s[5] / 1000.0)
self.Counter = s[6]
self.SerialNumber = s[7]
self.MeanAbsorption = float(s[8] * 0.01) # [dB/km]
self.PulseLength = s[9]
self.RangeToNormalIncidence = s[10]
self.StartRangeSampleOfTVGRamp = s[11]
self.StopRangeSampleOfTVGRamp = s[12]
self.NormalIncidence = float(s[13] * 0.1) # [dB]
self.ObliqueBS = float(s[14] * 0.1) # [dB]
self.TxBeamWidth = float(s[15] * 0.1) # [deg]
self.TVGCrossOver = float(s[16] * 0.1) # [deg]
self.NumBeams = s[17]
self.beams = []
self.numSamples = 0
self.samples = []
rec_fmt = "=bBHH"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
self.numSamples = 0
for i in range(self.NumBeams):
s = rec_unpack(self.fileptr.read(rec_len))
b = cBeam(s, 0)
self.numSamples = self.numSamples + b.numberOfSamplesPerBeam
self.beams.append(b)
rec_fmt = "=" + str(self.numSamples) + "b"
rec_len = struct.calcsize(rec_fmt)
rec_unpack = struct.Struct(rec_fmt).unpack
self.samples = rec_unpack(self.fileptr.read(rec_len))
# allocate the samples to the correct beams so it is easier to use
sampleIDX = 0
for b in self.beams:
b.samples = self.samples[sampleIDX : sampleIDX + b.numberOfSamplesPerBeam]
sampleIDX = sampleIDX + b.numberOfSamplesPerBeam
# read an empty byte if necessary
if (rec_len + self.numberOfBytes + 3) % 2 != 0:
self.fileptr.read(1)
# now read the footer
self.ETX, self.checksum = readFooter(self.numberOfBytes, self.fileptr)
# TIME HELPER FUNCTIONS
def to_timestamp(dateObject):
return (dateObject - datetime(1970, 1, 1)).total_seconds()
def to_DateTime(recordDate, recordTime):
"""return a python date object from a split date and time record. works
with kongsberg date and time structures"""
date_object = datetime.strptime(str(recordDate), "%Y%m%d") + timedelta(
0, recordTime
)
return date_object
def from_timestamp(unixtime):
return datetime.utcfromtimestamp(unixtime)
def dateToKongsbergDate(dateObject):
return dateObject.strftime("%Y%m%d")
def dateToKongsbergTime(dateObject):
return dateObject.strftime("%H%M%S")
def dateToSecondsSinceMidnight(dateObject):
return (
dateObject - dateObject.replace(hour=0, minute=0, second=0, microsecond=0)
).total_seconds()
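# Editorial note (illustrative example, not part of the original module): the
# Kongsberg datagrams carry a date as an integer YYYYMMDD and a time as seconds
# since midnight, so to_DateTime(20200301, 3661.5) gives 2020-03-01 01:01:01.5,
# and dateToSecondsSinceMidnight() is its inverse.
def _example_time_roundtrip():
    """Show the date/time helpers agreeing with each other."""
    record_datetime = to_DateTime(20200301, 3661.5)
    assert dateToKongsbergDate(record_datetime) == "20200301"
    assert abs(dateToSecondsSinceMidnight(record_datetime) - 3661.5) < 1e-6
    return record_datetime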
# bitwise helper functions
def isBitSet(int_type, offset):
"""testBit() returns a nonzero result, 2**offset, if the bit at 'offset'
is one."""
mask = 1 << offset
return (int_type & (1 << offset)) != 0
def set_bit(value, bit):
return value | (1 << bit)
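# Editorial note (illustrative example, not part of the original module): these
# helpers are used throughout the datagram decoders to pick flags out of the
# packed mode/filter bytes, e.g. isBitSet(0b0000_0010, 1) is True and
# set_bit(0, 3) == 8.
def _example_bit_helpers():
    """Demonstrate the bit helpers on a small value."""
    value = set_bit(0, 3)                            # -> 8
    return isBitSet(value, 3), isBitSet(value, 0)    # -> (True, False)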
if __name__ == "__main__":
main()
# exit()
|
the-stack_106_32195 | # -*- coding: utf-8 -*-
"""
Unit tests for landlab.components.soil_moisture.SoilInfiltrationGreenAmpt
last updated: 3/14/16
"""
import numpy as np
from landlab import RasterModelGrid
from landlab.components.soil_moisture import SoilInfiltrationGreenAmpt
(_SHAPE, _SPACING, _ORIGIN) = ((10, 10), (25, 25), (0.0, 0.0))
_ARGS = (_SHAPE, _SPACING, _ORIGIN)
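# Editorial note: the `si` fixture used by the tests below is not defined in
# this excerpt (it presumably lives in a conftest.py). The fixture sketched
# here is an illustrative stand-in mirroring the setup in test_run_one_step,
# not the original landlab fixture.
import pytest
@pytest.fixture
def si():
    grid = RasterModelGrid(_SHAPE, xy_spacing=_SPACING)
    grid.add_ones("soil_water_infiltration__depth", at="node", dtype=float)
    grid.add_ones("surface_water__depth", at="node")
    return SoilInfiltrationGreenAmpt(grid)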
def test_SI_name(si):
assert si.name == "SoilInfiltrationGreenAmpt"
def test_SI_input_var_names(si):
assert si.input_var_names == (
"soil_water_infiltration__depth",
"surface_water__depth",
)
def test_SI_output_var_names(si):
assert si.output_var_names == (
"soil_water_infiltration__depth",
"surface_water__depth",
)
def test_SI_var_units(si):
assert set(si.input_var_names) | set(si.output_var_names) == set(
dict(si.units).keys()
)
assert si.var_units("surface_water__depth") == "m"
assert si.var_units("soil_water_infiltration__depth") == "m"
def test_grid_shape(si):
assert si.grid.number_of_node_rows == _SHAPE[0]
assert si.grid.number_of_node_columns == _SHAPE[1]
def test_calc_soil_pressure(si):
np.testing.assert_almost_equal(
si.calc_soil_pressure("silt loam"), 0.1647870740305523, decimal=6
)
def test_calc_soil_head(si):
soil_props = SoilInfiltrationGreenAmpt.SOIL_PROPS["loam"]
np.testing.assert_almost_equal(
si.calc_pressure_head(soil_props[0], soil_props[1]), 0.087498292, decimal=6
)
def test_calc_moisture_deficit(si):
np.testing.assert_almost_equal(
si.calc_moisture_deficit(
soil_bulk_density=1700.0,
rock_density=2650.0,
volume_fraction_coarse_fragments=0.0,
soil_moisture_content=0.2,
),
0.15849056603,
decimal=6,
)
def test_run_one_step():
grid = RasterModelGrid((10, 10), xy_spacing=25)
grid.add_ones("soil_water_infiltration__depth", at="node", dtype=float)
grid.add_ones("surface_water__depth", at="node")
hydraulic_conductivity = 2.5 * (10**-6)
grid["node"]["surface_water__depth"] *= 5.0
grid["node"]["soil_water_infiltration__depth"] *= 10**-5
SI = SoilInfiltrationGreenAmpt(
grid,
hydraulic_conductivity=hydraulic_conductivity,
soil_bulk_density=1700.0,
rock_density=2650.0,
initial_soil_moisture_content=0.2,
soil_type="silt loam",
volume_fraction_coarse_fragments=0.6,
coarse_sed_flag=False,
surface_water_minimum_depth=1.0e-7,
soil_pore_size_distribution_index=None,
soil_bubbling_pressure=None,
wetting_front_capillary_pressure_head=None,
)
SI.run_one_step(dt=5)
np.testing.assert_almost_equal(
grid["node"]["surface_water__depth"][0], 3.97677483519, decimal=6
)
|
the-stack_106_32196 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Summary collector callback."""
import os
import re
import json
from importlib import import_module
import numpy as np
from mindspore import log as logger
from mindspore.common.tensor import Tensor
from mindspore.common.parameter import Parameter
from mindspore.train.summary.summary_record import SummaryRecord
from mindspore.train.summary.enum import PluginEnum, ModeEnum
from mindspore.train.callback import Callback, ModelCheckpoint
from mindspore.train import lineage_pb2
from mindspore.train.callback._dataset_graph import DatasetGraph
from mindspore.nn.optim.optimizer import Optimizer
from mindspore.nn.loss.loss import _Loss
from mindspore.train._utils import check_value_type
class LineageMetadata:
"""Initialize parameters used in model lineage management."""
train_dataset_path = 'train_dataset_path'
valid_dataset_path = 'valid_dataset_path'
train_network = 'train_network'
loss_function = 'loss_function'
loss = 'loss'
optimizer = 'optimizer'
learning_rate = 'learning_rate'
epoch = 'epoch'
step_num = 'step_num'
parallel_mode = 'parallel_mode'
device_num = 'device_num'
batch_size = 'batch_size'
model_path = 'model_path'
model_ckpt = 'model_ckpt'
model_size = 'model_size'
metrics = 'metrics'
train_dataset_size = 'train_dataset_size'
valid_dataset_size = 'valid_dataset_size'
class SummaryCollector(Callback):
"""
SummaryCollector can help you to collect some common information.
    It can help you to collect loss, learning rate, computational graph and so on.
SummaryCollector also persists data collected by the summary operator into a summary file.
Note:
1. Multiple SummaryCollector instances in callback list are not allowed.
2. Not all information is collected at the training phase or at the eval phase.
        3. SummaryCollector always records the data collected by the summary operator.
Args:
summary_dir (str): The collected data will be persisted to this directory.
If the directory does not exist, it will be created automatically.
        collect_freq (int): Set the frequency of data collection; it should be greater than zero,
and the unit is `step`. Default: 10. If a frequency is set, we will collect data
at (current steps % freq) == 0, and the first step will be collected at any time.
It is important to note that if the data sink mode is used, the unit will become the `epoch`.
It is not recommended to collect data too frequently, which can affect performance.
collect_specified_data (Union[None, dict]): Perform custom operations on the collected data. Default: None.
By default, if set to None, all data is collected as the default behavior.
If you want to customize the data collected, you can do so with a dictionary.
            For example, you can set {'collect_metric': False} to disable collecting metrics.
The data that supports control is shown below.
- collect_metric: Whether to collect training metrics, currently only loss is collected.
The first output will be treated as loss, and it will be averaged.
Optional: True/False. Default: True.
- collect_graph: Whether to collect computational graph, currently only
training computational graph is collected. Optional: True/False. Default: True.
- collect_train_lineage: Whether to collect lineage data for the training phase,
this field will be displayed on the lineage page of Mindinsight. Optional: True/False. Default: True.
- collect_eval_lineage: Whether to collect lineage data for the eval phase,
this field will be displayed on the lineage page of Mindinsight. Optional: True/False. Default: True.
- collect_input_data: Whether to collect dataset for each training. Currently only image data is supported.
Optional: True/False. Default: True.
- collect_dataset_graph: Whether to collect dataset graph for the training phase.
Optional: True/False. Default: True.
- histogram_regular: Collect weight and bias for parameter distribution page display in MindInsight.
This field allows regular strings to control which parameters to collect.
Default: None, it means only the first five parameters are collected.
It is not recommended to collect too many parameters at once, as it can affect performance.
Note that if you collect too many parameters and run out of memory, the training will fail.
keep_default_action (bool): This field affects the collection behavior of the 'collect_specified_data' field.
Optional: True/False, Default: True.
True: means that after specified data is set, non-specified data is collected as the default behavior.
False: means that after specified data is set, only the specified data is collected,
and the others are not collected.
        custom_lineage_data (Union[dict, None]): Allows you to customize the data and present it on the MindInsight
            lineage page. In the custom data, the key type supports str, and the value type supports str/int/float.
Default: None, it means there is no custom data.
collect_tensor_freq (Optional[int]): Same semantic as the `collect_freq`, but controls TensorSummary only.
Because TensorSummary data is too large compared to other summary data, this parameter is used to reduce
its collection. By default, TensorSummary data will be collected at most 20 steps, but not more than how
many steps other summary data will be collected.
Default: None, which means to follow the behavior as described above. For example, given `collect_freq=10`,
when the total steps is 600, TensorSummary will be collected 20 steps, while other summary data 61 steps,
but when the total steps is 20, both TensorSummary and other summary will be collected 3 steps.
            Also note that when in parallel mode, the total steps will be split evenly, which will
affect how many steps TensorSummary will be collected.
max_file_size (Optional[int]): The maximum size in bytes each file can be written to the disk.
Default: None, which means no limit. For example, to write not larger than 4GB,
specify `max_file_size=4 * 1024**3`.
Raises:
ValueError: If the parameter value is not expected.
TypeError: If the parameter type is not expected.
RuntimeError: If an error occurs during data collection.
Examples:
>>> # Simple usage:
>>> summary_collector = SummaryCollector(summary_dir='./summary_dir')
>>> model.train(epoch, dataset, callbacks=summary_collector)
>>>
>>> # Do not collect metric and collect the first layer parameter, others are collected by default
>>> specified={'collect_metric': False, 'histogram_regular': '^conv1.*'}
>>> summary_collector = SummaryCollector(summary_dir='./summary_dir', collect_specified_data=specified)
>>> model.train(epoch, dataset, callbacks=summary_collector)
>>>
>>> # Only collect metric, custom lineage data and record data that collected by the summary operator,
>>> # others are not collected
>>> specified = {'collect_metric': True}
>>> summary_collector = SummaryCollector('./summary_dir',
>>> collect_specified_data=specified,
>>> keep_default_action=False,
>>> custom_lineage_data={'version': 'resnet50_v1'}
>>> )
>>> model.train(epoch, dataset, callbacks=summary_collector)
"""
_DEFAULT_SPECIFIED_DATA = {
'collect_metric': True,
'collect_graph': True,
'collect_train_lineage': True,
'collect_eval_lineage': True,
'collect_input_data': True,
'collect_dataset_graph': True,
'histogram_regular': None
}
def __init__(self,
summary_dir,
collect_freq=10,
collect_specified_data=None,
keep_default_action=True,
custom_lineage_data=None,
collect_tensor_freq=None,
max_file_size=None):
super(SummaryCollector, self).__init__()
self._summary_dir = self._process_summary_dir(summary_dir)
self._record = None
self._check_positive('collect_freq', collect_freq)
self._collect_freq = collect_freq
self._check_positive('collect_tensor_freq', collect_tensor_freq, allow_none=True)
self._collect_tensor_freq = collect_tensor_freq
self._tensor_collect_range = None
self._check_positive('max_file_size', max_file_size, allow_none=True)
self._max_file_size = max_file_size
self._check_action(keep_default_action)
self._collect_specified_data = self._process_specified_data(collect_specified_data, keep_default_action)
msg = f"For 'collect_specified_data' the value after processing is: {self._collect_specified_data}."
logger.info(msg)
self._check_custom_lineage_data(custom_lineage_data)
self._custom_lineage_data = custom_lineage_data
self._temp_optimizer = None
self._has_saved_graph = False
self._has_saved_custom_data = False
self._is_parse_loss_success = True
self._first_step = True
self._dataset_sink_mode = True
def __enter__(self):
self._record = SummaryRecord(log_dir=self._summary_dir, max_file_size=self._max_file_size)
self._first_step, self._dataset_sink_mode = True, True
return self
def __exit__(self, *err):
self._record.close()
@staticmethod
def _process_summary_dir(summary_dir):
"""Check the summary dir, and create a new directory if it not exists."""
check_value_type('summary_dir', summary_dir, str)
summary_dir = summary_dir.strip()
if not summary_dir:
raise ValueError('For `summary_dir` the value should be a valid string of path, but got empty string.')
summary_dir = os.path.realpath(summary_dir)
if not os.path.exists(summary_dir):
os.makedirs(summary_dir, exist_ok=True)
else:
if not os.path.isdir(summary_dir):
raise NotADirectoryError('For `summary_dir` it should be a directory path.')
return summary_dir
@staticmethod
def _check_positive(name, value, allow_none=False):
"""Check if the value to be int type and positive."""
if allow_none and value is None:
return
check_value_type(name, value, int)
if value <= 0:
raise ValueError(f'For `{name}` the value should be greater than 0, but got `{value}`.')
@staticmethod
def _check_custom_lineage_data(custom_lineage_data):
"""
Check user custom lineage data.
Args:
custom_lineage_data (dict): The user custom defined data.
Raises:
TypeError: If the type of parameters is invalid.
"""
if custom_lineage_data is None:
return
check_value_type('custom_lineage_data', custom_lineage_data, [dict, type(None)])
for key, value in custom_lineage_data.items():
check_value_type(f'custom_lineage_data -> {key}', key, str)
check_value_type(f'the value of custom_lineage_data -> {key}', value, (int, str, float))
@staticmethod
def _check_action(action):
"""Check action type."""
check_value_type('keep_default_action', action, bool)
def _process_specified_data(self, specified_data, action):
"""Check specified data type and value."""
if specified_data is None:
if action:
return self._DEFAULT_SPECIFIED_DATA
return dict()
check_value_type('collect_specified_data', specified_data, [dict, type(None)])
for param_name in specified_data:
check_value_type(param_name, param_name, [str])
unexpected_params = set(specified_data) - set(self._DEFAULT_SPECIFIED_DATA)
if unexpected_params:
raise ValueError(f'For `collect_specified_data` the keys {unexpected_params} are unsupported, '
f'expect the follow keys: {list(self._DEFAULT_SPECIFIED_DATA.keys())}')
if 'histogram_regular' in specified_data:
check_value_type('histogram_regular', specified_data.get('histogram_regular'), (str, type(None)))
bool_items = set(self._DEFAULT_SPECIFIED_DATA) - {'histogram_regular'}
for item in bool_items:
if item in specified_data:
check_value_type(item, specified_data.get(item), bool)
if action:
result = dict(self._DEFAULT_SPECIFIED_DATA)
result.update(specified_data)
else:
result = specified_data
return result
def begin(self, run_context):
cb_params = run_context.original_args()
self._check_callbacks(cb_params)
if cb_params.mode not in ModeEnum.to_list():
            raise ValueError('Only support `train` (model.train) and `eval` (model.eval) mode, '
                             f'but got `{cb_params.mode}` mode.')
self._record.set_mode(cb_params.mode)
def step_end(self, run_context):
cb_params = run_context.original_args()
if cb_params.mode != ModeEnum.TRAIN.value:
return
if not self._has_saved_graph:
self._collect_graphs(cb_params)
self._collect_dataset_graph(cb_params)
self._has_saved_graph = True
self._record.record(cb_params.cur_step_num)
if self._custom_lineage_data and not self._has_saved_custom_data:
packaged_custom_data = self._package_custom_lineage_data(self._custom_lineage_data)
self._record.add_value('custom_lineage_data', 'custom_lineage_data', packaged_custom_data)
self._has_saved_custom_data = True
self._record.record(cb_params.cur_step_num)
if self._first_step:
# Notice: This way of determining whether dataset sink mode is True does not work in the eval scenario
self._dataset_sink_mode = cb_params.cur_step_num == cb_params.batch_num
self._tensor_collect_range = self._get_tensor_collect_range(cb_params, self._dataset_sink_mode)
self._collect_at_step_end(cb_params, plugin_filter=None)
self._first_step = False
else:
current = cb_params.cur_epoch_num if self._dataset_sink_mode else cb_params.cur_step_num
if current % self._collect_freq == 0 and current in self._tensor_collect_range:
self._collect_at_step_end(cb_params, plugin_filter=None)
elif current in self._tensor_collect_range:
self._collect_at_step_end(cb_params, lambda plugin: plugin == PluginEnum.TENSOR.value)
elif current % self._collect_freq == 0:
self._collect_at_step_end(cb_params, lambda plugin: plugin != PluginEnum.TENSOR.value)
def _get_tensor_collect_range(self, cb_params, dataset_sink_mode):
"""Get tensor collect range."""
total_step = cb_params.epoch_num
if not dataset_sink_mode:
total_step *= cb_params.batch_num
if self._collect_tensor_freq is not None:
# `total_step + 1`: `total_step` would be a value of `cb_params.cur_step_num`.
return range(0, total_step + 1, self._collect_tensor_freq)
summary_to_collect = len(range(0, total_step + 1, self._collect_freq))
default_tensor_summary_limit = 20
if summary_to_collect > default_tensor_summary_limit:
tensor_freq = total_step // (default_tensor_summary_limit - 1)
if tensor_freq > 1:
return range(0, total_step + 1, tensor_freq)[:default_tensor_summary_limit]
            # `cb_params.cur_step_num` counts from 1, so when step 1 is in the range, take one more point than the limit.
return range(0, total_step + 1)[:default_tensor_summary_limit + 1]
return range(0, total_step + 1, self._collect_freq)
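    # Editorial note (worked example, not in the original source): with dataset
    # sink mode off, epoch_num=2 and batch_num=300 give total_step=600. With
    # collect_freq=10 there would be 61 ordinary collection points, which is
    # above the default TensorSummary limit of 20, so tensor_freq = 600 // 19 = 31
    # and TensorSummary is collected at steps 0, 31, 62, ... (20 points in total).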
def _collect_at_step_end(self, cb_params, plugin_filter):
self._collect_input_data(cb_params)
self._collect_metric(cb_params)
self._collect_histogram(cb_params)
self._record.record(cb_params.cur_step_num, plugin_filter=plugin_filter)
def end(self, run_context):
cb_params = run_context.original_args()
if cb_params.mode == ModeEnum.TRAIN.value:
self._collect_train_lineage(cb_params)
else:
self._collect_eval_lineage(cb_params)
            # This is a workaround to avoid recording '_summary_tensor_cache'.
self._record.set_mode('eval')
# There's nothing special about setting step to 0 here, just to satisfy the interface call
self._record.record(step=0)
def _check_callbacks(self, cb_params):
"""Check there if there are duplicate instances of SummaryCollector."""
callbacks = cb_params.list_callback
is_find = False
for callback in callbacks:
if type(callback).__name__ == self.__class__.__name__:
if not is_find:
is_find = True
continue
raise ValueError(f"There are more than one {self.__class__.__name__} instance in callback list,"
f"but expected only one {self.__class__.__name__} instance.")
@staticmethod
def _package_custom_lineage_data(custom_lineage_data):
"""
Package user-defined lineage data into binary data.
Args:
custom_lineage_data (dict): User custom lineage data.
Returns:
            UserDefinedInfo, an object of lineage_pb2.UserDefinedInfo.
"""
user_defined_info = lineage_pb2.UserDefinedInfo()
for key, value in custom_lineage_data.items():
if isinstance(value, int):
attr_name = "map_int32"
elif isinstance(value, float):
attr_name = "map_double"
else:
attr_name = "map_str"
user_info = user_defined_info.user_info.add()
getattr(user_info, attr_name)[key] = value
return user_defined_info
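    # Editorial note (illustrative example, not in the original source): a dict
    # such as {'version': 'resnet50_v1', 'seed': 1, 'dropout': 0.5} is packaged
    # so that 'version' goes to map_str, 'seed' to map_int32 and 'dropout' to
    # map_double of the UserDefinedInfo protobuf message.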
def _collect_input_data(self, cb_params):
"""Only support to collect image data."""
if not self._collect_specified_data.get('collect_input_data'):
return
input_data = getattr(cb_params, 'train_dataset_element', None)
if input_data is None:
self._collect_specified_data['collect_input_data'] = False
logger.info("The 'train_dataset_element' in cb_params is None, maybe there is dataset sink mode.")
return
if isinstance(input_data, (list, tuple)):
input_data = input_data[0]
try:
self._record.add_value(PluginEnum.IMAGE.value, 'input_data/auto', input_data)
except ValueError:
logger.warning('The input data of network are not image, so will not collect by SummaryCollector.')
self._collect_specified_data['collect_input_data'] = False
return
def _collect_dataset_graph(self, cb_params):
"""Only collect train dataset graph."""
if not self._collect_specified_data.get('collect_dataset_graph'):
return
# After analysis, we think that the validated dataset graph and the training dataset graph
# should be consistent under normal scenarios, so only the training dataset graph is collected.
if cb_params.mode == ModeEnum.TRAIN.value:
train_dataset = cb_params.train_dataset
dataset_graph = DatasetGraph()
graph_bytes = dataset_graph.package_dataset_graph(train_dataset)
self._record.add_value('dataset_graph', 'train_dataset', graph_bytes)
def _collect_graphs(self, cb_params):
"""Collect the graph of train network and eval network."""
if not self._collect_specified_data.get('collect_graph'):
return
network = cb_params.train_network if cb_params.mode == ModeEnum.TRAIN.value else cb_params.eval_network
graph_proto = network.get_func_graph_proto()
if graph_proto is None:
return
self._record.add_value(PluginEnum.GRAPH.value, 'train_network/auto', graph_proto)
def _collect_metric(self, cb_params):
"""Collect metric, currently only collection Loss is supported."""
if not self._collect_specified_data.get('collect_metric'):
return
loss = self._get_loss(cb_params)
if loss is None:
return
try:
self._record.add_value(PluginEnum.SCALAR.value, 'loss/auto', loss)
except ValueError:
logger.warning("The output of network is not a scalar, so will not collect loss in SummaryCollector.")
self._collect_specified_data['collect_metric'] = False
def _get_loss(self, cb_params):
"""
Get loss from the network output.
Args:
cb_params (_InternalCallbackParam): Callback parameters.
Returns:
            Union[Tensor, None], a Tensor value (shape [1]) if the loss is parsed successfully, else None.
"""
if not self._is_parse_loss_success:
# If parsing has failed before, avoid repeating it
return None
output = cb_params.net_outputs
if output is None:
logger.warning("Can not find any output by this network, so will not collect loss in SummaryCollector.")
self._is_parse_loss_success = False
return None
if isinstance(output, (int, float, Tensor)):
loss = output
elif isinstance(output, (list, tuple)) and output:
# If the output is a list, since the default network returns loss first,
# we assume that the first one is loss.
loss = output[0]
else:
logger.warning("The output type could not be identified, so no loss was recorded in SummaryCollector.")
self._is_parse_loss_success = False
return None
if not isinstance(loss, Tensor):
loss = Tensor(loss)
precision = 4
loss = Tensor(round(np.mean(loss.asnumpy()), precision))
return loss
def _get_optimizer(self, cb_params):
"""
Get optimizer from the cb_params or parse from the network.
Args:
cb_params (_InternalCallbackParam): Callback parameters.
Returns:
            Union[Optimizer, None], the optimizer if it is parsed successfully, else None.
"""
# 'optimizer_failed' means find optimizer failed, so we will not collect data about optimizer.
optimizer_failed = 'Failed'
if self._temp_optimizer == optimizer_failed:
return None
if self._temp_optimizer is not None:
return self._temp_optimizer
optimizer = cb_params.optimizer
if optimizer is None:
network = cb_params.train_network if cb_params.mode == 'train' else cb_params.eval_network
optimizer = self._parse_optimizer_by_network(network)
if optimizer is None or not isinstance(optimizer, Optimizer):
logger.warning("Can not find optimizer in network, or the optimizer does not inherit MindSpore's "
"optimizer, so we will not collect data about optimizer in SummaryCollector.")
optimizer = None
self._temp_optimizer = optimizer if optimizer is not None else optimizer_failed
return optimizer
@staticmethod
def _parse_optimizer_by_network(network):
"""Parse optimizer from network, if parse success will return a optimizer, else return None."""
optimizer = None
for _, cell in network.cells_and_names():
if isinstance(cell, Optimizer):
return cell
try:
optimizer = getattr(cell, 'optimizer')
except AttributeError:
continue
if not isinstance(optimizer, Optimizer):
continue
# Optimizer found successfully
break
return optimizer
def _collect_histogram(self, cb_params):
"""Collect histogram data, contain the parameter weight and bias."""
# Note: if there is not a key named `histogram_regular` in `self._collect_specified_data`,
# it means we will not collect histogram data.
if 'histogram_regular' not in self._collect_specified_data:
return
optimizer = self._get_optimizer(cb_params)
if optimizer is None:
return
parameters = optimizer.parameters
regular = self._collect_specified_data.get('histogram_regular')
if regular is not None:
for parameter in parameters:
if re.match(regular, parameter.name):
self._record.add_value(PluginEnum.HISTOGRAM.value, parameter.name+'/auto', parameter.data)
return
# Note: If `histogram_regular` in `self._collect_specified_data` and the value is None,
# we will collect the first five parameters.
default_parameter_count = 5
for parameter in parameters[:default_parameter_count]:
self._record.add_value(PluginEnum.HISTOGRAM.value, parameter.name+'/auto', parameter.data)
@staticmethod
def _get_learning_rate(optimizer):
"""
        Parse the learning rate from the optimizer.
Args:
optimizer (Optimizer): A optimizer which inherit the MindSpore Optimizer class.
Returns:
Union[Tensor, None], if parse learning rate success, will return a Tensor, else return None.
"""
learning_rate = optimizer.learning_rate
if not isinstance(learning_rate, Parameter):
logger.warning("The learning rate detected in the optimizer "
"is not a Parameter type, so it is not recorded.")
return None
return learning_rate.data
def _collect_train_lineage(self, cb_params):
"""Collect train lineage data, the detail refer to lineage_pb2.TrainLineage."""
if not self._collect_specified_data.get('collect_train_lineage'):
return
train_lineage = {}
loss = self._get_loss(cb_params)
if loss:
loss_numpy = loss.asnumpy()
loss = float(np.atleast_1d(loss_numpy)[0])
train_lineage[LineageMetadata.loss] = loss
else:
train_lineage[LineageMetadata.loss] = None
optimizer = self._get_optimizer(cb_params)
learning_rate = self._get_learning_rate(optimizer) if optimizer is not None else None
if learning_rate is not None:
train_lineage[LineageMetadata.learning_rate] = list(np.atleast_1d(learning_rate.asnumpy()))[0]
else:
train_lineage[LineageMetadata.learning_rate] = None
train_lineage[LineageMetadata.optimizer] = type(optimizer).__name__ if optimizer else None
train_lineage[LineageMetadata.train_network] = type(cb_params.network).__name__
loss_fn = self._get_loss_fn(cb_params)
train_lineage[LineageMetadata.loss_function] = type(loss_fn).__name__ if loss_fn else None
train_lineage[LineageMetadata.epoch] = cb_params.epoch_num
train_lineage[LineageMetadata.step_num] = cb_params.cur_step_num
train_lineage[LineageMetadata.parallel_mode] = cb_params.parallel_mode
train_lineage[LineageMetadata.device_num] = cb_params.device_number
ckpt_file_path = self._get_ckpt_file_path(cb_params)
train_lineage[LineageMetadata.model_path] = json.dumps(dict(ckpt=ckpt_file_path))
model_size = os.path.getsize(ckpt_file_path) if ckpt_file_path else 0
train_lineage[LineageMetadata.model_size] = model_size
self._parse_dataset(cb_params, train_lineage)
train_lineage_message = self._package_train_lineage_message(train_lineage)
self._record.add_value(PluginEnum.TRAIN_LINEAGE.value, 'train_lineage', train_lineage_message)
@staticmethod
def _package_train_lineage_message(train_lineage):
"""
Package train lineage data into binary data.
Args:
train_lineage (dict): The train lineage dict, refer to the attribute of `_collect_train_lineage` method.
Returns:
            TrainLineage, an object of lineage_pb2.TrainLineage.
"""
lineage_message = lineage_pb2.TrainLineage()
if train_lineage.get(LineageMetadata.train_network) is not None:
lineage_message.algorithm.network = train_lineage.get(LineageMetadata.train_network)
if train_lineage.get(LineageMetadata.loss) is not None:
lineage_message.algorithm.loss = train_lineage.get(LineageMetadata.loss)
# Construct train_dataset message.
if train_lineage.get(LineageMetadata.train_dataset_path) is not None:
lineage_message.train_dataset.train_dataset_path = train_lineage.get(LineageMetadata.train_dataset_path)
if train_lineage.get(LineageMetadata.train_dataset_size) is not None:
lineage_message.train_dataset.train_dataset_size = train_lineage.get(LineageMetadata.train_dataset_size)
# Construct model message
lineage_message.model.path = train_lineage.get(LineageMetadata.model_path)
lineage_message.model.size = train_lineage.get(LineageMetadata.model_size)
# Construct hyper_parameters message.
if train_lineage.get(LineageMetadata.learning_rate) is not None:
lineage_message.hyper_parameters.learning_rate = train_lineage.get(LineageMetadata.learning_rate)
if train_lineage.get(LineageMetadata.optimizer) is not None:
lineage_message.hyper_parameters.optimizer = train_lineage.get(LineageMetadata.optimizer)
if train_lineage.get(LineageMetadata.loss_function) is not None:
lineage_message.hyper_parameters.loss_function = train_lineage.get(LineageMetadata.loss_function)
if train_lineage.get(LineageMetadata.parallel_mode) is not None:
lineage_message.hyper_parameters.parallel_mode = train_lineage.get(LineageMetadata.parallel_mode)
lineage_message.hyper_parameters.epoch = train_lineage.get(LineageMetadata.epoch)
lineage_message.hyper_parameters.device_num = train_lineage.get(LineageMetadata.device_num)
lineage_message.hyper_parameters.batch_size = train_lineage.get(LineageMetadata.batch_size)
return lineage_message
def _parse_dataset(self, cb_params, lineage_dict):
"""
Analyze Dataset to get the dataset path and dataset size.
Args:
cb_params (_InternalCallbackParam): Callback parameters.
lineage_dict (dict): The lineage dict, refer to the attribute
of `_collect_train_lineage` method or `_collect_eval_lineage`.
Returns:
dict, the lineage metadata.
"""
dataset = cb_params.train_dataset if cb_params.mode == ModeEnum.TRAIN.value else cb_params.valid_dataset
try:
dataset_path = self._get_dataset_path(dataset)
except IndexError:
dataset_path = None
if dataset_path and os.path.isfile(dataset_path):
dataset_dir = os.path.dirname(dataset_path)
else:
dataset_dir = dataset_path
batch_num = dataset.get_dataset_size()
batch_size = dataset.get_batch_size()
dataset_size = int(batch_num * batch_size)
lineage_dict[LineageMetadata.batch_size] = batch_size
if cb_params.mode == ModeEnum.TRAIN.value:
lineage_dict[LineageMetadata.train_dataset_path] = dataset_dir
lineage_dict[LineageMetadata.train_dataset_size] = dataset_size
else:
lineage_dict[LineageMetadata.valid_dataset_path] = dataset_dir
lineage_dict[LineageMetadata.valid_dataset_size] = dataset_size
return lineage_dict
def _get_dataset_path(self, output_dataset):
"""
Get dataset path of MindDataset object.
Args:
output_dataset (Union[Dataset, ImageFolderDatasetV2, MnistDataset, Cifar10Dataset, Cifar100Dataset,
VOCDataset, CelebADataset, MindDataset, ManifestDataset, TFRecordDataset, TextFileDataset]):
Refer to mindspore.dataset.Dataset.
Returns:
str, dataset path.
Raises:
IndexError: it means get dataset path failed.
"""
dataset_package = import_module('mindspore.dataset')
dataset_dir_set = (dataset_package.ImageFolderDatasetV2, dataset_package.MnistDataset,
dataset_package.Cifar10Dataset, dataset_package.Cifar100Dataset,
dataset_package.VOCDataset, dataset_package.CelebADataset)
dataset_file_set = (dataset_package.MindDataset, dataset_package.ManifestDataset)
dataset_files_set = (dataset_package.TFRecordDataset, dataset_package.TextFileDataset)
if isinstance(output_dataset, dataset_file_set):
return output_dataset.dataset_file
if isinstance(output_dataset, dataset_dir_set):
return output_dataset.dataset_dir
if isinstance(output_dataset, dataset_files_set):
return output_dataset.dataset_files[0]
return self._get_dataset_path(output_dataset.children[0])
@staticmethod
def _get_ckpt_file_path(cb_params):
"""
Get checkpoint file path from MindSpore callback list.
Args:
cb_params (_InternalCallbackParam): Callback parameters.
Returns:
            Union[str, None], the absolute path of the checkpoint file if found, else None.
"""
callbacks = cb_params.list_callback
ckpt_file_path = None
for callback in callbacks:
if isinstance(callback, ModelCheckpoint):
ckpt_file_path = callback.latest_ckpt_file_name
if ckpt_file_path:
ckpt_file_path = os.path.realpath(ckpt_file_path)
return ckpt_file_path
@staticmethod
def _get_loss_fn(cb_params):
"""
Get loss function by cb_params and analyzing network.
Args:
cb_params (_InternalCallbackParam): Callback parameters.
Returns:
            Union[Cell, None], the loss function Cell if found; otherwise None.
"""
loss_fn = cb_params.loss_fn
if loss_fn is not None:
return loss_fn
if cb_params.mode == ModeEnum.TRAIN.value:
network = cb_params.train_network
else:
network = cb_params.eval_network
for _, cell in network.cells_and_names():
if isinstance(cell, _Loss):
loss_fn = cell
break
return loss_fn
def _collect_eval_lineage(self, cb_params):
"""Collect eval lineage data, the detail refer to lineage_pb2.EvaluationLineage."""
if not self._collect_specified_data.get('collect_eval_lineage'):
return
eval_lineage = dict()
eval_lineage[LineageMetadata.metrics] = json.dumps(cb_params.metrics)
self._parse_dataset(cb_params, eval_lineage)
eval_lineage_message = self._package_eval_lineage_message(eval_lineage)
self._record.add_value(PluginEnum.EVAL_LINEAGE.value, 'eval_lineage', eval_lineage_message)
@staticmethod
def _package_eval_lineage_message(eval_lineage):
"""
Package eval lineage data into binary data.
Args:
eval_lineage (dict): The eval lineage dict, refer to the attribute of `_collect_eval_lineage` method.
Returns:
            EvaluationLineage, an object of lineage_pb2.EvaluationLineage.
"""
lineage_message = lineage_pb2.EvaluationLineage()
if eval_lineage.get(LineageMetadata.metrics) is not None:
lineage_message.metric = eval_lineage.get(LineageMetadata.metrics)
if eval_lineage.get(LineageMetadata.valid_dataset_path) is not None:
lineage_message.valid_dataset.valid_dataset_path = eval_lineage.get(LineageMetadata.valid_dataset_path)
if eval_lineage.get(LineageMetadata.valid_dataset_size) is not None:
lineage_message.valid_dataset.valid_dataset_size = eval_lineage.get(LineageMetadata.valid_dataset_size)
return lineage_message
|
the-stack_106_32197 | import random
import time
print('Welcome to the game. I hope you know the rules. \n')
# time.sleep(3)
print('The game begins now!\n')
# time.sleep(2)
# The coin always starts on 'Heads'; each player may flip it once.
isoc = 'Heads'                      # current side of the coin
cc = ['Flip', 'Not Flip']           # choices available to the computer
ui1 = input("Choose to 'Flip' or 'Not Flip' the coin. I choose to ")
# Computer's first move.
ci1 = random.choice(cc)
if ci1 == 'Flip':
    isoc = 'Tails'
else:
    isoc = 'Heads'
# Player's move.
if ui1 == 'Flip' and isoc == 'Heads':
    isoc = 'Tails'
elif ui1 == 'Flip' and isoc == 'Tails':
    isoc = 'Heads'
# Computer's second move.
ci2 = random.choice(cc)
if ci2 == 'Flip' and isoc == 'Heads':
    isoc = 'Tails'
elif ci2 == 'Flip' and isoc == 'Tails':
    isoc = 'Heads'
if isoc == 'Tails':
    print('Congrats, you made it. You win!')
if isoc == 'Heads':
    print('Sorry, but the computer won.')
|
the-stack_106_32199 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Any, TYPE_CHECKING, Union, Optional
from .iterators import ReactionIterator
__all__ = (
'Reaction',
)
if TYPE_CHECKING:
from .types.message import Reaction as ReactionPayload
from .message import Message
from .partial_emoji import PartialEmoji
from .emoji import Emoji
from .abc import Snowflake
class Reaction:
"""Represents a reaction to a message.
Depending on the way this object was created, some of the attributes can
have a value of ``None``.
.. container:: operations
.. describe:: x == y
Checks if two reactions are equal. This works by checking if the emoji
is the same. So two messages with the same reaction will be considered
"equal".
.. describe:: x != y
Checks if two reactions are not equal.
.. describe:: hash(x)
Returns the reaction's hash.
.. describe:: str(x)
Returns the string form of the reaction's emoji.
Attributes
-----------
emoji: Union[:class:`Emoji`, :class:`PartialEmoji`, :class:`str`]
The reaction emoji. May be a custom emoji, or a unicode emoji.
count: :class:`int`
Number of times this reaction was made
me: :class:`bool`
If the user sent this reaction.
message: :class:`Message`
Message this reaction is for.
"""
__slots__ = ('message', 'count', 'emoji', 'me')
def __init__(self, *, message: Message, data: ReactionPayload, emoji: Optional[Union[PartialEmoji, Emoji, str]] = None):
self.message: Message = message
self.emoji: Union[PartialEmoji, Emoji, str] = emoji or message._state.get_reaction_emoji(data['emoji'])
self.count: int = data.get('count', 1)
self.me: bool = data.get('me')
# TODO: typeguard
def is_custom_emoji(self) -> bool:
""":class:`bool`: If this is a custom emoji."""
return not isinstance(self.emoji, str)
def __eq__(self, other: Any) -> bool:
return isinstance(other, self.__class__) and other.emoji == self.emoji
def __ne__(self, other: Any) -> bool:
if isinstance(other, self.__class__):
return other.emoji != self.emoji
return True
def __hash__(self) -> int:
return hash(self.emoji)
def __str__(self) -> str:
return str(self.emoji)
def __repr__(self) -> str:
return f'<Reaction emoji={self.emoji!r} me={self.me} count={self.count}>'
async def remove(self, user: Snowflake) -> None:
"""|coro|
Remove the reaction by the provided :class:`User` from the message.
If the reaction is not your own (i.e. ``user`` parameter is not you) then
the :attr:`~Permissions.manage_messages` permission is needed.
The ``user`` parameter must represent a user or member and meet
the :class:`abc.Snowflake` abc.
Parameters
-----------
user: :class:`abc.Snowflake`
The user or member from which to remove the reaction.
Raises
-------
HTTPException
Removing the reaction failed.
Forbidden
You do not have the proper permissions to remove the reaction.
NotFound
The user you specified, or the reaction's message was not found.
"""
await self.message.remove_reaction(self.emoji, user)
async def clear(self) -> None:
"""|coro|
Clears this reaction from the message.
You need the :attr:`~Permissions.manage_messages` permission to use this.
.. versionadded:: 1.3
Raises
--------
HTTPException
Clearing the reaction failed.
Forbidden
You do not have the proper permissions to clear the reaction.
NotFound
The emoji you specified was not found.
InvalidArgument
The emoji parameter is invalid.
"""
await self.message.clear_reaction(self.emoji)
def users(self, *, limit: Optional[int] = None, after: Optional[Snowflake] = None) -> ReactionIterator:
"""Returns an :class:`AsyncIterator` representing the users that have reacted to the message.
The ``after`` parameter must represent a member
and meet the :class:`abc.Snowflake` abc.
Examples
---------
Usage ::
# I do not actually recommend doing this.
async for user in reaction.users():
await channel.send(f'{user} has reacted with {reaction.emoji}!')
Flattening into a list: ::
users = await reaction.users().flatten()
# users is now a list of User...
winner = random.choice(users)
await channel.send(f'{winner} has won the raffle.')
Parameters
------------
limit: Optional[:class:`int`]
The maximum number of results to return.
If not provided, returns all the users who
reacted to the message.
after: Optional[:class:`abc.Snowflake`]
For pagination, reactions are sorted by member.
Raises
--------
HTTPException
Getting the users for the reaction failed.
Yields
--------
Union[:class:`User`, :class:`Member`]
The member (if retrievable) or the user that has reacted
to this message. The case where it can be a :class:`Member` is
in a guild message context. Sometimes it can be a :class:`User`
if the member has left the guild.
"""
if not isinstance(self.emoji, str):
emoji = f'{self.emoji.name}:{self.emoji.id}'
else:
emoji = self.emoji
if limit is None:
limit = self.count
return ReactionIterator(self.message, emoji, limit, after)
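# A small usage sketch, not part of discord.py itself: summarise the reactions on a
# message by emoji. `channel` and `message_id` are assumed to come from the calling
# bot code.
async def reaction_summary(channel, message_id):
    message = await channel.fetch_message(message_id)
    return {str(reaction.emoji): reaction.count for reaction in message.reactions}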
|
the-stack_106_32204 | #!/usr/bin/env python
"""
Simple example that sets a key, and retrieves it again.
"""
import asyncio
from asyncio_redis import RedisProtocol
async def main():
# Create connection
transport, protocol = await loop.create_connection(RedisProtocol, "localhost", 6379)
# Set a key
await protocol.set("key", "value")
# Retrieve a key
result = await protocol.get("key")
# Print result
print("Succeeded", result == "value")
transport.close()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
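# For comparison, a sketch of the same round trip via asyncio_redis's higher-level
# Connection wrapper, which manages the transport itself. The call shapes below are
# assumed to match the installed version; this function is only an illustration and
# is not invoked above.
async def main_with_connection():
    import asyncio_redis
    connection = await asyncio_redis.Connection.create(host="localhost", port=6379)
    await connection.set("key", "value")
    print("Succeeded", await connection.get("key") == "value")
    connection.close()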
|
the-stack_106_32205 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
# Only difference from the original (hence the above Copyright): .variances -> .variance
import numpy as np
from GPy.kern.src.kern import Kern
from GPy.util.linalg import tdot
from GPy.core.parameterization import Param
from paramz.transformations import Logexp
from paramz.caching import Cache_this
from GPy.kern.src.psi_comp import PSICOMP_Linear
class Linear(Kern):
"""
Linear kernel
.. math::
k(x,y) = \sum_{i=1}^{\\text{input_dim}} \sigma^2_i x_iy_i
:param input_dim: the number of input dimensions
:type input_dim: int
:param variance: the vector of variance :math:`\sigma^2_i`
:type variance: array or list of the appropriate size (or float if there
is only one variance parameter)
:param ARD: Auto Relevance Determination. If False, the kernel has only one
variance parameter \sigma^2, otherwise there is one variance
parameter per dimension.
:type ARD: Boolean
:rtype: kernel object
"""
def __init__(self, input_dim, variance=None, ARD=False, active_dims=None, name='linear'):
super(Linear, self).__init__(input_dim, active_dims, name)
self.ARD = ARD
if not ARD:
if variance is not None:
variance = np.asarray(variance)
assert variance.size == 1, "Only one variance needed for non-ARD kernel"
else:
variance = np.ones(1)
else:
if variance is not None:
variance = np.asarray(variance)
assert variance.size == self.input_dim, "bad number of variance, need one ARD variance per input_dim"
else:
variance = np.ones(self.input_dim)
self.variance = Param('variance', variance, Logexp())
self.link_parameter(self.variance)
self.psicomp = PSICOMP_Linear()
def to_dict(self):
input_dict = super(Linear, self)._save_to_input_dict()
input_dict["class"] = "GPy.kern.Linear"
input_dict["variance"] = self.variance.values.tolist()
input_dict["ARD"] = self.ARD
return input_dict
@staticmethod
def _build_from_input_dict(kernel_class, input_dict):
useGPU = input_dict.pop('useGPU', None)
return Linear(**input_dict)
@Cache_this(limit=3)
def K(self, X, X2=None):
if self.ARD:
if X2 is None:
return tdot(X*np.sqrt(self.variance))
else:
rv = np.sqrt(self.variance)
return np.dot(X*rv, (X2*rv).T)
else:
return self._dot_product(X, X2) * self.variance
@Cache_this(limit=3, ignore_args=(0,))
def _dot_product(self, X, X2=None):
if X2 is None:
return tdot(X)
else:
return np.dot(X, X2.T)
def Kdiag(self, X):
return np.sum(self.variance * np.square(X), -1)
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None: dL_dK = (dL_dK+dL_dK.T)/2
if self.ARD:
if X2 is None:
#self.variance.gradient = np.array([np.sum(dL_dK * tdot(X[:, i:i + 1])) for i in range(self.input_dim)])
self.variance.gradient = (dL_dK.dot(X)*X).sum(0) #np.einsum('ij,iq,jq->q', dL_dK, X, X)
else:
#product = X[:, None, :] * X2[None, :, :]
#self.variance.gradient = (dL_dK[:, :, None] * product).sum(0).sum(0)
self.variance.gradient = (dL_dK.dot(X2)*X).sum(0) #np.einsum('ij,iq,jq->q', dL_dK, X, X2)
else:
self.variance.gradient = np.sum(self._dot_product(X, X2) * dL_dK)
def update_gradients_diag(self, dL_dKdiag, X):
tmp = dL_dKdiag[:, None] * X ** 2
if self.ARD:
self.variance.gradient = tmp.sum(0)
else:
self.variance.gradient = np.atleast_1d(tmp.sum())
def gradients_X(self, dL_dK, X, X2=None):
if X2 is None: dL_dK = (dL_dK+dL_dK.T)/2
if X2 is None:
return dL_dK.dot(X)*(2*self.variance) #np.einsum('jq,q,ij->iq', X, 2*self.variance, dL_dK)
else:
#return (((X2[None,:, :] * self.variance)) * dL_dK[:, :, None]).sum(1)
return dL_dK.dot(X2)*self.variance #np.einsum('jq,q,ij->iq', X2, self.variance, dL_dK)
def gradients_XX(self, dL_dK, X, X2=None):
"""
Given the derivative of the objective K(dL_dK), compute the second derivative of K wrt X and X2:
            returns the full covariance matrix [QxQ] of the input dimension for each pair of vectors, thus
            the returned array is of shape [NxNxQxQ].
        .. math::
            \frac{\partial^2 K}{\partial X2^2} = - \frac{\partial^2 K}{\partial X \partial X2}
        ..returns:
            dL2_dXdX2: [NxMxQxQ] for X [NxQ] and X2 [MxQ] (X2 is X if X2 is None)
Thus, we return the second derivative in X2.
"""
if X2 is None:
X2 = X
return np.zeros((X.shape[0], X2.shape[0], X.shape[1], X.shape[1]))
#if X2 is None: dL_dK = (dL_dK+dL_dK.T)/2
#if X2 is None:
# return np.ones(np.repeat(X.shape, 2)) * (self.variance[None,:] + self.variance[:, None])[None, None, :, :]
#else:
# return np.ones((X.shape[0], X2.shape[0], X.shape[1], X.shape[1])) * (self.variance[None,:] + self.variance[:, None])[None, None, :, :]
def gradients_X_diag(self, dL_dKdiag, X):
return 2.*self.variance*dL_dKdiag[:,None]*X
def gradients_XX_diag(self, dL_dKdiag, X):
return np.zeros((X.shape[0], X.shape[1], X.shape[1]))
#dims = X.shape
#if cov:
# dims += (X.shape[1],)
#return 2*np.ones(dims)*self.variance
def input_sensitivity(self, summarize=True):
return np.ones(self.input_dim) * self.variance
#---------------------------------------#
# PSI statistics #
#---------------------------------------#
def psi0(self, Z, variational_posterior):
return self.psicomp.psicomputations(self, Z, variational_posterior)[0]
def psi1(self, Z, variational_posterior):
return self.psicomp.psicomputations(self, Z, variational_posterior)[1]
def psi2(self, Z, variational_posterior):
return self.psicomp.psicomputations(self, Z, variational_posterior)[2]
def psi2n(self, Z, variational_posterior):
return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=True)[2]
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
dL_dvar = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[0]
if self.ARD:
self.variance.gradient = dL_dvar
else:
self.variance.gradient = dL_dvar.sum()
def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[1]
def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[2:] |
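# A quick usage sketch (not part of the original GPy module): build an ARD linear
# kernel from the class above and evaluate it on random inputs. The diagonal of the
# Gram matrix should match Kdiag.
if __name__ == "__main__":
    X = np.random.randn(5, 3)
    kern = Linear(input_dim=3, variance=[1.0, 0.5, 2.0], ARD=True)
    K = kern.K(X)                                   # 5x5 Gram matrix
    assert np.allclose(np.diag(K), kern.Kdiag(X))   # sum_q sigma_q^2 x_iq^2 on the diagonal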
the-stack_106_32206 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
from typing import List
from fairseq.data.legacy.masked_lm_dictionary import BertDictionary
from pytext.config.component import ComponentType, create_component
from pytext.data.tensorizers import TokenTensorizer
from pytext.data.tokenizers import Tokenizer, WordPieceTokenizer
from pytext.data.utils import BOS, EOS, MASK, PAD, UNK, Vocabulary, pad_and_tensorize
class BERTTensorizer(TokenTensorizer):
"""
Tensorizer for BERT tasks. Works for single sentence, sentence pair, triples etc.
"""
__EXPANSIBLE__ = True
class Config(TokenTensorizer.Config):
#: The tokenizer to use to split input text into tokens.
columns: List[str] = ["text"]
tokenizer: Tokenizer.Config = WordPieceTokenizer.Config()
add_bos_token: bool = False
add_eos_token: bool = True
bos_token: str = "[CLS]"
eos_token: str = "[SEP]"
pad_token: str = "[PAD]"
unk_token: str = "[UNK]"
mask_token: str = "[MASK]"
vocab_file: str = WordPieceTokenizer.Config().wordpiece_vocab_path
@classmethod
def from_config(cls, config: Config, **kwargs):
tokenizer = create_component(ComponentType.TOKENIZER, config.tokenizer)
replacements = {
config.unk_token: UNK,
config.pad_token: PAD,
config.bos_token: BOS,
config.eos_token: EOS,
config.mask_token: MASK,
}
if isinstance(tokenizer, WordPieceTokenizer):
vocab = Vocabulary(
[token for token, _ in tokenizer.vocab.items()],
replacements=replacements,
)
else:
dictionary = BertDictionary.load(config.vocab_file)
vocab = Vocabulary(
dictionary.symbols, dictionary.count, replacements=replacements
)
return cls(
columns=config.columns,
tokenizer=tokenizer,
add_bos_token=config.add_bos_token,
add_eos_token=config.add_eos_token,
use_eos_token_for_bos=config.use_eos_token_for_bos,
max_seq_len=config.max_seq_len,
vocab=vocab,
**kwargs,
)
def __init__(self, columns, **kwargs):
super().__init__(text_column=None, **kwargs)
self.columns = columns
# Manually initialize column_schema since we are sending None to TokenTensorizer
@property
def column_schema(self):
return [(column, str) for column in self.columns]
def numberize(self, row):
"""Tokenize, look up in vocabulary."""
sentences = [self._lookup_tokens(row[column])[0] for column in self.columns]
sentences[0] = [self.vocab.idx[BOS]] + sentences[0]
seq_lens = (len(sentence) for sentence in sentences)
segment_labels = ([i] * seq_len for i, seq_len in enumerate(seq_lens))
tokens = list(itertools.chain(*sentences))
segment_labels = list(itertools.chain(*segment_labels))
seq_len = len(tokens)
# tokens, segment_label, seq_len
return tokens, segment_labels, seq_len
def sort_key(self, row):
return row[2]
def tensorize(self, batch):
tokens, segment_labels, seq_lens = zip(*batch)
tokens = pad_and_tensorize(tokens, self.vocab.get_pad_index())
pad_mask = (tokens != self.vocab.get_pad_index()).long()
segment_labels = pad_and_tensorize(segment_labels, self.vocab.get_pad_index())
return tokens, pad_mask, segment_labels
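# A pytext-free sketch of the packing rule implemented in `numberize` above: every
# column gets a trailing [SEP] (appended by _lookup_tokens when add_eos_token=True),
# the first column is prefixed with [CLS], and segment ids follow the column index.
# The default ids below are illustrative only, not from any real vocabulary.
def _pack_sketch(sentences_ids, cls_id=101, sep_id=102):
    sentences_ids = [list(sent) + [sep_id] for sent in sentences_ids]
    sentences_ids[0] = [cls_id] + sentences_ids[0]
    tokens = [tok for sent in sentences_ids for tok in sent]
    segment_labels = [i for i, sent in enumerate(sentences_ids) for _ in sent]
    return tokens, segment_labels, len(tokens)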
|
the-stack_106_32207 | """Expectation values and other statistical measures for operators: ``<A>"""
from __future__ import print_function, division
from sympy import Expr, Add, Mul, Integer, Symbol, Integral
from sympy.physics.quantum import Dagger
from sympy.physics.quantum import qapply
__all__ = [
'Expectation',
'Covariance'
]
#-----------------------------------------------------------------------------
# Expectation
#-----------------------------------------------------------------------------
class Expectation(Expr):
"""
Expectation Value of an operator, expressed in terms of bracket <A>.
If the second argument, 'is_normal_order' is 'True',
the normal ordering notation (<: :>) is attached.
doit() returns the normally ordered operator inside the bracket.
Parameters
==========
A : Expr
The argument of the expectation value <A>
is_normal_order : bool
A bool that indicates if the operator inside the Expectation
value bracket should be normally ordered (True) or left
untouched (False, default value)
Examples
========
>>> a = BosonOp("a")
>>> Expectation(a * Dagger(a))
<a a†>
>>> Expectation(a * Dagger(a), True)
<:a a†:>
>>> Expectation(a * Dagger(a), True).doit()
<a† a>
"""
is_commutative = True
@property
def expression(self):
return self.args[0]
@property
def is_normal_order(self):
return bool(self.args[1])
@classmethod
def default_args(self):
return (Symbol("A"), False)
def __new__(cls, *args):
if not len(args) in [1, 2]:
raise ValueError('1 or 2 parameters expected, got %s' % str(args))
if len(args) == 1:
args = (args[0], Integer(0))
if len(args) == 2:
args = (args[0], Integer(args[1]))
return Expr.__new__(cls, *args)
def _eval_expand_expectation(self, **hints):
A = self.args[0]
if isinstance(A, Add):
# <A + B> = <A> + <B>
return Add(*(Expectation(a, self.is_normal_order).expand(expectation=True) for a in A.args))
if isinstance(A, Mul):
# <c A> = c<A> where c is a commutative term
A = A.expand()
cA, ncA = A.args_cnc()
return Mul(Mul(*cA), Expectation(Mul._from_args(ncA), self.is_normal_order).expand())
if isinstance(A, Integral):
# <∫adx> -> ∫<a>dx
func, lims = A.function, A.limits
new_args = [Expectation(func, self.is_normal_order).expand()]
for lim in lims:
new_args.append(lim)
return Integral(*new_args)
return self
def doit(self, **hints):
"""
return the expectation value normally ordered operator if is_normal_order=True
"""
from sympsi.operatorordering import normal_order
if self.is_normal_order == True:
return Expectation(normal_order(self.args[0]), False)
return self
def eval_state(self, state):
return qapply(Dagger(state) * self.args[0] * state, dagger=True).doit()
def _latex(self, printer, *args):
if self.is_normal_order:
return r"\left\langle: %s :\right\rangle" % printer._print(self.args[0], *args)
else:
return r"\left\langle %s \right\rangle" % printer._print(self.args[0], *args)
#-----------------------------------------------------------------------------
# Covariance
#-----------------------------------------------------------------------------
class Covariance(Expr):
"""Covariance of two operators, expressed in terms of bracket <A, B>
If the third argument, 'is_normal_order' is 'True',
the normal ordering notation (<: , :>) is attached.
doit() returns the expression in terms of expectation values.
< A, B > --> < AB > - < A >< B >
Parameters
==========
A : Expr
The first argument of the expectation value
B : Expr
The second argument of the expectation value
is_normal_order : bool
A bool that indicates if the operator inside the Expectation
value bracket should be normally ordered (True) or left
untouched (False, default value)
Examples
========
>>> A, B = Operator("A"), Operator("B")
>>> Covariance(A, B)
< A, B >
>>> Covariance(A, B, True)
<:A, B:>
>>> Covariance(A, B).doit()
< AB > - < A >< B >
>>> Covariance(A, B, True).doit()
<:AB:> - <:A:><:B:>
"""
is_commutative = True
@property
def is_normal_order(self):
return bool(self.args[2])
@classmethod
def default_args(self):
return (Symbol("A"), Symbol("B"), False)
def __new__(cls, *args, **hints):
if not len(args) in [2, 3]:
raise ValueError('2 or 3 parameters expected, got %s' % args)
if len(args) == 2:
args = (args[0], args[1], Integer(0))
if len(args) == 3:
args = (args[0], args[1], Integer(args[2]))
return Expr.__new__(cls, *args)
def _eval_expand_covariance(self, **hints):
A, B = self.args[0], self.args[1]
# <A + B, C> = <A, C> + <B, C>
if isinstance(A, Add):
return Add(*(Covariance(a, B, self.is_normal_order).expand()
for a in A.args))
# <A, B + C> = <A, B> + <A, C>
if isinstance(B, Add):
return Add(*(Covariance(A, b, self.is_normal_order).expand()
for b in B.args))
if isinstance(A, Mul):
A = A.expand()
cA, ncA = A.args_cnc()
return Mul(Mul(*cA), Covariance(Mul._from_args(ncA), B,
self.is_normal_order).expand())
if isinstance(B, Mul):
B = B.expand()
cB, ncB = B.args_cnc()
return Mul(Mul(*cB), Covariance(A, Mul._from_args(ncB),
self.is_normal_order).expand())
if isinstance(A, Integral):
# <∫adx, B> -> ∫<a, B>dx
func, lims = A.function, A.limits
new_args = [Covariance(func, B, self.is_normal_order).expand()]
for lim in lims:
new_args.append(lim)
return Integral(*new_args)
if isinstance(B, Integral):
# <A, ∫bdx> -> ∫<A, b>dx
func, lims = B.function, B.limits
new_args = [Covariance(A, func, self.is_normal_order).expand()]
for lim in lims:
new_args.append(lim)
return Integral(*new_args)
return self
def doit(self, **hints):
""" Evaluate covariance of two operators A and B """
A = self.args[0]
B = self.args[1]
no = self.is_normal_order
return Expectation(A*B, no) - Expectation(A, no) * Expectation(B, no)
def _latex(self, printer, *args):
if self.is_normal_order:
return r"\left\langle: %s, %s :\right\rangle" % tuple([
printer._print(self.args[0], *args),
printer._print(self.args[1], *args)])
else:
return r"\left\langle %s, %s \right\rangle" % tuple([
printer._print(self.args[0], *args),
printer._print(self.args[1], *args)])
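# A short usage sketch (not part of the module): expand a covariance of boson
# operators into expectation values. BosonOp is taken from sympy.physics.quantum;
# treat the exact import path as an assumption for your sympy version.
if __name__ == "__main__":
    from sympy.physics.quantum.boson import BosonOp
    a = BosonOp("a")
    print(Covariance(a, Dagger(a)).doit())   # expands to <a a†> - <a><a†>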
|
the-stack_106_32209 | import unittest
def midpoint(linked_list):
slow = linked_list.head
fast = linked_list.head
while slow.next_node is not None and fast.next_node is not None and fast.next_node.next_node is not None:
slow = slow.next_node
fast = fast.next_node.next_node
return slow
class TestMidpoint(unittest.TestCase):
def test_odd_numbered_linked_list(self):
linked_list = LinkedList()
linked_list.insert_last('a')
linked_list.insert_last('b')
linked_list.insert_last('c')
self.assertEqual('b', midpoint(linked_list).data)
linked_list = LinkedList()
linked_list.insert_last('a')
linked_list.insert_last('b')
linked_list.insert_last('c')
linked_list.insert_last('d')
linked_list.insert_last('e')
self.assertEqual('c', midpoint(linked_list).data)
def test_even_numbered_linked_list(self):
linked_list = LinkedList()
linked_list.insert_last('a')
linked_list.insert_last('b')
self.assertEqual('a', midpoint(linked_list).data)
linked_list = LinkedList()
linked_list.insert_last('a')
linked_list.insert_last('b')
linked_list.insert_last('c')
linked_list.insert_last('d')
self.assertEqual('b', midpoint(linked_list).data)
class Node:
def __init__(self, data, next_node=None):
self.data = data
self.next_node = next_node
class LinkedList:
def __init__(self):
self.head = None
def insert_first(self, data):
self.insert_at(data, 0)
def size(self):
size = 0
node = self.head
while node is not None:
size = size + 1
node = node.next_node
return size
def get_first(self):
return self.get_at(0)
def get_last(self):
return self.get_at(self.size() - 1)
def clear(self):
self.head = None
def remove_first(self):
if self.head is not None:
self.head = self.head.next_node
def remove_last(self):
size_of_linked_list = self.size()
        if size_of_linked_list == 0:
            return
        elif size_of_linked_list == 1:
self.head = None
else:
previous = self.head
last = self.head
while last is not None and last.next_node is not None:
previous = last
last = last.next_node
previous.next_node = None
def insert_last(self, data):
self.insert_at(data, self.size())
def get_at(self, index):
if index >= self.size() or index < 0 or self.head is None:
return None
else:
node = self.head
counter = 0
while counter < index:
node = node.next_node
counter = counter + 1
return node
def remove_at(self, index):
        if self.size() == 0:
            return
        elif index == 0:
self.head = self.head.next_node
elif 0 < index < self.size():
counter = 0
previous_node = self.get_at(index - 1)
previous_node.next_node = previous_node.next_node.next_node
def insert_at(self, data, index):
new_node = Node(data)
if self.size() == 0:
self.head = new_node
elif index <= 0:
new_node.next_node = self.head
self.head = new_node
elif index >= self.size():
last_node = self.get_last()
last_node.next_node = new_node
else:
previous_node = self.get_at(index - 1)
new_node.next_node = previous_node.next_node
previous_node.next_node = new_node
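# Run the tests (the file defines them but never invokes a runner) and do one quick
# manual check of midpoint on a five-node list.
if __name__ == '__main__':
    sample = LinkedList()
    for value in ['a', 'b', 'c', 'd', 'e']:
        sample.insert_last(value)
    print(midpoint(sample).data)   # -> 'c'
    unittest.main()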
|
the-stack_106_32211 | # This file comes originally from https://github.com/google-research/bert/blob/master/tokenization.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
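# A self-contained sketch of the greedy longest-match-first pass implemented by
# WordpieceTokenizer above, using a tiny in-memory vocabulary instead of a real
# vocab file (the normal entry point is FullTokenizer(vocab_file=...)). The toy
# vocabulary below is made up for illustration.
if __name__ == "__main__":
  toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
  wordpiece = WordpieceTokenizer(vocab=toy_vocab)
  print(wordpiece.tokenize("unaffable"))  # -> ['un', '##aff', '##able']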
|
the-stack_106_32213 | import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import random
import os
print("FRIDAY")
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice',voices[1].id)
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishme():
hour = int(datetime.datetime.now().hour)
    if 0 <= hour < 12:
        speak("good morning! ")
    elif 12 <= hour < 18:
        speak("good afternoon!")
    else:
        speak("good evening! ")
    speak("How may I help you?")
def takecommand():
r = sr.Recognizer()
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source,duration=1)
#r.pause_threshold = 1
print("Listening....")
audio = r.listen(source,phrase_time_limit=4)
#print("m done ")
try :
print("Recognizing....")
query =r.recognize_google(audio,language='en-in')
#print("user said : ",query)
#speak(query)
except Exception :
print("Say that again please !! ")
return "none"
return query
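# Optional helper for testing without a microphone: the same recognizer flow also
# works on a recorded clip through speech_recognition's AudioFile source. The
# 'sample.wav' path is a placeholder, not a file shipped with this script.
def takecommand_from_file(path='sample.wav'):
    r = sr.Recognizer()
    with sr.AudioFile(path) as source:
        audio = r.record(source)
    try:
        return r.recognize_google(audio, language='en-in')
    except Exception:
        print("Say that again please !! ")
        return "none"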
if __name__ == "__main__":
wishme()
while True:
query = takecommand().lower()
#TASKS
        if 'wikipedia' in query:
            try:
                speak('searching wikipedia.....')
                query = query.replace("wikipedia", "")
                results = wikipedia.summary(query, sentences=2)
                speak("according to wikipedia")
                speak(results)
            except Exception:
                speak("I did not get it. Try saying that again")
elif "open youtube" in query:
speak("opening youtube")
            webbrowser.get(chrome_path).open_new_tab("youtube.com")
        elif "study time" in query:
            speak("starting lofi hiphop")
            webbrowser.get(chrome_path).open_new_tab("https://www.youtube.com/watch?v=hHW1oY26kxQ")
        elif "github" in query:
            speak("opening github")
            webbrowser.get(chrome_path).open_new_tab("https://github.com/")
elif "play music" in query:
speak("playing music ")
music_dir = "C:\\Users\\shikh\\Desktop\\music"
songs = os.listdir(music_dir)
num = random.randint(0,len(songs))
os.startfile(os.path.join(music_dir,songs[num-1]))
elif "the time" in query:
strtime = datetime.datetime.now().strftime("%I%p")
speak(f"It's {strtime} ")
elif "open code" in query :
speak("opening visual studio code")
codepath = "C:\\Users\\shikh\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(codepath)
elif "open c plus plus" in query :
speak("opening codeblocks")
codepath = "C:\\Program Files (x86)\\CodeBlocks\\codeblocks.exe"
os.startfile(codepath)
elif "open word" in query :
speak("opening microsoft word")
codepath = "C:\\Program Files (x86)\\Microsoft Office\\root\\Office16\\WINWORD.EXE"
os.startfile(codepath)
elif "google" in query:
query = query.replace("google search","")
speak ('searching on google.....')
url = "https://www.google.co.in/search?q=" +(str(query))+ "&oq="+(str(query))+"&gs_l=serp.12..0i71l8.0.0.0.6391.0.0.0.0.0.0.0.0..0.0....0...1c..64.serp..0.0.0.UiQhpfaBsuU"
webbrowser.open_new_tab(url)
elif "exit" in query:
speak("see you later")
exit()
|