content (string) | avg_line_length (float64) | max_line_length (int64) | alphanum_fraction (float64) | licenses (sequence) | repository_name (string) | path (string) | size (int64) | lang (string)
---|---|---|---|---|---|---|---|---|
from globconf import config
from globconf import verify_required_options
import unittest
import os
# let's test on a predefined file included with the unit tests
config.read(os.path.dirname(__file__)+'/config.ini')
class TestConf(unittest.TestCase):
def test_config_file_present(self):
self.assertTrue(os.path.isfile(os.path.dirname(__file__)+'/config.ini'), 'config.ini in project root dir is missing')
def test_verify_required_options(self):
self.assertTrue(verify_required_options('SectionOne', ['parameter_one', 'parameter_two']))
def test_verify_sections(self):
self.assertEqual(2, 2)
| 31.45 | 125 | 0.756757 | [
"MIT"
] | klang/globconf | test/test_verify_required_options.py | 629 | Python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v2/proto/enums/recommendation_type.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v2/proto/enums/recommendation_type.proto',
package='google.ads.googleads.v2.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v2.enumsB\027RecommendationTypeProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v2/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V2.Enums\312\002\035Google\\Ads\\GoogleAds\\V2\\Enums\352\002!Google::Ads::GoogleAds::V2::Enums'),
serialized_pb=_b('\n=google/ads/googleads_v2/proto/enums/recommendation_type.proto\x12\x1dgoogle.ads.googleads.v2.enums\x1a\x1cgoogle/api/annotations.proto\"\x92\x03\n\x16RecommendationTypeEnum\"\xf7\x02\n\x12RecommendationType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x13\n\x0f\x43\x41MPAIGN_BUDGET\x10\x02\x12\x0b\n\x07KEYWORD\x10\x03\x12\x0b\n\x07TEXT_AD\x10\x04\x12\x15\n\x11TARGET_CPA_OPT_IN\x10\x05\x12\x1f\n\x1bMAXIMIZE_CONVERSIONS_OPT_IN\x10\x06\x12\x17\n\x13\x45NHANCED_CPC_OPT_IN\x10\x07\x12\x1a\n\x16SEARCH_PARTNERS_OPT_IN\x10\x08\x12\x1a\n\x16MAXIMIZE_CLICKS_OPT_IN\x10\t\x12\x18\n\x14OPTIMIZE_AD_ROTATION\x10\n\x12\x15\n\x11\x43\x41LLOUT_EXTENSION\x10\x0b\x12\x16\n\x12SITELINK_EXTENSION\x10\x0c\x12\x12\n\x0e\x43\x41LL_EXTENSION\x10\r\x12\x16\n\x12KEYWORD_MATCH_TYPE\x10\x0e\x12\x16\n\x12MOVE_UNUSED_BUDGET\x10\x0f\x42\xec\x01\n!com.google.ads.googleads.v2.enumsB\x17RecommendationTypeProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v2/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V2.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V2\\Enums\xea\x02!Google::Ads::GoogleAds::V2::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_RECOMMENDATIONTYPEENUM_RECOMMENDATIONTYPE = _descriptor.EnumDescriptor(
name='RecommendationType',
full_name='google.ads.googleads.v2.enums.RecommendationTypeEnum.RecommendationType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_BUDGET', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='KEYWORD', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TEXT_AD', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TARGET_CPA_OPT_IN', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MAXIMIZE_CONVERSIONS_OPT_IN', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ENHANCED_CPC_OPT_IN', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SEARCH_PARTNERS_OPT_IN', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MAXIMIZE_CLICKS_OPT_IN', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OPTIMIZE_AD_ROTATION', index=10, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CALLOUT_EXTENSION', index=11, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SITELINK_EXTENSION', index=12, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CALL_EXTENSION', index=13, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='KEYWORD_MATCH_TYPE', index=14, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOVE_UNUSED_BUDGET', index=15, number=15,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=154,
serialized_end=529,
)
_sym_db.RegisterEnumDescriptor(_RECOMMENDATIONTYPEENUM_RECOMMENDATIONTYPE)
_RECOMMENDATIONTYPEENUM = _descriptor.Descriptor(
name='RecommendationTypeEnum',
full_name='google.ads.googleads.v2.enums.RecommendationTypeEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_RECOMMENDATIONTYPEENUM_RECOMMENDATIONTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=127,
serialized_end=529,
)
_RECOMMENDATIONTYPEENUM_RECOMMENDATIONTYPE.containing_type = _RECOMMENDATIONTYPEENUM
DESCRIPTOR.message_types_by_name['RecommendationTypeEnum'] = _RECOMMENDATIONTYPEENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RecommendationTypeEnum = _reflection.GeneratedProtocolMessageType('RecommendationTypeEnum', (_message.Message,), dict(
DESCRIPTOR = _RECOMMENDATIONTYPEENUM,
__module__ = 'google.ads.googleads_v2.proto.enums.recommendation_type_pb2'
,
__doc__ = """Container for enum describing types of recommendations.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.enums.RecommendationTypeEnum)
))
_sym_db.RegisterMessage(RecommendationTypeEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 41.233333 | 1,160 | 0.767179 | [
"Apache-2.0"
] | BenRKarl/google-ads-python | google/ads/google_ads/v2/proto/enums/recommendation_type_pb2.py | 6,185 | Python |
#!/usr/bin/env python3
"""Write a type-annotated function sum_mixed_list which takes
a list mxd_lst of integers and floats and returns their sum as a float."""
from typing import Iterable, List, Union
def sum_mixed_list(mxd_lst: List[Union[int, float]]) -> float:
"""sum all float number in list
Args:
input_list (List[float]): arg
Returns:
float: result
"""
return sum(mxd_lst)
| 23.333333 | 74 | 0.680952 | [
"MIT"
] | JoseAVallejo12/holbertonschool-web_back_end | 0x00-python_variable_annotations/6-sum_mixed_list.py | 420 | Python |
__all__ = [
"BaseAgent",
"DoNothingAgent",
"OneChangeThenNothing",
"GreedyAgent",
"PowerLineSwitch",
"TopologyGreedy",
"AgentWithConverter",
"RandomAgent",
"MLAgent",
"RecoPowerlineAgent"
]
from grid2op.Agent.BaseAgent import BaseAgent
from grid2op.Agent.DoNothing import DoNothingAgent
from grid2op.Agent.OneChangeThenNothing import OneChangeThenNothing
from grid2op.Agent.GreedyAgent import GreedyAgent
from grid2op.Agent.PowerlineSwitch import PowerLineSwitch
from grid2op.Agent.TopologyGreedy import TopologyGreedy
from grid2op.Agent.AgentWithConverter import AgentWithConverter
from grid2op.Agent.RandomAgent import RandomAgent
from grid2op.Agent.MLAgent import MLAgent
from grid2op.Agent.RecoPowerlineAgent import RecoPowerlineAgent
| 32.5 | 67 | 0.811538 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | 19bdxx/GRID2OP | grid2op/Agent/__init__.py | 780 | Python |
"""
Камни и украшения
Даны две строки строчных латинских символов: строка J и строка S.
Символы, входящие в строку J, — «драгоценности», входящие в строку S — «камни».
Нужно определить, какое количество символов из S одновременно являются
«драгоценностями». Проще говоря, нужно проверить, какое количество символов
из S входит в J.
# Формат ввода
На двух первых строках входного файла содержатся две строки строчных латинских
символов: строка J и строка S. Длина каждой не превосходит 100 символов.
# Формат вывода
Выходной файл должен содержать единственное число — количество камней,
являющихся драгоценностями.
# Пример
Ввод:
ab
aabbccd
Вывод:
4
"""
import sys
diamonds = sys.stdin.readline().strip()
stones = sys.stdin.readline().strip()
found_diamonds = 0
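# Count stones that are jewels with a straightforward nested scan (O(|S| * |J|)).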
for stone in stones:
for diamond in diamonds:
if stone == diamond:
found_diamonds += 1
print(found_diamonds)
| 22.073171 | 79 | 0.754696 | [
"MIT"
] | J0shu4B0y/Practice | Python/yandex/stones_and_diamonds.py | 1,414 | Python |
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stub ``ctypes`` module.
Used by ``setuptools.windows_helpers``.
"""
| 35.210526 | 74 | 0.750374 | [
"Apache-2.0"
] | dhermes/google-cloud-python-on-gae | language-app/stubs/ctypes.py | 669 | Python |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import six
import paddle
from paddle.fluid import framework, backward, core
from paddle.fluid.dygraph import layers
from paddle.fluid.dygraph.base import switch_to_static_graph
from paddle.fluid.dygraph.dygraph_to_static import logging_utils
from paddle.fluid.dygraph.dygraph_to_static.return_transformer import RETURN_NO_VALUE_MAGIC_NUM
from paddle.fluid.layers.utils import flatten
from paddle.fluid.layers.utils import pack_sequence_as
import paddle.compat as cpt
class NestSequence(object):
"""
    A wrapper class that makes it easy to flatten and restore the nested
    structure of a given sequence.
"""
def __init__(self, raw_input, need_check=False):
self.__raw_input = raw_input
self.__var_ids = self._get_var_ids()
self._check_non_variable(need_check)
def tolist(self):
"""
        Flattens the nested sequence into a single list.
"""
return flatten(self.__raw_input)
def restore(self, value_list):
"""
Restores the nested sequence from value list.
"""
assert len(self.tolist()) == len(value_list)
return pack_sequence_as(self.__raw_input, value_list)
def _get_var_ids(self):
var_ids = []
for idx, var in enumerate(self.tolist()):
if isinstance(var, (framework.Variable, core.VarBase)):
var_ids.append(idx)
return var_ids
def _check_non_variable(self, need_check):
"""
        Raises a warning if the output of the traced function contains non-tensor values.
"""
if need_check:
warning_types = set()
for var in self.tolist():
if not isinstance(var, (framework.Variable, core.VarBase)):
warning_types.add(type(var))
if warning_types:
logging_utils.warn(
"Output of traced function contains non-tensor type values: {}. "
"Currently, We don't support to update them while training and will return "
"what we first saw. Please try to return them as tensor.".
format(list(warning_types)))
@property
def var_ids(self):
return self.__var_ids
def __getitem__(self, item):
return self.tolist()[item]
class LazyInitialized(object):
"""
Descriptor to implement lazy initialization of property.
"""
def __init__(self, function):
self.function = function
def __get__(self, instance, cls):
val = self.function(instance)
setattr(instance, self.function.__name__, val)
return val
def _change_is_test_status(program, is_test):
# change all `is_test` attributes
for block in program.blocks:
for op in block.ops:
if op.has_attr('is_test'):
op._set_attr('is_test', is_test)
return program
class PartialProgramLayer(layers.Layer):
"""
PartialProgramLayer wraps all the ops from layers decorated by `@declarative`
    and executes them as a static subgraph.
.. note::
**1. This is a very low level API. Users should not use this API
directly. Please use `partial_program_from(concrete_program)`
to create it.
**2. LoDTensorArray is not currently supported in the output.
Args:
main_program(Program): The main program that contains ops need to be executed.
inputs(list[Variable]): The input list of the decorated function by `@declarative`.
outputs(list[Variable]): The output list of the decorated function by `@declarative`.
parameters(list[VarBase]|None): All trainable parameters included in the program. Default None.
Returns:
Layer: A Layer object that run all ops internally in static mode.
"""
def __init__(self, main_program, inputs, outputs, parameters=None):
super(PartialProgramLayer, self).__init__()
self._inputs = NestSequence(inputs)
self._outputs = NestSequence(outputs, need_check=True)
self._params = parameters if parameters is not None else []
self._origin_main_program = self._verify_program(main_program)
self._inner_scope = core.Scope()
# Set default mode to train
self._double_grads = self._get_double_grads(self._origin_main_program)
self.training = True
@LazyInitialized
def _infer_program(self):
"""
Lazy initialized property of infer_program.
"""
return self._clone_for_test(self._origin_main_program)
@LazyInitialized
def _train_program(self):
"""
Lazy initialized property of train_program.
"""
train_program = self._append_backward_desc(self._origin_main_program)
# Note: Only set grad type once after initializing train program. So we
# put it here.
self._set_grad_type(self._params, train_program)
return train_program
def _verify_program(self, main_program):
"""
Verify that the program parameter is initialized, prune some unused params,
and remove redundant op callstack.
"""
# 1. Check all params from main program can be found in self._params
self._check_params_all_inited(main_program)
# 2. Prune the parameters not used anywhere in the program.
self._prune_unused_params(main_program)
return main_program
@switch_to_static_graph
def _append_backward_desc(self, main_program):
# make sure all status of is_test are False in train mode.
program = _change_is_test_status(main_program.clone(), is_test=False)
targets = []
for out in self._outputs.tolist():
if isinstance(out, framework.Variable):
targets.append(program.global_block().var(out.name))
if targets and self._params:
backward.gradients(targets=targets, inputs=[])
return program
def _prune_unused_params(self, program):
"""
Prune the parameters not used anywhere in the program.
        The `@declarative` decorator may only decorate a sub-function which
contains some unused parameters created in `__init__`.
So prune these parameters to avoid unnecessary operations in
`run_program_op`.
"""
required_params = []
for param in self._params:
found_param = False
for block in program.blocks:
for op in block.ops:
if param.name in op.input_arg_names or param.name in op.output_arg_names:
required_params.append(param)
found_param = True
break
if found_param:
break
self._params = required_params
def _get_double_grads(self, program):
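        # Collect the gradient VarBases (block vars whose names contain "@GRAD")
        # created when the backward pass was appended to the program.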
double_grads = []
for block in program.blocks:
for name in block.vars:
if "@GRAD" in name:
var_desc = block.vars[name].desc
var_base = core.VarBase(var_desc.dtype(),
var_desc.shape(),
var_desc.name(),
var_desc.type(), False)
double_grads.append(var_base)
return double_grads
def forward(self, inputs):
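        # Prepare input/output VarBases, run the whole static program through the
        # run_program op, then restore the original nested output structure.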
in_vars, out_vars, tmp_scope_vec = self._prepare(inputs)
attrs = ('global_block', self.program.desc.block(0), 'start_op_index',
0, 'end_op_index', self._infer_program.desc.block(0).op_size(),
'is_test', not self.training)
core.ops.run_program(
valid_vars(in_vars),
valid_vars(self._params),
valid_vars(out_vars), tmp_scope_vec,
valid_vars(self._double_grads), *attrs)
restored_nest_out = self._restore_out(out_vars)
return self._remove_no_value(restored_nest_out)
@property
def program(self):
return self._train_program if self.training else self._infer_program
def _prepare(self, inputs):
"""
Prepare inputs, outputs, attrs.
"""
assert isinstance(inputs, (tuple, list))
# Flatten inputs with nested structure into single list.
flatten_inputs = flatten(inputs)
# Convert variable into VarBase and feed in training data.
input_vars = []
for i, value in enumerate(flatten_inputs):
if isinstance(value, np.ndarray):
var = core.VarBase(
value=value,
name=self._inputs[i].desc.name(),
persistable=False,
place=framework._current_expected_place(),
zero_copy=True)
elif isinstance(value, core.VarBase):
value.name = self._inputs[i].desc.name()
if value.stop_gradient:
# NOTE(Aurelius84): If var is on CPUPlace, it will be transformed multi times
# into CUDAPlace when it's as input of multi Ops. so we move it in advance
# to avoid this problem.
var = paddle.to_tensor(
value,
dtype=value.dtype,
place=framework._current_expected_place(),
stop_gradient=True)
var.name = value.name
else:
var = value
else:
continue
input_vars.append(var)
# Create VarBase to receive output data.
out_vars = []
for idx in self._outputs.var_ids:
var = self._outputs[idx]
assert isinstance(var, framework.Variable)
var_desc = var.desc
var_base = core.VarBase(var_desc.dtype(),
var_desc.shape(),
var_desc.name(), var_desc.type(), False)
out_vars.append(var_base)
# Hold forward variables
tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [],
"program_out_scope",
core.VarDesc.VarType.STEP_SCOPES, True)
tmp_scope_vec.value().set_scope(self._inner_scope)
return input_vars, out_vars, tmp_scope_vec
def _restore_out(self, out_vars):
"""
        Restores the same nested outputs, only replacing each Variable with its VarBase.
"""
flatten_outputs = self._outputs.tolist()
for i, idx in enumerate(self._outputs.var_ids):
flatten_outputs[idx] = out_vars[i]
outs = self._outputs.restore(flatten_outputs)
if outs is not None and len(outs) == 1:
outs = outs[0]
return outs
@switch_to_static_graph
def _clone_for_test(self, main_program):
return main_program.clone(for_test=True)
def _is_no_value(self, var):
if isinstance(var, core.VarBase):
if var.shape == [1] and var.numpy()[0] == RETURN_NO_VALUE_MAGIC_NUM:
return True
return False
def _remove_no_value(self, out_vars):
"""
        Removes invalid values produced by variable-length return statements.
"""
if isinstance(out_vars, core.VarBase):
if self._is_no_value(out_vars):
return None
return out_vars
elif isinstance(out_vars, (tuple, list)):
if isinstance(out_vars, tuple):
res = tuple(
var for var in out_vars if not self._is_no_value(var))
else:
# isinstance(out_vars, list)
res = [var for var in out_vars if not self._is_no_value(var)]
has_removed = (len(out_vars) > len(res))
            # len(out_vars) > len(res) means at least one var was removed. This check
            # prevents misreading an out_vars that was empty or had just one element
            # to begin with.
if len(res) == 0 and has_removed:
return None
elif len(res) == 1 and has_removed:
return res[0]
return res
return out_vars
def _set_grad_type(self, params, train_program):
        # NOTE: if the user enables sparse gradient mode, the param's gradient
        # will be SelectedRows, not LoDTensor. But the tracer will just set the
        # param grad VarBase from the forward VarBase (LoDTensor). If we don't
        # change the grad_var type here, RunProgramOp would need to forcibly
        # transform SelectedRows to LoDTensor, which may not be the result the
        # user wants.
for param in params:
grad_name = param.name + core.grad_var_suffix()
grad_var = train_program.desc.block(0).find_var(
cpt.to_bytes(grad_name))
# NOTE: cannot find var desc maybe no problem, such as in batch_norm
if grad_var is None:
continue
param._set_grad_type(grad_var.type())
def _remove_op_call_stack(self, main_program):
"""
        Remove each op's Python call stack, which carries redundant low-level error
        messages related to the transformations, to avoid confusing users.
"""
assert isinstance(main_program, framework.Program)
for block in main_program.blocks:
for op in block.ops:
if op.has_attr("op_callstack"):
op._remove_attr("op_callstack")
return main_program
def _check_params_all_inited(self, main_program):
"""
Check all params from main program are already initialized, see details as follows:
1. all parameters in self._params should be type `framework.ParamBase` which are created in dygraph.
2. all parameters from transformed program can be found in self._params.
Because they share same data with ParamBase of original dygraph.
"""
if not isinstance(self._params, (list, tuple)):
raise TypeError(
"Type of self._params in PartialProgramLayer should be list or tuple, but received %s."
% type(self._params))
param_and_buffer_names_set = set()
for i, var in enumerate(self._params):
            # self._params contains parameters and buffers with persistable=True.
if not isinstance(var, core.VarBase):
raise TypeError(
'Type of self._params[{}] in PartialProgramLayer should be Parameter or Variable, but received {}.'.
format(i, type(var)))
param_and_buffer_names_set.add(var.name)
for block in main_program.blocks:
for name, var in six.iteritems(block.vars):
if isinstance(var, framework.Parameter):
if name not in param_and_buffer_names_set:
raise ValueError(
"\n\tWe don't support to define layer with parameters in the function "
"decorated by `@declarative`.\n\tBecause that will re-defined parameters "
"every time when you run the function.\n\t"
"But we found parameter(%s) was created in the decorated function.\n\t"
"Please define the layer with parameters in `__init__` function."
% name)
def valid_vars(vars):
"""
    Note: run_program_op.InferShape requires `X`/`Out` not to be null, but empty
    inputs/outputs are common in dy2static, so a fake VarBase is created to
    handle the problem.
"""
if vars:
return vars
return [
core.VarBase(
value=[1],
name='Fake_var',
place=framework._current_expected_place())
]
def partial_program_from(concrete_program):
inputs = concrete_program.inputs
if inputs and isinstance(inputs[0], layers.Layer):
inputs = inputs[1:]
return PartialProgramLayer(concrete_program.main_program, inputs,
concrete_program.outputs,
concrete_program.parameters)
| 38.656682 | 120 | 0.608929 | [
"Apache-2.0"
] | CheQiXiao/Paddle | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | 16,777 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import re
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('wmsmanager', '0002_auto_20151125_1310'),
]
operations = [
migrations.AlterField(
model_name='wmslayer',
name='kmi_name',
field=models.SlugField(blank=True, max_length=128, null=True, validators=[django.core.validators.RegexValidator(re.compile(b'^[a-z0-9_]+$'), b'Slug can only contain lowercase letters, numbers and underscores', b'invalid')]),
preserve_default=True,
),
migrations.AlterField(
model_name='wmslayer',
name='name',
field=models.CharField(help_text=b'The name of wms layer', max_length=128),
preserve_default=True,
),
]
| 30.862069 | 236 | 0.637989 | [
"BSD-3-Clause"
] | dbca-asi/borgcollector | wmsmanager/migrations/0003_auto_20151203_1448.py | 895 | Python |
#!/usr/bin/env python3
# Copyright (c) 2020 The Elixir Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test addr relay
"""
from test_framework.messages import (
CAddress,
NODE_NETWORK,
NODE_WITNESS,
msg_addr,
msg_getaddr
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import ElixirTestFramework
from test_framework.util import (
assert_equal,
)
import time
class AddrReceiver(P2PInterface):
num_ipv4_received = 0
def on_addr(self, message):
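        # Relayed addresses must advertise NODE_NETWORK | NODE_WITNESS (1 | 8 == 9)
        # and come from the 123.123.123.0/24 range used by setup_addr_msg.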
for addr in message.addrs:
assert_equal(addr.nServices, 9)
if not 8333 <= addr.port < 8343:
raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port))
assert addr.ip.startswith('123.123.123.')
self.num_ipv4_received += 1
class GetAddrStore(P2PInterface):
getaddr_received = False
num_ipv4_received = 0
def on_getaddr(self, message):
self.getaddr_received = True
def on_addr(self, message):
for addr in message.addrs:
self.num_ipv4_received += 1
def addr_received(self):
return self.num_ipv4_received != 0
class AddrTest(ElixirTestFramework):
counter = 0
mocktime = int(time.time())
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
self.oversized_addr_test()
self.relay_tests()
self.getaddr_tests()
self.blocksonly_mode_tests()
def setup_addr_msg(self, num):
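        # Build an addr message with `num` addresses in 123.123.123.0/24, increasing
        # timestamps and ports, each advertising NODE_NETWORK | NODE_WITNESS.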
addrs = []
for i in range(num):
addr = CAddress()
addr.time = self.mocktime + i
addr.nServices = NODE_NETWORK | NODE_WITNESS
addr.ip = f"123.123.123.{self.counter % 256}"
addr.port = 8333 + i
addrs.append(addr)
self.counter += 1
msg = msg_addr()
msg.addrs = addrs
return msg
def send_addr_msg(self, source, msg, receivers):
source.send_and_ping(msg)
# pop m_next_addr_send timer
self.mocktime += 5 * 60
self.nodes[0].setmocktime(self.mocktime)
for peer in receivers:
peer.sync_send_with_ping()
def oversized_addr_test(self):
self.log.info('Send an addr message that is too large')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(1010)
with self.nodes[0].assert_debug_log(['addr message size = 1010']):
addr_source.send_and_ping(msg)
self.nodes[0].disconnect_p2ps()
def relay_tests(self):
self.log.info('Test address relay')
self.log.info('Check that addr message content is relayed and added to addrman')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
num_receivers = 7
receivers = []
for _ in range(num_receivers):
receivers.append(self.nodes[0].add_p2p_connection(AddrReceiver()))
# Keep this with length <= 10. Addresses from larger messages are not
# relayed.
num_ipv4_addrs = 10
msg = self.setup_addr_msg(num_ipv4_addrs)
with self.nodes[0].assert_debug_log(
[
'Added {} addresses from 127.0.0.1: 0 tried'.format(num_ipv4_addrs),
'received: addr (301 bytes) peer=1',
]
):
self.send_addr_msg(addr_source, msg, receivers)
total_ipv4_received = sum(r.num_ipv4_received for r in receivers)
# Every IPv4 address must be relayed to two peers, other than the
# originating node (addr_source).
ipv4_branching_factor = 2
assert_equal(total_ipv4_received, num_ipv4_addrs * ipv4_branching_factor)
self.nodes[0].disconnect_p2ps()
self.log.info('Check relay of addresses received from outbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver())
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(GetAddrStore(), p2p_idx=0, connection_type="outbound-full-relay")
msg = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg, [inbound_peer])
self.log.info('Check that the first addr message received from an outbound peer is not relayed')
# Currently, there is a flag that prevents the first addr message received
# from a new outbound peer to be relayed to others. Originally meant to prevent
# large GETADDR responses from being relayed, it now typically affects the self-announcement
# of the outbound peer which is often sent before the GETADDR response.
assert_equal(inbound_peer.num_ipv4_received, 0)
self.log.info('Check that subsequent addr messages sent from an outbound peer are relayed')
msg2 = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer])
assert_equal(inbound_peer.num_ipv4_received, 2)
self.log.info('Check address relay to outbound peers')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(GetAddrStore(), p2p_idx=1, connection_type="block-relay-only")
msg3 = self.setup_addr_msg(2)
self.send_addr_msg(inbound_peer, msg3, [full_outbound_peer, block_relay_peer])
self.log.info('Check that addresses are relayed to full outbound peers')
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.log.info('Check that addresses are not relayed to block-relay-only outbound peers')
assert_equal(block_relay_peer.num_ipv4_received, 0)
self.nodes[0].disconnect_p2ps()
def getaddr_tests(self):
self.log.info('Test getaddr behavior')
self.log.info('Check that we send a getaddr message upon connecting to an outbound-full-relay peer')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(GetAddrStore(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received
self.log.info('Check that we do not send a getaddr message upon connecting to a block-relay-only peer')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(GetAddrStore(), p2p_idx=1, connection_type="block-relay-only")
block_relay_peer.sync_with_ping()
assert_equal(block_relay_peer.getaddr_received, False)
self.log.info('Check that we answer getaddr messages only from inbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(GetAddrStore())
inbound_peer.sync_with_ping()
# Add some addresses to addrman
for i in range(1000):
first_octet = i >> 8
second_octet = i % 256
a = f"{first_octet}.{second_octet}.1.1"
self.nodes[0].addpeeraddress(a, 8333)
full_outbound_peer.send_and_ping(msg_getaddr())
block_relay_peer.send_and_ping(msg_getaddr())
inbound_peer.send_and_ping(msg_getaddr())
self.mocktime += 5 * 60
self.nodes[0].setmocktime(self.mocktime)
inbound_peer.wait_until(inbound_peer.addr_received)
assert_equal(full_outbound_peer.num_ipv4_received, 0)
assert_equal(block_relay_peer.num_ipv4_received, 0)
assert inbound_peer.num_ipv4_received > 100
self.nodes[0].disconnect_p2ps()
def blocksonly_mode_tests(self):
self.log.info('Test addr relay in -blocksonly mode')
self.restart_node(0, ["-blocksonly"])
self.mocktime = int(time.time())
self.log.info('Check that we send getaddr messages')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(GetAddrStore(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received
self.log.info('Check that we relay address messages')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(2)
self.send_addr_msg(addr_source, msg, [full_outbound_peer])
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.nodes[0].disconnect_p2ps()
if __name__ == '__main__':
AddrTest().main()
| 38.813084 | 136 | 0.67698 | [
"MIT"
] | robbelouwet/Elixir | test/functional/p2p_addr_relay.py | 8,306 | Python |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import tarfile
import unittest
from unittest.mock import MagicMock
import fsspec
from torchx.specs import Role, AppDef
from torchx.workspace.docker_workspace import (
DockerWorkspace,
_build_context,
)
def has_docker() -> bool:
    # Handle ImportError separately: if `import docker` fails, the `docker` name is
    # unbound and cannot be referenced in the except clause below.
    try:
        import docker
    except ImportError:
        return False
    try:
        docker.from_env()
        return True
    except docker.errors.DockerException:
        return False
if has_docker():
class DockerWorkspaceTest(unittest.TestCase):
def test_docker_workspace(self) -> None:
fs = fsspec.filesystem("memory")
fs.mkdirs("test_workspace/bar", exist_ok=True)
with fs.open("test_workspace/bar/foo.sh", "w") as f:
f.write("exit 0")
role = Role(
name="ping",
image="busybox",
entrypoint="sh",
args=["bar/foo.sh"],
)
workspace = DockerWorkspace()
workspace.build_workspace_and_update_role(role, "memory://test_workspace")
self.assertNotEqual("busybox", role.image)
class DockerWorkspaceMockTest(unittest.TestCase):
def test_update_app_images(self) -> None:
app = AppDef(
name="foo",
roles=[
Role(
name="a",
image="sha256:hasha",
),
Role(name="b", image="sha256:hashb"),
Role(
name="c",
image="c",
),
],
)
want = AppDef(
name="foo",
roles=[
Role(
name="a",
image="example.com/repo:hasha",
),
Role(
name="b",
image="example.com/repo:hashb",
),
Role(
name="c",
image="c",
),
],
)
# no image_repo
with self.assertRaisesRegex(KeyError, "image_repo"):
DockerWorkspace()._update_app_images(app, {})
# with image_repo
images_to_push = DockerWorkspace()._update_app_images(
app,
{
"image_repo": "example.com/repo",
},
)
self.assertEqual(
images_to_push,
{
"sha256:hasha": ("example.com/repo", "hasha"),
"sha256:hashb": ("example.com/repo", "hashb"),
},
)
self.assertEqual(app, want)
def test_push_images(self) -> None:
client = MagicMock()
img = MagicMock()
client.images.get.return_value = img
workspace = DockerWorkspace(docker_client=client)
workspace._push_images(
{
"sha256:hasha": ("example.com/repo", "hasha"),
"sha256:hashb": ("example.com/repo", "hashb"),
}
)
self.assertEqual(client.images.get.call_count, 2)
self.assertEqual(img.tag.call_count, 2)
self.assertEqual(client.images.push.call_count, 2)
def test_push_images_empty(self) -> None:
workspace = DockerWorkspace()
workspace._push_images({})
def test_dockerignore(self) -> None:
fs = fsspec.filesystem("memory")
files = [
"dockerignore/ignoredir/bar",
"dockerignore/dir1/bar",
"dockerignore/dir/ignorefileglob1",
"dockerignore/dir/recursive/ignorefileglob2",
"dockerignore/dir/ignorefile",
"dockerignore/ignorefile",
"dockerignore/ignorefilesuffix",
"dockerignore/dir/file",
"dockerignore/foo.sh",
"dockerignore/unignore",
]
for file in files:
fs.touch(file)
with fs.open("dockerignore/.dockerignore", "wt") as f:
f.write(
"""
# comment
# dirs/files
ignoredir
ignorefile
# globs
*/ignorefileglo*1
**/ignorefileglob2
dir?
# inverse patterns
unignore
!unignore
# ignore .
.
"""
)
with _build_context("img", "memory://dockerignore") as f:
with tarfile.open(fileobj=f, mode="r") as tf:
self.assertCountEqual(
tf.getnames(),
{
"Dockerfile.torchx",
"foo.sh",
".dockerignore",
"dir/ignorefile",
"ignorefilesuffix",
"dir/file",
"unignore",
},
)
| 28.982857 | 86 | 0.477721 | [
"BSD-3-Clause"
] | daniellepintz/torchx | torchx/workspace/test/docker_workspace_test.py | 5,072 | Python |
# encoding: UTF-8
import time
from redtorch.event import *
from redtorch.trader.vtEvent import *
from redtorch.trader.vtConstant import *
from redtorch.trader.vtObject import *
########################################################################
class VtGateway(object):
"""交易接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName):
"""Constructor"""
self.eventEngine = eventEngine
self.gatewayName = gatewayName
#----------------------------------------------------------------------
def onTick(self, tick):
"""市场行情推送"""
# 通用事件
event1 = Event(type_=EVENT_TICK)
event1.dict_['data'] = tick
self.eventEngine.put(event1)
        # Event keyed by the specific contract symbol
event2 = Event(type_=EVENT_TICK+tick.vtSymbol)
event2.dict_['data'] = tick
self.eventEngine.put(event2)
#----------------------------------------------------------------------
def onTrade(self, trade):
"""成交信息推送"""
# 通用事件
event1 = Event(type_=EVENT_TRADE)
event1.dict_['data'] = trade
self.eventEngine.put(event1)
        # Trade event keyed by the specific contract
event2 = Event(type_=EVENT_TRADE+trade.vtSymbol)
event2.dict_['data'] = trade
self.eventEngine.put(event2)
#----------------------------------------------------------------------
def onOrder(self, order):
"""订单变化推送"""
# 通用事件
event1 = Event(type_=EVENT_ORDER)
event1.dict_['data'] = order
self.eventEngine.put(event1)
        # Event keyed by the specific order ID
event2 = Event(type_=EVENT_ORDER+order.vtOrderID)
event2.dict_['data'] = order
self.eventEngine.put(event2)
#----------------------------------------------------------------------
def onPosition(self, position):
"""持仓信息推送"""
# 通用事件
event1 = Event(type_=EVENT_POSITION)
event1.dict_['data'] = position
self.eventEngine.put(event1)
        # Event keyed by the specific contract symbol
event2 = Event(type_=EVENT_POSITION+position.vtSymbol)
event2.dict_['data'] = position
self.eventEngine.put(event2)
#----------------------------------------------------------------------
def onAccount(self, account):
"""账户信息推送"""
# 通用事件
event1 = Event(type_=EVENT_ACCOUNT)
event1.dict_['data'] = account
self.eventEngine.put(event1)
        # Event keyed by the specific account ID
event2 = Event(type_=EVENT_ACCOUNT+account.vtAccountID)
event2.dict_['data'] = account
self.eventEngine.put(event2)
#----------------------------------------------------------------------
def onError(self, error):
"""错误信息推送"""
# 通用事件
event1 = Event(type_=EVENT_ERROR)
event1.dict_['data'] = error
self.eventEngine.put(event1)
#----------------------------------------------------------------------
def onLog(self, log):
"""日志推送"""
# 通用事件
event1 = Event(type_=EVENT_LOG)
event1.dict_['data'] = log
self.eventEngine.put(event1)
#----------------------------------------------------------------------
def onContract(self, contract):
"""合约基础信息推送"""
# 通用事件
event1 = Event(type_=EVENT_CONTRACT)
event1.dict_['data'] = contract
self.eventEngine.put(event1)
#----------------------------------------------------------------------
def connect(self):
"""连接"""
pass
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
pass
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
pass
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
pass
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
pass
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
pass
#----------------------------------------------------------------------
def close(self):
"""关闭"""
pass
| 29.467532 | 75 | 0.388277 | [
"MIT"
] | sun0x00/redtorch_python | redtorch/trader/vtGateway.py | 4,840 | Python |
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework import generics, views
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_registration.settings import registration_settings
from rest_registration.utils.verification_notifications import (
send_register_verification_email_notification,
)
from internal import permissions as internal_permissions
from users import emails, permissions, serializers
UserModel = get_user_model()
class CurrentUserView(views.APIView):
"""View of the current user."""
permission_classes = [IsAuthenticated]
serializer_class = serializers.UserSerializerCurrent
def get(self, request):
"""Retrieve the user."""
user = request.user
serializer = self.serializer_class(user)
return Response(serializer.data)
class UserListView(generics.ListCreateAPIView):
"""List and creation of users."""
model = UserModel
queryset = UserModel.objects.all().order_by("username")
permission_classes = [
IsAuthenticated,
permissions.IsUsersManager | internal_permissions.IsReadOnly,
]
def get_serializer_class(self):
# serializer depends on permission level
if permissions.IsUsersManager().has_permission(self.request, self):
return serializers.UserCreationForManagerSerializer
return serializers.UserSerializer
def perform_create(self, serializer):
with transaction.atomic():
user = serializer.save()
# send verification email if requested
if registration_settings.REGISTER_VERIFICATION_ENABLED:
send_register_verification_email_notification(self.request, user)
class UserView(generics.RetrieveUpdateDestroyAPIView):
"""Edition and view of a user."""
model = UserModel
queryset = UserModel.objects.all()
permission_classes = [
IsAuthenticated,
permissions.IsUsersManager & permissions.IsNotSelf
| internal_permissions.IsReadOnly,
]
def get_serializer_class(self):
# serializer depends on permission level
if permissions.IsUsersManager().has_permission(self.request, self):
if settings.EMAIL_ENABLED:
return serializers.UserForManagerSerializer
return serializers.UserForManagerWithPasswordSerializer
return serializers.UserSerializer
def perform_update(self, serializer):
validated_by_manager_old = serializer.instance.validated_by_manager
super().perform_update(serializer)
validated_by_manager_new = serializer.instance.validated_by_manager
if not validated_by_manager_old and validated_by_manager_new:
# user has been validated by manager, send notification
emails.send_notification_to_user_validated(serializer.instance)
| 34.034483 | 81 | 0.739953 | [
"MIT"
] | DakaraProject/dakara-server | dakara_server/users/views.py | 2,961 | Python |
from lxml.etree import XMLSyntaxError
from onegov.ballot import Election
from onegov.ballot import ElectionCompound
from onegov.ballot import Vote
from onegov.core.widgets import transform_structure
from onegov.election_day import _
from onegov.election_day.models import Screen
from onegov.election_day.models.screen import ScreenType
from onegov.form import Form
from onegov.form.fields import ChosenSelectField
from onegov.form.fields import CssField
from onegov.form.fields import PanelField
from onegov.form.validators import UniqueColumnValue
from wtforms import IntegerField
from wtforms import RadioField
from wtforms import StringField
from wtforms import TextAreaField
from wtforms import ValidationError
from wtforms.validators import InputRequired
from wtforms.validators import NumberRange
from wtforms.validators import Optional
class ScreenForm(Form):
number = IntegerField(
label=_('Number'),
validators=[
InputRequired(),
NumberRange(min=1),
UniqueColumnValue(Screen)
]
)
group = StringField(
label=_('Group'),
description=_(
'Use the same group for all screens you want to cycle through.'
),
)
duration = IntegerField(
label=_('Duration'),
description=_(
            'Number of seconds this screen is presented if cycling through '
'screens. If none is set, 20 seconds are used.'
),
validators=[
NumberRange(min=1),
Optional()
],
)
description = StringField(
label=_('Description')
)
type = RadioField(
label=_('Type'),
choices=[
(
'simple_vote',
_('Simple Vote')
),
(
'complex_vote',
_('Vote with Counter-Proposal')
),
(
'majorz_election',
_('Election based on the simple majority system')
),
(
'proporz_election',
_('Election based on proportional representation')
),
(
'election_compound',
_('Compound of Elections')
),
],
validators=[
InputRequired()
],
default='simple_vote'
)
simple_vote = ChosenSelectField(
_('Vote'),
choices=[],
validators=[
InputRequired()
],
depends_on=('type', 'simple_vote'),
)
complex_vote = ChosenSelectField(
_('Vote'),
choices=[],
validators=[
InputRequired()
],
depends_on=('type', 'complex_vote'),
)
majorz_election = ChosenSelectField(
_('Election'),
choices=[],
validators=[
InputRequired()
],
depends_on=('type', 'majorz_election'),
)
proporz_election = ChosenSelectField(
_('Election'),
choices=[],
validators=[
InputRequired()
],
depends_on=('type', 'proporz_election'),
)
election_compound = ChosenSelectField(
_('Compound of Elections'),
choices=[],
validators=[
InputRequired()
],
depends_on=('type', 'election_compound'),
)
tags_simple_vote = PanelField(
label=_('Available tags'),
text='',
kind='',
depends_on=('type', 'simple_vote'),
)
tags_complex_vote = PanelField(
label=_('Available tags'),
text='',
kind='',
depends_on=('type', 'complex_vote'),
)
tags_majorz_election = PanelField(
label=_('Available tags'),
text='',
kind='',
depends_on=('type', 'majorz_election'),
)
tags_proporz_election = PanelField(
label=_('Available tags'),
text='',
kind='',
depends_on=('type', 'proporz_election'),
)
tags_election_compound = PanelField(
label=_('Available tags'),
text='',
kind='',
depends_on=('type', 'election_compound'),
)
structure = TextAreaField(
label=_('Structure'),
render_kw={'rows': 32},
validators=[
InputRequired()
],
)
css = CssField(
label=_('Additional CSS'),
render_kw={'rows': 10},
)
def get_widgets(self, type_):
registry = self.request.app.config.screen_widget_registry
return registry.by_categories(ScreenType(type_).categories)
def validate_structure(self, field):
widgets = self.get_widgets(self.type.data)
if field.data:
try:
transform_structure(widgets.values(), field.data)
except XMLSyntaxError as e:
raise ValidationError(e.msg.split(', line')[0])
def update_model(self, model):
model.number = self.number.data
model.group = self.group.data
model.duration = self.duration.data
model.description = self.description.data
model.type = self.type.data
model.vote_id = None
model.election_id = None
model.election_compound_id = None
if self.type.data == 'simple_vote':
model.vote_id = self.simple_vote.data
elif self.type.data == 'complex_vote':
model.vote_id = self.complex_vote.data
elif self.type.data == 'majorz_election':
model.election_id = self.majorz_election.data
elif self.type.data == 'proporz_election':
model.election_id = self.proporz_election.data
elif self.type.data == 'election_compound':
model.election_compound_id = self.election_compound.data
model.structure = self.structure.data
model.css = self.css.data
def apply_model(self, model):
self.number.data = model.number
self.group.data = model.group
self.duration.data = model.duration
self.description.data = model.description
self.type.data = model.type
self.simple_vote.data = ''
self.complex_vote.data = ''
self.majorz_election.data = ''
self.proporz_election.data = ''
self.election_compound.data = ''
if self.type.data == 'simple_vote':
self.simple_vote.data = model.vote_id
elif self.type.data == 'complex_vote':
self.complex_vote.data = model.vote_id
elif self.type.data == 'majorz_election':
self.majorz_election.data = model.election_id
elif self.type.data == 'proporz_election':
self.proporz_election.data = model.election_id
elif self.type.data == 'election_compound':
self.election_compound.data = model.election_compound_id
self.structure.data = model.structure
self.css.data = model.css
def on_request(self):
session = self.request.session
query = session.query(Vote).filter_by(type='simple')
self.simple_vote.choices = [
(vote.id, vote.title)
for vote in query.order_by(Vote.shortcode)
]
query = session.query(Vote).filter_by(type='complex')
self.complex_vote.choices = [
(vote.id, vote.title)
for vote in query.order_by(Vote.shortcode)
]
query = session.query(Election).filter_by(type='majorz')
self.majorz_election.choices = [
(election.id, election.title)
for election in query.order_by(Election.shortcode)
]
query = session.query(Election).filter_by(type='proporz')
self.proporz_election.choices = [
(election.id, election.title)
for election in query.order_by(Election.shortcode)
]
query = session.query(ElectionCompound)
query = query.order_by(ElectionCompound.shortcode)
self.election_compound.choices = [
(election_compound.id, election_compound.title)
for election_compound in query
]
self.tags_simple_vote.text = '\n'.join(sorted([
tag.usage for tag in self.get_widgets('simple_vote').values()
]))
self.tags_complex_vote.text = '\n'.join(sorted([
tag.usage for tag in self.get_widgets('complex_vote').values()
]))
self.tags_majorz_election.text = '\n'.join(sorted([
tag.usage for tag in self.get_widgets('majorz_election').values()
]))
self.tags_proporz_election.text = '\n'.join(sorted([
tag.usage for tag in self.get_widgets('proporz_election').values()
]))
self.tags_election_compound.text = '\n'.join(sorted([
tag.usage for tag in self.get_widgets('election_compound').values()
]))
| 30.49481 | 79 | 0.588335 | [
"MIT"
] | politbuero-kampagnen/onegov-cloud | src/onegov/election_day/forms/screen.py | 8,813 | Python |
import pytest
import json
import ipaddress
import time
import natsort
import random
import re
from collections import defaultdict
from tests.common.fixtures.ptfhost_utils import change_mac_addresses, copy_arp_responder_py
from tests.common.dualtor.dual_tor_utils import mux_cable_server_ip
from tests.common.dualtor.dual_tor_utils import get_t1_ptf_ports
from tests.common.dualtor.mux_simulator_control import mux_server_url
from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor
from tests.common.utilities import wait_until, get_intf_by_sub_intf
from tests.common import config_reload
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
from pkg_resources import parse_version
from tests.common import constants
pytestmark = [
pytest.mark.topology('t0'),
pytest.mark.device_type('vs')
]
def skip_201911_and_older(duthost):
""" Skip the current test if the DUT version is 201911 or older.
"""
if parse_version(duthost.kernel_version) <= parse_version('4.9.0'):
pytest.skip("Test not supported for 201911 images or older. Skipping the test")
def is_dualtor(tbinfo):
"""Check if the testbed is dualtor."""
return "dualtor" in tbinfo["topo"]["name"]
def add_ipaddr(ptfadapter, ptfhost, nexthop_addrs, prefix_len, nexthop_interfaces, ipv6=False):
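    # IPv6: assign the nexthop addresses directly to the PTF interfaces.
    # IPv4: generate an arp_responder config so the PTF answers ARP for the nexthops.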
if ipv6:
for idx in range(len(nexthop_addrs)):
ptfhost.shell("ip -6 addr add {}/{} dev eth{}".format(nexthop_addrs[idx], prefix_len, nexthop_interfaces[idx]), module_ignore_errors=True)
else:
vlan_host_map = defaultdict(dict)
for idx in range(len(nexthop_addrs)):
mac = ptfadapter.dataplane.get_mac(0, int(get_intf_by_sub_intf(nexthop_interfaces[idx]))).replace(":", "")
vlan_host_map[nexthop_interfaces[idx]][nexthop_addrs[idx]] = mac
arp_responder_conf = {}
for port in vlan_host_map:
arp_responder_conf['eth{}'.format(port)] = vlan_host_map[port]
with open("/tmp/from_t1.json", "w") as ar_config:
json.dump(arp_responder_conf, ar_config)
ptfhost.copy(src="/tmp/from_t1.json", dest="/tmp/from_t1.json")
ptfhost.host.options["variable_manager"].extra_vars.update({"arp_responder_args": "-e"})
ptfhost.template(src="templates/arp_responder.conf.j2", dest="/etc/supervisor/conf.d/arp_responder.conf")
ptfhost.shell('supervisorctl reread && supervisorctl update')
ptfhost.shell('supervisorctl restart arp_responder')
def del_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=False):
if ipv6:
for idx in range(len(nexthop_addrs)):
ptfhost.shell("ip -6 addr del {}/{} dev eth{}".format(nexthop_addrs[idx], prefix_len, nexthop_devs[idx]), module_ignore_errors=True)
else:
ptfhost.shell('supervisorctl stop arp_responder')
def clear_arp_ndp(duthost, ipv6=False):
if ipv6:
duthost.shell("sonic-clear ndp")
else:
duthost.shell("sonic-clear arp")
def generate_and_verify_traffic(duthost, ptfadapter, tbinfo, ip_dst, expected_ports, ipv6=False):
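    # Craft a TCP packet to ip_dst, inject it on a random T1-facing PTF port, and
    # verify it is forwarded out of one of the expected nexthop ports.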
if ipv6:
pkt = testutils.simple_tcpv6_packet(
eth_dst=duthost.facts["router_mac"],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_src='2001:db8:85a3::8a2e:370:7334',
ipv6_dst=ip_dst,
ipv6_hlim=64,
tcp_sport=1234,
tcp_dport=4321)
else:
pkt = testutils.simple_tcp_packet(
eth_dst=duthost.facts["router_mac"],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_src='1.1.1.1',
ip_dst=ip_dst,
ip_ttl=64,
tcp_sport=1234,
tcp_dport=4321)
exp_pkt = pkt.copy()
exp_pkt = mask.Mask(exp_pkt)
exp_pkt.set_do_not_care_scapy(packet.Ether, 'dst')
exp_pkt.set_do_not_care_scapy(packet.Ether, 'src')
if ipv6:
exp_pkt.set_do_not_care_scapy(packet.IPv6, 'hlim')
exp_pkt.set_do_not_care_scapy(packet.IPv6, 'chksum')
else:
exp_pkt.set_do_not_care_scapy(packet.IP, 'ttl')
exp_pkt.set_do_not_care_scapy(packet.IP, 'chksum')
ptf_t1_intf = random.choice(get_t1_ptf_ports(duthost, tbinfo))
ptf_t1_intf_index = int(ptf_t1_intf.strip("eth"))
ptfadapter.dataplane.flush()
testutils.send(ptfadapter, ptf_t1_intf_index, pkt)
testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=expected_ports)
def check_route_redistribution(duthost, prefix, ipv6, removed=False):
if ipv6:
bgp_neighbor_addr_regex = re.compile(r"^([0-9a-fA-F]{1,4}:[0-9a-fA-F:]+)")
SHOW_BGP_SUMMARY_CMD = "show ipv6 bgp summary"
SHOW_BGP_ADV_ROUTES_CMD_TEMPLATE = "show ipv6 bgp neighbor {} advertised-routes"
else:
bgp_neighbor_addr_regex = re.compile(r"^([0-9]{1,3}\.){3}[0-9]{1,3}")
SHOW_BGP_SUMMARY_CMD = "show ip bgp summary"
SHOW_BGP_ADV_ROUTES_CMD_TEMPLATE = "show ip bgp neighbor {} advertised-routes"
bgp_summary = duthost.shell(SHOW_BGP_SUMMARY_CMD, module_ignore_errors=True)["stdout"].split("\n")
bgp_neighbors = []
for line in bgp_summary:
matched = bgp_neighbor_addr_regex.match(line)
if matched:
bgp_neighbors.append(str(matched.group(0)))
for neighbor in bgp_neighbors:
adv_routes = duthost.shell(SHOW_BGP_ADV_ROUTES_CMD_TEMPLATE.format(neighbor))["stdout"]
if removed:
assert prefix not in adv_routes
else:
assert prefix in adv_routes
def run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, prefix, nexthop_addrs, prefix_len, nexthop_devs, nexthop_interfaces, ipv6=False, config_reload_test=False):
# Clean up arp or ndp
clear_arp_ndp(duthost, ipv6=ipv6)
# Add ipaddresses in ptf
add_ipaddr(ptfadapter, ptfhost, nexthop_addrs, prefix_len, nexthop_interfaces, ipv6=ipv6)
try:
# Add static route
duthost.shell("sonic-db-cli CONFIG_DB hmset 'STATIC_ROUTE|{}' nexthop {}".format(prefix, ",".join(nexthop_addrs)))
time.sleep(5)
# Check traffic get forwarded to the nexthop
ip_dst = str(ipaddress.ip_network(unicode(prefix))[1])
generate_and_verify_traffic(duthost, ptfadapter, tbinfo, ip_dst, nexthop_devs, ipv6=ipv6)
# Check the route is advertised to the neighbors
check_route_redistribution(duthost, prefix, ipv6)
# Config save and reload if specified
if config_reload_test:
duthost.shell('config save -y')
config_reload(duthost, wait=350)
generate_and_verify_traffic(duthost, ptfadapter, tbinfo, ip_dst, nexthop_devs, ipv6=ipv6)
check_route_redistribution(duthost, prefix, ipv6)
finally:
# Remove static route
duthost.shell("sonic-db-cli CONFIG_DB del 'STATIC_ROUTE|{}'".format(prefix), module_ignore_errors=True)
# Delete ipaddresses in ptf
del_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=ipv6)
# Check the advertised route get removed
time.sleep(5)
check_route_redistribution(duthost, prefix, ipv6, removed=True)
# Config save if the saved config_db was updated
if config_reload_test:
duthost.shell('config save -y')
# Clean up arp or ndp
clear_arp_ndp(duthost, ipv6=ipv6)
def get_nexthops(duthost, tbinfo, ipv6=False, count=1):
mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
vlan_intf = mg_facts['minigraph_vlan_interfaces'][1 if ipv6 else 0]
prefix_len = vlan_intf['prefixlen']
is_backend_topology = mg_facts.get(constants.IS_BACKEND_TOPOLOGY_KEY, False)
if is_dualtor(tbinfo):
server_ips = mux_cable_server_ip(duthost)
vlan_intfs = natsort.natsorted(server_ips.keys())
nexthop_devs = [mg_facts["minigraph_ptf_indices"][_] for _ in vlan_intfs]
server_ip_key = "server_ipv6" if ipv6 else "server_ipv4"
nexthop_addrs = [server_ips[_][server_ip_key].split("/")[0] for _ in vlan_intfs]
nexthop_interfaces = nexthop_devs
else:
vlan_subnet = ipaddress.ip_network(vlan_intf['subnet'])
vlan = mg_facts['minigraph_vlans'][mg_facts['minigraph_vlan_interfaces'][1 if ipv6 else 0]['attachto']]
vlan_ports = vlan['members']
vlan_id = vlan['vlanid']
vlan_ptf_ports = [mg_facts['minigraph_ptf_indices'][port] for port in vlan_ports]
nexthop_devs = vlan_ptf_ports
        # The backend topology uses sub-interfaces ethx.y (e.g. eth30.1000) between
        # servers and the T0 in ptf; other topologies use plain ethx (e.g. eth30).
if is_backend_topology:
nexthop_interfaces = [str(dev) + constants.VLAN_SUB_INTERFACE_SEPARATOR + str(vlan_id) for dev in nexthop_devs]
else:
nexthop_interfaces = nexthop_devs
nexthop_addrs = [str(vlan_subnet[i + 2]) for i in range(len(nexthop_devs))]
count = min(count, len(nexthop_devs))
indices = random.sample(list(range(len(nexthop_devs))), k=count)
return prefix_len, [nexthop_addrs[_] for _ in indices], [nexthop_devs[_] for _ in indices], [nexthop_interfaces[_] for _ in indices]
def test_static_route(rand_selected_dut, ptfadapter, ptfhost, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor):
duthost = rand_selected_dut
skip_201911_and_older(duthost)
prefix_len, nexthop_addrs, nexthop_devs, nexthop_interfaces = get_nexthops(duthost, tbinfo)
run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, "1.1.1.0/24",
nexthop_addrs, prefix_len, nexthop_devs, nexthop_interfaces)
def test_static_route_ecmp(rand_selected_dut, ptfadapter, ptfhost, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor):
duthost = rand_selected_dut
skip_201911_and_older(duthost)
prefix_len, nexthop_addrs, nexthop_devs, nexthop_interfaces = get_nexthops(duthost, tbinfo, count=3)
run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, "2.2.2.0/24",
nexthop_addrs, prefix_len, nexthop_devs, nexthop_interfaces, config_reload_test=True)
def test_static_route_ipv6(rand_selected_dut, ptfadapter, ptfhost, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor):
duthost = rand_selected_dut
skip_201911_and_older(duthost)
prefix_len, nexthop_addrs, nexthop_devs, nexthop_interfaces = get_nexthops(duthost, tbinfo, ipv6=True)
run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, "2000:1::/64",
nexthop_addrs, prefix_len, nexthop_devs, nexthop_interfaces, ipv6=True)
def test_static_route_ecmp_ipv6(rand_selected_dut, ptfadapter, ptfhost, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor):
duthost = rand_selected_dut
skip_201911_and_older(duthost)
prefix_len, nexthop_addrs, nexthop_devs, nexthop_interfaces = get_nexthops(duthost, tbinfo, ipv6=True, count=3)
run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, "2000:2::/64",
nexthop_addrs, prefix_len, nexthop_devs, nexthop_interfaces, ipv6=True, config_reload_test=True)
| 43.924901 | 171 | 0.710609 | [
"Apache-2.0"
] | LiuKuan-AF/sonic-mgmt | tests/route/test_static_route.py | 11,113 | Python |
"""
Flake8 plugin to encourage correct string literal concatenation.
Forbid implicitly concatenated string literals on one line such as those
introduced by Black.
Forbid all explicitly concatenated strings, in favour of implicit concatenation.
"""
from __future__ import generator_stop
import ast
import tokenize
from typing import Iterable, List, Tuple
import attr
import more_itertools
__all__ = ["__version__", "Checker"]
__version__ = "0.1.0"
_ERROR = Tuple[int, int, str, None]
def _implicit(file_tokens: Iterable[tokenize.TokenInfo]) -> Iterable[_ERROR]:
return (
(
*a.end,
"ISC001 implicitly concatenated string literals on one line"
if a.end[0] == b.start[0]
else "ISC002 implicitly concatenated string literals "
"over continuation line",
None,
)
for (a, b) in more_itertools.pairwise(file_tokens)
if a.type == b.type == tokenize.STRING
)
def _explicit(root_node: ast.AST) -> Iterable[_ERROR]:
return (
(
node.lineno,
node.col_offset,
"ISC003 explicitly concatenated string should be implicitly concatenated",
None,
)
for node in ast.walk(root_node)
if isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and all(
isinstance(operand, (ast.Str, ast.Bytes, ast.JoinedStr))
for operand in [node.left, node.right]
)
)
@attr.s(frozen=True, auto_attribs=True)
class Checker:
name = __name__
version = __version__
tree: ast.AST
file_tokens: List[tokenize.TokenInfo]
def run(self) -> Iterable[_ERROR]:
yield from _implicit(self.file_tokens)
yield from _explicit(self.tree)
| 26.567164 | 86 | 0.645506 | [
"MIT"
] | graingert/flake8-implicit-str-concat | flake8_implicit_str_concat.py | 1,780 | Python |
from django.views.generic.edit import UpdateView
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from user.models import User
from user.forms import UpdateUserForm
class EditProfileView(UpdateView):
model = User
form_class = UpdateUserForm
template_name = 'edit_health_professional.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EditProfileView, self).dispatch(*args, **kwargs)
def get_object(self):
return self.request.user
def get_context_data(self, **kwargs):
context = super(EditProfileView, self).get_context_data(**kwargs)
is_health_professional = hasattr(self.request.user, 'healthprofessional')
if is_health_professional:
template = "dashboardHealthProfessional/template.html"
else:
template = "dashboardPatient/template.html"
context['template'] = template
return context
def get_success_url(self):
return reverse_lazy('edit_profile') | 33 | 81 | 0.728164 | [
"MIT"
] | fga-eps-mds/2017.2-Receita-Mais | medical_prescription/user/views/editprofile.py | 1,122 | Python |
import cv2
import numpy as np
# import gt_utils
def binarize(img):
""" Take an RGB image and binarize it.
:param img: cv2 image
    :return: single-channel binary image
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, bin = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
return bin
def makecoloredlayer(img, mask, color=(0, 255, 0)):
""" Create an image based on provided mask, dimensions and color (default is red)
:param mask: binary mask defining the zone needing color, the rest will be black
:param img: numpy.array used for dimensions
:param color: 3 dimension tuple (B,G,R)
:return:
"""
coloredlayer = np.zeros(img.shape, dtype="uint8")
# cv2.rectangle(img, (x,y), (x,y), color, -1)
cv2.rectangle(coloredlayer, (0, 0), (img.shape[1], img.shape[0]), color, -1)
maskedlayer = cv2.bitwise_or(img, coloredlayer, mask=mask)
return maskedlayer
def makemarkedmask(maskA, maskB):
""" create a new mask based on existing image and coming mask
:param maskA: binary image
:param maskB: binary image
:return: binary image
"""
inter = cv2.bitwise_xor(maskA, maskB)
inter = cv2.bitwise_and(inter, maskB)
inter = cv2.bitwise_xor(inter, maskB)
markedmask = cv2.bitwise_not(inter)
return markedmask
def applymark(img, mask):
""" Apply a mask to an image to keep only active cells.
:return: image
"""
img = cv2.bitwise_and(img, img, mask=mask)
return img
def makeannotatedimage(masks, colors):
""" Combine layers and create an image combining several annotations
:param masks: list of images used as masks
:param colors: list of colors
    :return: combined annotated image, or False on invalid input
"""
if len(masks) != len(colors):
print("Error: annotation and colors do not match.")
return False
else:
if len(masks) == 0:
print("Error: no mask to combine.")
return False
else:
# combo is canvas (y,x,3)
combo = np.zeros(masks[0].shape, dtype='uint8')
i = 0
# binarize images to make masks
bins = []
for mask in masks:
bin = binarize(mask)
bins.append(bin)
# isolate first mask/layer from the rest
firstmask = bins[:1]
combo = makecoloredlayer(combo, firstmask[0], colors[i])
if len(bins) > 1:
other_masks = bins[1:]
# adding other layers
for mask in other_masks:
i += 1
newmask = binarize(combo)
markedout = makemarkedmask(newmask, mask)
combo = applymark(combo, markedout)
newlayer = makecoloredlayer(combo, mask, colors[i])
combo = cv2.bitwise_or(combo, newlayer)
return combo
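# Minimal usage sketch (not part of the original module); the file names and
# BGR colors below are illustrative assumptions.
if __name__ == "__main__":
    # Load two single-class annotation layers and merge them into one colored image.
    layer_a = cv2.imread("annotation_a.png")
    layer_b = cv2.imread("annotation_b.png")
    combined = makeannotatedimage([layer_a, layer_b], [(0, 0, 255), (0, 255, 0)])
    if combined is not False:
        cv2.imwrite("combined_gt.png", combined)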
| 30.052632 | 85 | 0.589492 | [
"MIT"
] | alix-tz/GT_generator | GT_generator/gt_image.py | 2,855 | Python |
#!/usr/bin/env python
# coding: utf-8
# list
classMates = ['Micheal', 'Lucy', 'Anna']
print classMates
# get the length
print len(classMates)
# access elements by index
print classMates[2]
print classMates[-1]
# append an element
classMates.append('Adam')
print classMates
# insert at a given position
classMates.insert(1, 'Paul')
print classMates
# remove elements (pop)
classMates.pop()
print classMates
classMates.pop(0)
print classMates
# replace an element
classMates[1] = 'Sam'
print classMates
# element types do not need to be the same
L = ['Anna', 22, True]
print L
# nested lists
s = ['Python', 'Ruby', ['Java', 'objc']]
print s
print s[2][1]
# tuple
classMates = ('Micheal', 'Lucy', 'Adam')
print classMates
# Because a tuple is immutable, the code is safer. Prefer a tuple over a list whenever possible.
t = (1, 2)
print t
t = ()
print t
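# Note: (1) below is just the integer 1 in parentheses; a one-element tuple
# needs a trailing comma, i.e. (1,)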
t = (1)
print t
t = (1,)
print t
t = ('a', 'b', ['A', 'B'])
t[2][0] = 'X'
t[2][1] = 'Y'
print t
| 11.323529 | 48 | 0.632468 | [
"MIT"
] | atcuan/Python | liaoxuefeng.com/004-ListAndTuple.py | 868 | Python |
_base_ = [
'../../_base_/models/faster_rcnn_r50_fpn.py',
'../dataset.py',
'../../_base_/schedules/schedule_1x.py',
'../../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=11
)
)
)
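# _delete_=True tells the mmcv Config merger to replace, rather than merge with,
# the optimizer_config inherited from the base schedule.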
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
checkpoint_config = dict(max_keep_ckpts=3, interval=1)
| 21.684211 | 60 | 0.621359 | [
"Apache-2.0"
] | Pstage-Segmentation-Detection/mmdetection_trash | configs/trash/faster_rcnn/faster_rcnn_r50_fpn_1x_trash.py | 412 | Python |
from fastapi import APIRouter
from modules.core.converter import Converter
from modules.schemas.converter import ConverterSchema
converter = APIRouter()
@converter.post('/convert', tags=['converter'])
def convert_document(convert: ConverterSchema):
result = Converter.convert(convert.source_format, convert.target_format, convert.content)
return dict(status="converted", result_format=convert.target_format, result=result)
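# Example request (illustrative only; the concrete formats accepted depend on
# the Converter implementation and are assumptions here):
#   POST /convert
#   {"source_format": "markdown", "target_format": "html", "content": "# Title"}
# which would respond with something like:
#   {"status": "converted", "result_format": "html", "result": "<h1>Title</h1>"}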
| 33.461538 | 93 | 0.806897 | [
"MIT"
] | atthealchemist/graduation-project-system-backend | modules/routers/converter.py | 435 | Python |
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
from brainstorm.structure.construction import UniquelyNamed
def test_basename():
n = UniquelyNamed('my_basename')
assert n.name == 'my_basename'
def test_merging_scopes_no_conflict():
n1 = UniquelyNamed('A')
n2 = UniquelyNamed('B')
n1.merge_scopes(n2)
assert n1.name == 'A'
assert n2.name == 'B'
def test_multimerging_no_conflict():
n1 = UniquelyNamed('A')
n2 = UniquelyNamed('B')
n1.merge_scopes(n2)
n2.merge_scopes(n1)
n2.merge_scopes(n1)
n1.merge_scopes(n2)
assert n1.name == 'A'
assert n2.name == 'B'
def test_merging_scopes():
n1 = UniquelyNamed('my_basename')
n2 = UniquelyNamed('my_basename')
n1.merge_scopes(n2)
assert n1.name == 'my_basename_1'
assert n2.name == 'my_basename_2'
def test_merging_scopes_symmetric():
n1 = UniquelyNamed('my_basename')
n2 = UniquelyNamed('my_basename')
n2.merge_scopes(n1)
assert n1.name == 'my_basename_1'
assert n2.name == 'my_basename_2'
def test_merging_scopes_transitiv():
n1 = UniquelyNamed('my_basename')
n2 = UniquelyNamed('my_basename')
n3 = UniquelyNamed('my_basename')
n1.merge_scopes(n2)
n2.merge_scopes(n3)
assert n1.name == 'my_basename_1'
assert n2.name == 'my_basename_2'
assert n3.name == 'my_basename_3'
def test_merging_scopes_transitiv2():
n1 = UniquelyNamed('my_basename')
n2 = UniquelyNamed('my_basename')
n3 = UniquelyNamed('my_basename')
n4 = UniquelyNamed('my_basename')
n1.merge_scopes(n2)
n3.merge_scopes(n4)
n2.merge_scopes(n4)
assert n1.name == 'my_basename_1'
assert n2.name == 'my_basename_2'
assert n3.name == 'my_basename_3'
assert n4.name == 'my_basename_4'
def test_sneaky_name_collision():
n1 = UniquelyNamed('A_2')
n2 = UniquelyNamed('A')
n3 = UniquelyNamed('A')
n4 = UniquelyNamed('A')
n1.merge_scopes(n2)
n2.merge_scopes(n3)
n3.merge_scopes(n4)
assert n1.name == 'A_2'
assert n2.name == 'A_1'
assert n3.name == 'A_3'
assert n4.name == 'A_4'
def test_no_sneaky_name_collision():
n0 = UniquelyNamed('A_2')
n1 = UniquelyNamed('A_2')
n2 = UniquelyNamed('A')
n3 = UniquelyNamed('A')
n4 = UniquelyNamed('A')
n0.merge_scopes(n1)
n1.merge_scopes(n2)
n2.merge_scopes(n3)
n3.merge_scopes(n4)
assert n0.name == 'A_2_1'
assert n1.name == 'A_2_2'
assert n2.name == 'A_1'
assert n3.name == 'A_2'
assert n4.name == 'A_3'
def test_separate_scopes():
n0 = UniquelyNamed('A')
n1 = UniquelyNamed('A')
n2 = UniquelyNamed('A')
n3 = UniquelyNamed('A')
n1.merge_scopes(n0)
n2.merge_scopes(n3)
assert n0.name == 'A_1'
assert n1.name == 'A_2'
assert n2.name == 'A_1'
assert n3.name == 'A_2'
| 24.675214 | 65 | 0.656391 | [
"MIT"
] | IDSIA/brainstorm | brainstorm/tests/test_uniquely_named.py | 2,887 | Python |
class Solution:
def longestPalindromeSubseq(self, s: str) -> int:
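        # The longest palindromic subsequence of s has the same length as the
        # longest common subsequence of s and its reverse; s[N - j] is the
        # j-th character of reversed(s). Only the previous DP row is kept,
        # giving O(N) extra space and O(N^2) time.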
N = len(s)
prev = [0] * (N+1)
for i in range(N+1):
curr = [0] * (N+1)
for j in range(N+1):
if not i or not j:
curr[j] = 0
elif s[i-1] == s[N-j]:
curr[j] = prev[j-1] + 1
else:
curr[j] = max(prev[j], curr[j-1])
prev = curr[:]
return curr[N]
| 28.588235 | 53 | 0.364198 | [
"MIT"
] | VVKot/coding-competitions | leetcode/python/516_longest_palindromic_subsequence.py | 486 | Python |
import scrapy
import json
class RussianAlphabetSpider(scrapy.Spider):
name = "quotes"
start_urls = [
'https://www.russianforeveryone.com/',
]
def parse(self, response):
alphabets_urls = response.xpath('//a[contains(.,"alphabet")]')
yield from response.follow_all(alphabets_urls, callback=self.parse_alphabet)
def parse_alphabet(self, response):
tables = response.xpath('//table[contains(.,"Letter")]')
rows = tables.xpath("*")
alphabet_dict = {}
i = 0
for row in rows:
alphabet_dict[i] = {
"text": row.xpath("descendant::node()/text()[normalize-space()]").getall(),
"image": row.xpath("descendant::node()/img[@src]/@src").getall(),
"sound": row.xpath("descendant::node()/a/@onclick").getall(),
}
i += 1
with open('banana_crawler/out/russian_alphabet.json', 'w') as f:
json.dump(alphabet_dict, f, indent=4)
| 31.53125 | 91 | 0.570862 | [
"MIT"
] | robsonzagrejr/banana-crawler | banana_crawler/spiders/russian_alphabet.py | 1,009 | Python |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
import os
import sys
sys.path.append(os.path.realpath(os.getcwd())) | 20.2 | 61 | 0.759076 | [
"BSD-3-Clause"
] | goel96vibhor/AdvSentEval | examples/__init__.py | 303 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getchaintips RPC.
- introduce a network split
- work on chains of different lengths
- join the network together again
- verify that getchaintips now returns two chain tips.
"""
from test_framework.test_framework import ibacoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (ibacoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def run_test (self):
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10)
self.nodes[2].generate(20)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
| 34.328125 | 70 | 0.640874 | [
"MIT"
] | IBACOIN/IBA | test/functional/rpc_getchaintips.py | 2,197 | Python |
"""safe name
Revision ID: 9332f05cb7d6
Revises: 30228d27a270
Create Date: 2020-05-24 23:49:06.195432
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9332f05cb7d6'
down_revision = '30228d27a270'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('base', sa.Column('nameSafe', sa.String(length=32), nullable=True))
op.create_index(op.f('ix_base_nameSafe'), 'base', ['nameSafe'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_base_nameSafe'), table_name='base')
op.drop_column('base', 'nameSafe')
# ### end Alembic commands ###
| 25.806452 | 85 | 0.69125 | [
"MIT"
] | TimmySly/ctaCompanion | migrations/versions/9332f05cb7d6_safe_name.py | 800 | Python |
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :mod:`OpenSSL.SSL`.
"""
import datetime
import sys
import uuid
from gc import collect, get_referrers
from errno import (
EAFNOSUPPORT, ECONNREFUSED, EINPROGRESS, EWOULDBLOCK, EPIPE, ESHUTDOWN)
from sys import platform, getfilesystemencoding
from socket import AF_INET, AF_INET6, MSG_PEEK, SHUT_RDWR, error, socket
from os import makedirs
from os.path import join
from weakref import ref
from warnings import simplefilter
import pytest
from pretend import raiser
from six import PY3, text_type
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from OpenSSL.crypto import PKey, X509, X509Extension, X509Store
from OpenSSL.crypto import dump_privatekey, load_privatekey
from OpenSSL.crypto import dump_certificate, load_certificate
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.SSL import OPENSSL_VERSION_NUMBER, SSLEAY_VERSION, SSLEAY_CFLAGS
from OpenSSL.SSL import SSLEAY_PLATFORM, SSLEAY_DIR, SSLEAY_BUILT_ON
from OpenSSL.SSL import SENT_SHUTDOWN, RECEIVED_SHUTDOWN
from OpenSSL.SSL import (
SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD,
TLSv1_1_METHOD, TLSv1_2_METHOD)
from OpenSSL.SSL import OP_SINGLE_DH_USE, OP_NO_SSLv2, OP_NO_SSLv3
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE, VERIFY_NONE)
from OpenSSL import SSL
from OpenSSL.SSL import (
SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH,
SESS_CACHE_NO_AUTO_CLEAR, SESS_CACHE_NO_INTERNAL_LOOKUP,
SESS_CACHE_NO_INTERNAL_STORE, SESS_CACHE_NO_INTERNAL)
from OpenSSL.SSL import (
Error, SysCallError, WantReadError, WantWriteError, ZeroReturnError)
from OpenSSL.SSL import (
Context, Session, Connection, SSLeay_version)
from OpenSSL.SSL import _make_requires
from OpenSSL._util import ffi as _ffi, lib as _lib
from OpenSSL.SSL import (
OP_NO_QUERY_MTU, OP_COOKIE_EXCHANGE, OP_NO_TICKET, OP_NO_COMPRESSION,
MODE_RELEASE_BUFFERS)
from OpenSSL.SSL import (
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE)
try:
from OpenSSL.SSL import (
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
)
except ImportError:
SSL_ST_INIT = SSL_ST_BEFORE = SSL_ST_OK = SSL_ST_RENEGOTIATE = None
from .util import WARNING_TYPE_EXPECTED, NON_ASCII, is_consistent_type
from .test_crypto import (
cleartextCertificatePEM, cleartextPrivateKeyPEM,
client_cert_pem, client_key_pem, server_cert_pem, server_key_pem,
root_cert_pem)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
def handshake(client, server):
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
    Create a connected socket pair and force two connected SSL sockets
    to talk to each other over it.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
interact_in_memory(client_conn, server_conn)
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
        `Context.use_certificate` raises `TypeError` when not passed
exactly one `OpenSSL.crypto.X509` instance as an argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
`Context.use_certificate` raises `OpenSSL.SSL.Error` when passed a
`OpenSSL.crypto.X509` instance which has not been initialized
(ie, which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
        `unicode` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
        If the passphrase returned by the passphrase callback is longer than
        the indicated maximum length, it is truncated.
"""
# A priori knowledge!
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
Verify that if path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
Verify that if path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
        `Context.load_verify_locations` raises `TypeError` if called with
        non-`str` arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is and we disable
SSL_CTX_SET_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
Test that when passed empty arrays or paths that do not exist no
errors are raised.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
We had a bug where the X509 cert instantiated in the callback wrapper
didn't __init__ so it was missing objects needed when calling
get_subject. This test sets up a handshake where we call get_subject
on the cert provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
`Context.use_certificate_chain_file` raises `TypeError` if passed a
non-byte string single argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
        `Context.set_session_cache_mode` raises `TypeError` if called with
        a non-integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
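        # set_session_cache_mode returns the previously configured mode, so
        # the SESS_CACHE_OFF value set above should come back from the call
        # below.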
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
        callback passed to `Context.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
    Tests for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
        Tests that clients and servers which agree on the negotiated next
        protocol can correctly establish a connection, and that the agreed
        protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
        Tests that when clients and servers cannot agree on what protocol
        to use next, the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
select fails it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
        Test that we can handle exceptions in the advertise callback. If
        advertise fails, no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
            Clients and servers that agree on the negotiated ALPN protocol can
            correctly establish a connection, and the agreed protocol is
            reported by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
When clients and servers cannot agree on what protocol to use next
the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not in OpenSSL, we should raise NotImplementedError.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
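        # A non-blocking connect is typically still in progress at this point,
        # reported as EINPROGRESS on POSIX systems and EWOULDBLOCK on Windows.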
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
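        # shutdown() reports an incomplete shutdown (a false value) here
        # because only our close_notify has been sent; the peer's has not
        # been received yet.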
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
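        # The first shutdown() sends our close_notify but reports the shutdown
        # as incomplete because the peer's close_notify has not arrived yet.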
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
`Connection.state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
        `Connection.get_peer_cert_chain` returns a list of certificates
        which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
is using, a `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
`OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
        fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
        # signal a short write via its return value, it seems this doesn't
        # always happen on all platforms (FreeBSD and OS X in particular) for the
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
# Client's speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
        `Connection.get_finished` returns the TLS Finished message sent
        by the client or server. Finished messages are sent during the
        TLS handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
        `Connection.get_peer_finished` returns the TLS Finished message
        received from the client or server. Finished messages are sent
        during the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
        The TLS Finished message sent by the server must be the TLS Finished
        message received by the client.
        The TLS Finished message sent by the client must be the TLS Finished
        message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
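        # The attempted handshake wrote a ClientHello into the outgoing memory
        # BIO, so there are bytes available and bio_read honors the requested
        # maximum of two bytes.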
data = conn.bio_read(2)
assert 2 == len(data)
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
`Connection.get_cipher_list` returns a list of `bytes` giving the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
When called with arguments other than string argument for its first
parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
        When passed a text string, `Connection.send` transmits all of it and
returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` bails out as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
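        """
        `Connection.recv_into` with `socket.MSG_PEEK` in ``flags`` fills the
        buffer without consuming the data, so repeated peeks see the same
        bytes.
        """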
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
        When called with something other than a string for its first
        argument, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
        `Connection.sendall` transmits all of the content in the string passed
        to it, raising a DeprecationWarning if it is a text string.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
        `Connection.sendall` transmits all of the bytes in the string passed to
        it even if this requires multiple calls to an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
    These are values defined by OpenSSL intended only to be used as flags to
    OpenSSL APIs. The only assertions that can reasonably be made about them
    concern their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
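        # Disable the legacy SSLv2/SSLv3 protocols and use a fresh DH key for
        # each handshake.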
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
Two `Connection`s which use memory BIOs can be manually connected by
reading from the output of each and writing those bytes to the input of
the other and in this way establish a connection and exchange
application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
        Test that `Connection.bio_read` and `Connection.bio_write` don't
        work on `OpenSSL.SSL.Connection` objects that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` after a call to
`Context.add_client_ca` replaces the CA name specified by the
former call with the names specified by the latter call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
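    # The two helpers above are combined the same way throughout the tests that
    # follow; a sketch of the common pattern (not an additional test case):
    #   client = self._client_connection(callback=client_callback, data=None)
    #   server = self._server_connection(callback=server_callback, data=None)
    #   handshake_in_memory(client, server)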
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
Both callbacks receive an opaque, user-provided piece of data in their
callbacks as the final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
        Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
        Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
The server callback must return a bytestring, or a TypeError is thrown.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
| 35.721118 | 79 | 0.640149 | ["Apache-2.0"] | dholth/pyopenssl | tests/test_ssl.py | 141,920 | Python |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import subprocess
import time
import os
from celery import Celery
from celery import states as celery_states
from airflow.config_templates.default_celery import DEFAULT_CELERY_CONFIG
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import BaseExecutor
from airflow import configuration
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.module_loading import import_string
'''
To start the celery worker, run the command:
airflow worker
'''
if configuration.conf.has_option('celery', 'celery_config_options'):
celery_configuration = import_string(
configuration.conf.get('celery', 'celery_config_options')
)
else:
celery_configuration = DEFAULT_CELERY_CONFIG
app = Celery(
configuration.conf.get('celery', 'CELERY_APP_NAME'),
config_source=celery_configuration)
@app.task
def execute_command(command):
log = LoggingMixin().log
log.info("Executing command in Celery: %s", command)
env = os.environ.copy()
try:
subprocess.check_call(command, shell=True, stderr=subprocess.STDOUT,
close_fds=True, env=env)
except subprocess.CalledProcessError as e:
log.exception('execute_command encountered a CalledProcessError')
log.error(e.output)
raise AirflowException('Celery command failed')
class CeleryExecutor(BaseExecutor):
"""
CeleryExecutor is recommended for production use of Airflow. It allows
distributing the execution of task instances to multiple worker nodes.
Celery is a simple, flexible and reliable distributed system to process
vast amounts of messages, while providing operations with the tools
required to maintain such a system.
"""
def start(self):
self.tasks = {}
self.last_state = {}
def execute_async(self, key, command,
queue=DEFAULT_CELERY_CONFIG['task_default_queue'],
executor_config=None):
self.log.info("[celery] queuing {key} through celery, "
"queue={queue}".format(**locals()))
self.tasks[key] = execute_command.apply_async(
args=[command], queue=queue)
self.last_state[key] = celery_states.PENDING
def sync(self):
self.log.debug("Inquiring about %s celery task(s)", len(self.tasks))
for key, task in list(self.tasks.items()):
try:
state = task.state
if self.last_state[key] != state:
if state == celery_states.SUCCESS:
self.success(key)
del self.tasks[key]
del self.last_state[key]
elif state == celery_states.FAILURE:
self.fail(key)
del self.tasks[key]
del self.last_state[key]
elif state == celery_states.REVOKED:
self.fail(key)
del self.tasks[key]
del self.last_state[key]
else:
self.log.info("Unexpected state: %s", state)
self.last_state[key] = state
except Exception as e:
self.log.error("Error syncing the celery executor, ignoring it:")
self.log.exception(e)
def end(self, synchronous=False):
if synchronous:
while any([
task.state not in celery_states.READY_STATES
for task in self.tasks.values()]):
time.sleep(5)
self.sync()
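    # Rough lifecycle sketch of how a scheduler drives this executor. The key
    # tuple and command string below are illustrative placeholders, not values
    # taken from Airflow itself:
    #
    #   executor = CeleryExecutor()
    #   executor.start()
    #   executor.execute_async(key=('dag_id', 'task_id', execution_date),
    #                          command='airflow run dag_id task_id ...')
    #   executor.sync()                 # poll Celery task states
    #   executor.end(synchronous=True)  # block until all tasks reach a ready state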
| 37.075 | 81 | 0.642841 | ["Apache-2.0"] | SpaceApplications/airflow | airflow/executors/celery_executor.py | 4,449 | Python |
'''
Minimum number of jumps to reach end
Given an array of integers where each element represents
the max number of steps that can be made forward from that
element. Write a function to return the minimum number of
jumps to reach the end of the array (starting from the first element).
If an element is 0, they cannot move through that element. If
the end isn’t reachable, return -1.
----------------------------------------
Solution :
Time Complexity : O(n)
Aux Space : O(1)
'''
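# Worked example for the greedy approach below (input chosen for illustration,
# not taken from any particular test case):
#   arr = [2, 3, 1, 1, 4], n = 5
#   jump 1: from index 0 (value 2) we can reach indices 1..2, reach = 2
#   jump 2: from index 1 (value 3) the farthest reach becomes index 4, the end
#   => minimum number of jumps = 2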
class Solution():
def minJumps(self,arr,n):
        # Already at the last index of a single-element array: no jump needed.
        if n <= 1:
            return 0
if arr[0] == 0 :
return -1
jumps = 1
reach = arr[0]
steps = arr[0]
for i in range(1,n-1):
reach = max(reach , arr[i] + i)
steps-=1
if steps == 0 :
jumps+=1
if i >= reach :
return -1
steps = reach - i
return jumps
t = int(input())
for _ in range(t):
n = int(input())
arr = list(map(int,input().split()))
ob = Solution()
res = ob.minJumps(arr,n)
    print(res)
| 24.632653 | 71 | 0.495443 | ["MIT"] | AddinDev/Hackoween-Hacktoberfest2021 | Projects/Python/Minimum Jumps to Reach the End.py | 1,209 | Python |
###############################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import os
import argparse
import json
import sys
import numpy as np
import torch
from flowtron import Flowtron
from torch.utils.data import DataLoader
from data import Data
from train import update_params
sys.path.insert(0, "tacotron2")
sys.path.insert(0, "tacotron2/waveglow")
from glow import WaveGlow
from scipy.io.wavfile import write
def infer(flowtron_path, waveglow_path, output_dir, text, speaker_id, n_frames,
sigma, gate_threshold, seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# load waveglow
waveglow = torch.load(waveglow_path)['model'].cuda().eval()
waveglow.cuda().half()
for k in waveglow.convinv:
k.float()
waveglow.eval()
# load flowtron
model = Flowtron(**model_config).cuda()
state_dict = torch.load(flowtron_path, map_location='cpu')['state_dict']
model.load_state_dict(state_dict)
model.eval()
print("Loaded checkpoint '{}')" .format(flowtron_path))
ignore_keys = ['training_files', 'validation_files']
trainset = Data(
data_config['training_files'],
**dict((k, v) for k, v in data_config.items() if k not in ignore_keys))
speaker_vecs = trainset.get_speaker_id(speaker_id).cuda()
text = trainset.get_text(text).cuda()
speaker_vecs = speaker_vecs[None]
text = text[None]
with torch.no_grad():
residual = torch.cuda.FloatTensor(1, 80, n_frames).normal_() * sigma
mels, attentions = model.infer(
residual, speaker_vecs, text, gate_threshold=gate_threshold)
for k in range(len(attentions)):
attention = torch.cat(attentions[k]).cpu().numpy()
fig, axes = plt.subplots(1, 2, figsize=(16, 4))
axes[0].imshow(mels[0].cpu().numpy(), origin='bottom', aspect='auto')
axes[1].imshow(attention[:, 0].transpose(), origin='bottom', aspect='auto')
fig.savefig(os.path.join(output_dir, 'sid{}_sigma{}_attnlayer{}.png'.format(speaker_id, sigma, k)))
plt.close("all")
with torch.no_grad():
audio = waveglow.infer(mels.half(), sigma=0.8).float()
audio = audio.cpu().numpy()[0]
# normalize audio for now
audio = audio / np.abs(audio).max()
print(audio.shape)
write(os.path.join(output_dir, 'sid{}_sigma{}.wav'.format(speaker_id, sigma)),
data_config['sampling_rate'], audio)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
parser.add_argument('-p', '--params', nargs='+', default=[])
parser.add_argument('-f', '--flowtron_path',
help='Path to flowtron state dict', type=str)
parser.add_argument('-w', '--waveglow_path',
help='Path to waveglow state dict', type=str)
parser.add_argument('-t', '--text', help='Text to synthesize', type=str)
parser.add_argument('-i', '--id', help='Speaker id', type=int)
parser.add_argument('-n', '--n_frames', help='Number of frames',
default=400, type=int)
parser.add_argument('-o', "--output_dir", default="results/")
parser.add_argument("-s", "--sigma", default=0.5, type=float)
parser.add_argument("-g", "--gate", default=0.5, type=float)
parser.add_argument("--seed", default=1234, type=int)
args = parser.parse_args()
# Parse configs. Globals nicer in this case
with open(args.config) as f:
data = f.read()
global config
config = json.loads(data)
update_params(config, args.params)
data_config = config["data_config"]
global model_config
model_config = config["model_config"]
# Make directory if it doesn't exist
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
os.chmod(args.output_dir, 0o775)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
infer(args.flowtron_path, args.waveglow_path, args.output_dir, args.text,
args.id, args.n_frames, args.sigma, args.gate, args.seed)
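    # Example invocation (the config, checkpoint paths, text and speaker id are
    # illustrative placeholders, not files shipped with this script):
    #   python inference.py -c config.json -f models/flowtron_ljs.pt \
    #       -w models/waveglow_256channels.pt -t "Hello world" -i 0 -o results/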
| 36.969925 | 107 | 0.647346 | ["Apache-2.0"] | CardenB/flowtron | inference.py | 4,917 | Python |
# coding=utf-8
import pywikibot
import re
import urllib.request
from pywikibot import Claim, pagegenerators as pg
from time import sleep
from urllib.error import HTTPError
from utils.properties import PID_OK_ACCOUNT, PID_OK_PROFILE_ID
repo = pywikibot.Site('wikidata', 'wikidata')
def parse_ok_ru(account):
html = None
tries = 0
while html is None:
try:
html = urllib.request.urlopen('https://ok.ru/' + account).read().decode()
except HTTPError:
if tries > 3:
return None
tries += 1
sleep(2 ** tries)
match = re.search('<a data-module="AuthLoginPopup" href="/profile/(\\d+)"', html)
if match is None:
return None
return match.group(1)
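# Illustrative behaviour of the helper above (the account name is a placeholder):
# parse_ok_ru('some.profile') returns the numeric profile id scraped from the
# page, or None when the page has no profile link (e.g. a group page) or when
# the request keeps failing after a few exponentially backed-off retries.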
def process_claim(claim):
if PID_OK_PROFILE_ID in claim.qualifiers:
return
account = claim.getTarget()
match = re.fullmatch('^profile/(\\d+)$', account)
if match:
account_id = match.group(1)
else:
account_id = parse_ok_ru(account)
if account_id is None:
print("%s -> group" % account)
return
print("%s -> %s" % (account, account_id))
qualifier = Claim(repo, PID_OK_PROFILE_ID)
qualifier.setTarget(account_id)
claim.addQualifier(qualifier)
return
def add_ok_numeric_id(item):
data = item.get()
if 'claims' not in data or PID_OK_ACCOUNT not in data['claims']:
return
for claim in data['claims'][PID_OK_ACCOUNT]:
process_claim(claim)
def iterate_items():
query = '''
SELECT ?item
{
?item p:P5163 ?statement .
?statement ps:P5163 ?value .
#?item wdt:P31 wd:Q5 .
?article schema:about ?item .
?article schema:isPartOf <https://ru.wikipedia.org/>.
FILTER REGEX(?value, "^(?!group/)", "i")
FILTER NOT EXISTS{ ?statement pq:P9269 [] }
}
'''
generator = pg.WikidataSPARQLPageGenerator(query, site=repo)
for item in generator:
add_ok_numeric_id(item)
sleep(5)
iterate_items()
| 25.207317 | 85 | 0.611998 | ["Apache-2.0"] | putnik/pbot | ok_add_ids.py | 2,067 | Python |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demonstrates how to add a campaign-level bid modifier for call interactions.
"""
from __future__ import absolute_import
import argparse
import six
import sys
import google.ads.google_ads.client
def main(client, customer_id, campaign_id, bid_modifier_value):
campaign_service = client.get_service('CampaignService', version='v1')
campaign_bm_service = client.get_service('CampaignBidModifierService',
version='v1')
# Create campaign bid modifier for call interactions with the specified
# campaign ID and bid modifier value.
campaign_bid_modifier_operation = client.get_type(
'CampaignBidModifierOperation')
campaign_bid_modifier = campaign_bid_modifier_operation.create
# Set the campaign.
campaign_bid_modifier.campaign.value = campaign_service.campaign_path(
customer_id, campaign_id)
# Set the bid modifier.
campaign_bid_modifier.bid_modifier.value = bid_modifier_value
# Sets the interaction type.
campaign_bid_modifier.interaction_type.type = (
client.get_type('InteractionTypeEnum', version='v1').CALLS)
# Add the campaign bid modifier.
try:
campaign_bm_response = (
campaign_bm_service.mutate_campaign_bid_modifiers(
customer_id, [campaign_bid_modifier_operation]))
except google.ads.google_ads.errors.GoogleAdsException as ex:
print('Request with ID "%s" failed with status "%s" and includes the '
'following errors:' % (ex.request_id, ex.error.code().name))
for error in ex.failure.errors:
print('\tError with message "%s".' % error.message)
if error.location:
for field_path_element in error.location.field_path_elements:
print('\t\tOn field: %s' % field_path_element.field_name)
sys.exit(1)
print('Created campaign bid modifier: %s.'
% campaign_bm_response.results[0].resource_name)
if __name__ == '__main__':
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
google_ads_client = (google.ads.google_ads.client.GoogleAdsClient
.load_from_storage())
parser = argparse.ArgumentParser(
description=('Adds a bid modifier to the specified campaign ID, for '
'the given customer ID.'))
# The following argument(s) should be provided to run the example.
parser.add_argument('-c', '--customer_id', type=six.text_type,
required=True, help='The Google Ads customer ID.')
parser.add_argument('-i', '--campaign_id', type=six.text_type,
required=True, help='The campaign ID.')
parser.add_argument('-b', '--bid_modifier_value', type=float,
required=False, default=1.5,
help='The bid modifier value.')
args = parser.parse_args()
main(google_ads_client, args.customer_id, args.campaign_id,
args.bid_modifier_value)
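    # Example run (the customer and campaign ids below are placeholders):
    #   python add_campaign_bid_modifier.py -c 1234567890 -i 9876543210 -b 1.5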
| 41.375 | 79 | 0.68635 | ["Apache-2.0"] | jwygoda/google-ads-python | examples/campaign_management/add_campaign_bid_modifier.py | 3,641 | Python |
"""Utility functions for COVID19 UK data"""
import os
import re
import datetime
import numpy as np
import pandas as pd
def prependDate(filename):
    now = datetime.datetime.now()  # current date and time
date_time = now.strftime("%Y-%m-%d")
return date_time + "_" + filename
def prependID(filename, config):
return config["Global"]["prependID_Str"] + "_" + filename
def format_input_filename(filename, config):
# prepend with a set string
# to load a specific date, this should be in the string
p, f = os.path.split(filename)
if config["Global"]["prependID"]:
f = prependID(f, config)
filename = p + "/" + f
return filename
def format_output_filename(filename, config):
p, f = os.path.split(filename)
if config["Global"]["prependID"]:
f = prependID(f, config)
if config["Global"]["prependDate"]:
f = prependDate(f)
filename = p + "/" + f
return filename
def merge_lad_codes(lad19cd):
merging = {
"E06000052": "E06000052,E06000053", # City of London & Westminster
"E06000053": "E06000052,E06000053", # City of London & Westminster
"E09000001": "E09000001,E09000033", # Cornwall & Isles of Scilly
"E09000033": "E09000001,E09000033", # Cornwall & Isles of Scilly
}
lad19cd = lad19cd.apply(lambda x: merging[x] if x in merging.keys() else x)
return lad19cd
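# Illustrative behaviour (made-up input): a Series ["E06000052", "E08000001"]
# becomes ["E06000052,E06000053", "E08000001"]; only codes listed in `merging`
# are rewritten to their merged key, everything else passes through unchanged.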
def merge_lad_values(df):
df = df.groupby("lad19cd").sum().reset_index()
return df
def get_date_low_high(config):
date_range = [np.datetime64(x) for x in config["date_range"]]
return tuple(date_range)
def check_date_format(df):
df = df.reset_index()
if (
not pd.to_datetime(df["date"], format="%Y-%m-%d", errors="coerce")
.notnull()
.all()
):
raise ValueError("Invalid date format")
return True
def check_date_bounds(df, date_low, date_high):
if not ((date_low <= df["date"]) & (df["date"] < date_high)).all():
raise ValueError("Date out of bounds")
return True
def check_lad19cd_format(df):
df = df.reset_index()
# Must contain 9 characters, 1 region letter followed by 8 numbers
split_code = df["lad19cd"].apply(lambda x: re.split("(\d+)", x))
if not split_code.apply(
lambda x: (len(x[0]) == 1) & (x[0] in "ENSW") & (len(x[1]) == 8)
).all():
raise ValueError("Invalid lad19cd format")
return True
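# For example, re.split(r"(\d+)", "E06000052") yields ['E', '06000052', ''],
# so a single region letter from "ENSW" followed by exactly 8 digits passes.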
def invalidInput(input):
raise NotImplementedError(f'Input type "{input}" mode not implemented')
| 26.3125 | 79 | 0.638163 | ["MIT"] | chrism0dwk/covid19uk | covid19uk/data/util.py | 2,526 | Python |
#!/bin/env python
#
# Copyright 2014 Alcatel-Lucent Enterprise.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# $File: omniswitch_restful_driver.py$
# $Build: OONP_H_R01_6$
# $Date: 05/06/2014 12:10:39$
# $Author: vapoonat$
#
#
import logging
import os
import sys
import importlib
import traceback
import urllib
import urllib2
import time
import thread
import threading
from neutron.plugins.omniswitch.consumer import AOSAPI, AOSConnection
#from neutron.plugins.omniswitch.omniswitch_driver_base import OmniSwitchDeviceDriverBase
LOG = logging.getLogger(__name__)
class OmniSwitchRestfulDriver(object):
"""
Name: OmniSwitchRestfulDriver
Description: OmniSwitch device driver to communicate with OS6900 and OS10K devices which support
RESTful interface.
Details: It is used by OmniSwitchDevicePluginV2 to perform the necessary configuration on the physical
switches as response to OpenStack networking APIs. This driver is used only for OS6900, OS10K
and OS6860 devices which support RESTful APIs to configure them. This driver requires the following
minimum version of AOS SW to be running on these devices...
OS10K : 732-R01-GA
OS6900 : 733-R01-GA
OS6860 : 811-R01--GA
It uses the "consumer.py" library provided as a reference implementation from the AOS/OmniSwitch
                    point of view. No changes are made as part of the OmniSwitch plug-in or driver development. For
latest version of consumer.py, refer "//depot/7.3.3.R01/sw/management/web/consumer/consumer.py".
For any issues/bugs with the library, please contact AOS 7x WebService module owner
(Chris Ravanscoft)
"""
switch_ip = None
switch_login = None
switch_password = None
switch_prompt = None
threadLock = None
_init_done = False
### user configs
switch_vlan_name_prefix = ''
def __init__(self, ip, login='admin', password='switch', prompt='->'):
self.switch_ip = ip.strip()
if len(self.switch_ip) == 0 :
LOG.info("Init Error! Must provide a valid IP address!!!")
return
self.switch_login = login.strip()
if len(self.switch_login) == 0 :
self.switch_login = 'admin'
self.switch_password = password.strip()
if len(self.switch_password) == 0 :
self.switch_password = 'switch'
self.switch_prompt = prompt.strip()
if len(self.switch_prompt) == 0 :
self.switch_prompt = '->'
self.aosapi = AOSAPI(AOSConnection(
self.switch_login,
self.switch_password,
self.switch_ip,
True,
True,
True,
-1,
None,
0,
False))
self.threadLock = threading.Lock()
self._init_done = True
def set_config(self, vlan_name_prefix):
self.switch_vlan_name_prefix = vlan_name_prefix
def connect(self):
if self._init_done == False :
LOG.info("Driver is not initialized!!!")
return False
try:
results = self.aosapi.login()
if not self.aosapi.success():
LOG.info("Login error %s: %s", self.switch_ip, results)
return False
else:
return True
except urllib2.HTTPError, e :
self.aosapi.logout()
LOG.info("Connect Error %s: %s", self.switch_ip, e)
return False
def disconnect(self):
self.aosapi.logout()
###beware lock used!!!, dont call this func from another locked func
def create_vpa(self, vlan_id, slotport, args=None):
self.threadLock.acquire(1)
ret = False
if self.connect() == False:
self.threadLock.release()
return False
ifindex = self._get_ifindex_from_slotport(slotport)
results = self.aosapi.put('mib', 'vpaTable',
{'mibObject0':'vpaIfIndex:'+str(ifindex),
'mibObject1':'vpaVlanNumber:'+str(vlan_id),
'mibObject2':'vpaType:2'})['result']
if self.aosapi.success():
LOG.info("vpa %s --> %s created in %s successfully!", vlan_id, slotport, self.switch_ip)
ret = True
else:
LOG.info("vpa %s --> %s creation in %s failed! %s", vlan_id, slotport, self.switch_ip, results)
self.disconnect()
self.threadLock.release()
return ret
###beware lock used!!!, dont call this func from another locked func
def delete_vpa(self, vlan_id, slotport, args=None):
self.threadLock.acquire(1)
ret = False
if self.connect() == False:
self.threadLock.release()
return False
ifindex = self._get_ifindex_from_slotport(slotport)
results = self.aosapi.delete('mib', 'vpaTable',
{'mibObject0':'vpaIfIndex:'+str(ifindex),
'mibObject1':'vpaVlanNumber:'+str(vlan_id)})['result']
if self.aosapi.success():
LOG.info("vpa %s --> %s deleted in %s successfully!", vlan_id, slotport, self.switch_ip)
ret = True
else:
LOG.info("vpa %s --> %s deletion in %s failed! %s", vlan_id, slotport, self.switch_ip, results)
self.disconnect()
self.threadLock.release()
return ret
def create_vlan_locked(self, vlan_id, net_name=''):
self.threadLock.acquire(1)
ret = self.create_vlan(vlan_id, net_name)
self.threadLock.release()
return ret
def create_vlan(self, vlan_id, net_name=''):
ret = False
if self.connect() == False:
return False
vlan_name = self.switch_vlan_name_prefix+'-'+net_name+'-'+str(vlan_id)
results = self.aosapi.put('mib', 'vlanTable',
{'mibObject0':'vlanNumber:'+str(vlan_id),
'mibObject1':'vlanDescription:'+vlan_name})['result']
#'mibObject1':'vlanDescription:OpenStack-'+str(vlan_id)})['result']
if self.aosapi.success():
LOG.info("vlan %s created in %s successfully!", vlan_id, self.switch_ip)
ret = True
else:
LOG.info("vlan %s creation in %s failed! %s", vlan_id, self.switch_ip, results)
self.disconnect()
return ret
def delete_vlan_locked(self, vlan_id):
self.threadLock.acquire(1)
ret = self.delete_vlan(vlan_id)
self.threadLock.release()
return ret
def delete_vlan(self, vlan_id):
ret = False
if self.connect() == False:
return False
results = self.aosapi.delete('mib', 'vlanTable', {'mibObject0':'vlanNumber:'+str(vlan_id)})['result']
if self.aosapi.success():
LOG.info("vlan %s deleted in %s successfully!", vlan_id, self.switch_ip)
ret = True
else:
LOG.info("vlan %s deletion in %s failed! %s", vlan_id, self.switch_ip, results)
self.disconnect()
return ret
def create_unp_vlan(self, vlan_id, args=None):
ret = False
if self.connect() == False:
return False
results = self.aosapi.put('mib', 'alaDaUserNetProfileTable',
{'mibObject0':'alaDaUserNetProfileName:' +'OpenStack-UNP-'+str(vlan_id),
'mibObject1':'alaDaUserNetProfileVlanID:'+str(vlan_id)})['result']
if self.aosapi.success():
LOG.info("unp_vlan %s creation in %s success!", vlan_id, self.switch_ip)
ret = True
else:
LOG.info("unp_vlan %s creation in %s failed! %s", vlan_id, self.switch_ip, results)
self.disconnect()
return ret
def create_unp_macrule(self, vlan_id, mac, args=None):
ret = False
if self.connect() == False:
return False
results = self.aosapi.put('mib', 'alaDaUNPCustDomainMacRuleTable',
{'mibObject0':'alaDaUNPCustDomainMacRuleAddr:'+str(mac),
'mibObject1':'alaDaUNPCustDomainMacRuleDomainId:0',
'mibObject2':'alaDaUNPCustDomainMacRuleProfileName:'
+'OpenStack-UNP-'+str(vlan_id)}) ['result']
if self.aosapi.success():
LOG.info("unp_macrule[%s %s] creation in %s success!", vlan_id, mac, self.switch_ip)
ret = True
else:
LOG.info("unp_macrule[%s %s] creation in %s failed! %s", vlan_id, mac, self.switch_ip, results)
self.disconnect()
return ret
def get_unp_macrule(self, args=None):
ret = None
if self.connect() == False:
return ret
results = self.aosapi.query('mib', 'alaDaUNPCustDomainMacRuleTable',
{'mibObject0':'alaDaUNPCustDomainMacRuleAddr',
'mibObject1':'alaDaUNPCustDomainMacRuleDomainId',
'mibObject2':'alaDaUNPCustDomainMacRuleProfileName'})['result']
if self.aosapi.success():
ret = results
else:
LOG.info("get_unp_macrule failed in %s! [%s]", self.switch_ip, results)
self.disconnect()
return ret
def create_unp_vlanrule(self, vlan_id):
ret = False
if self.connect() == False:
return False
results = self.aosapi.put('mib', 'alaDaUNPCustDomainVlanTagRuleTable',
{'mibObject0':'alaDaUNPCustDomainVlanTagRuleVlan:'+str(vlan_id),
'mibObject1':'alaDaUNPCustDomainVlanTagRuleDomainId:0',
'mibObject2':'alaDaUNPCustDomainVlanTagRuleVlanProfileName:'
+'OpenStack-UNP-'+str(vlan_id)}) ['result']
if self.aosapi.diag() == 200:
LOG.info("unp_vlanrule[%s] creation in %s success!", vlan_id, self.switch_ip)
ret = True
else:
LOG.info("unp_vlanrule[%s] creation in %s failed!", vlan_id, self.switch_ip, results)
self.disconnect()
return ret
def delete_unp_vlan(self, vlan_id):
ret = False
if self.connect() == False:
return False
results = self.aosapi.delete('mib', 'alaDaUserNetProfileTable',
{'mibObject0':'alaDaUserNetProfileName:'+'OpenStack-UNP-'+str(vlan_id)})['result']
if self.aosapi.success():
LOG.info("unp_vlan %s deletion in %s success!", vlan_id, self.switch_ip)
ret = True
else:
LOG.info("unp_vlan %s deletion in %s failed! %s", vlan_id, self.switch_ip, results)
self.disconnect()
return ret
def delete_unp_macrule(self, vlan_id, mac):
ret = False
if self.connect() == False:
return False
results = self.aosapi.delete('mib', 'alaDaUNPCustDomainMacRuleTable',
{'mibObject0':'alaDaUNPCustDomainMacRuleAddr:'+str(mac),
'mibObject1':'alaDaUNPCustDomainMacRuleDomainId:0'})['result']
if self.aosapi.success():
LOG.info("unp_macrule[%s %s] deletion in %s suceess!", vlan_id, mac, self.switch_ip)
ret = True
else:
LOG.info("unp_macrule[%s %s] deletion in %s failed! %s", vlan_id, mac, self.switch_ip, results)
self.disconnect()
return ret
def delete_unp_vlanrule(self, vlan_id):
ret = False
if self.connect() == False:
return False
results = self.aosapi.delete('mib', 'alaDaUNPCustDomainVlanTagRuleTable',
{'mibObject0':'alaDaUNPCustDomainVlanTagRuleVlan:'+str(vlan_id),
'mibObject1':'alaDaUNPCustDomainVlanTagRuleDomainId:0'})['result']
if self.aosapi.success():
LOG.info("unp_vlanrule[%s] deletion in %s success!", vlan_id, self.switch_ip)
ret = True
else:
LOG.info("unp_vlanrule[%s] deletion in %s failed!", vlan_id, self.switch_ip, results)
self.disconnect()
return ret
def enable_stp_mode_flat(self):
ret = False
if self.connect() == False:
return False
results = self.aosapi.post('mib', 'vStpBridge',
{'mibObject0':'vStpBridgeMode:'+str(1)})['result']
if self.aosapi.success():
LOG.info("stp mode flat in %s success!", self.switch_ip)
ret = True
else:
LOG.info("stp mode flat in %s failed! %s", self.switch_ip, results)
self.disconnect()
return ret
def disable_stp_mode_flat(self):
ret = False
if self.connect() == False:
return False
results = self.aosapi.post('mib', 'vStpBridge',
{'mibObject0':'vStpBridgeMode:'+str(2)})['result']
if self.aosapi.success():
LOG.info("stp mode 1X1 in %s success!", self.switch_ip)
ret = True
else:
LOG.info("stp mode 1X1 in %s failed! %s", self.switch_ip, results)
self.disconnect()
return ret
def enable_mvrp_global(self):
ret = False
if self.connect() == False:
return False
results = self.aosapi.post('mib', 'alcatelIND1MVRPMIBObjects',
{'mibObject0':'alaMvrpGlobalStatus:'+str(1)}) ['result']
if self.aosapi.success():
LOG.info("mvrp enable global in %s success!", self.switch_ip)
ret = True
else:
LOG.info("mvrp enable global in %s failed! %s", self.switch_ip, results)
self.disconnect()
return ret
def disable_mvrp_global(self):
ret = False
if self.connect() == False:
return False
results = self.aosapi.post('mib', 'alcatelIND1MVRPMIBObjects',
{'mibObject0':'alaMvrpGlobalStatus:'+str(2)}) ['result']
if self.aosapi.success():
LOG.info("mvrp disable global in %s success!", self.switch_ip)
ret = True
else:
LOG.info("mvrp disable global in %s failed! %s", self.switch_ip, results)
#print results
self.disconnect()
return ret
def enable_mvrp_if(self, slotport):
ret = False
if self.connect() == False:
return False
ifindex = self._get_ifindex_from_slotport(slotport)
results = self.aosapi.post('mib', 'alaMvrpPortConfigTable',
{'mibObject0':'alaMvrpPortConfigIfIndex:'+str(ifindex),
'mibObject1':'alaMvrpPortStatus:'+str(1)})['result']
if self.aosapi.success():
LOG.info("mvrp enable on %s %s success!", slotport, self.switch_ip)
ret = True
else:
LOG.info("mvrp enable on %s %s failed! %s", slotport, self.switch_ip, results)
self.disconnect()
return ret
def disable_mvrp_if(self, slotport):
ret = False
if self.connect() == False:
return False
ifindex = self._get_ifindex_from_slotport(slotport)
results = self.aosapi.post('mib', 'alaMvrpPortConfigTable',
{'mibObject0':'alaMvrpPortConfigIfIndex:'+str(ifindex),
'mibObject1':'alaMvrpPortStatus:'+str(2)})['result']
if self.aosapi.success():
LOG.info("mvrp disable on %s %s success!", slotport, self.switch_ip)
ret = True
else:
LOG.info("mvrp disable on %s %s failed! %s", slotport, self.switch_ip, results)
#print results
self.disconnect()
return ret
def enable_mvrp(self, slotport=None):
if slotport:
return self.enable_mvrp_if(slotport)
else:
if self.enable_stp_mode_flat() == True:
return self.enable_mvrp_global()
else:
return False
def disable_mvrp(self, slotport=None):
if slotport:
return self.disable_mvrp_if(slotport)
else:
if self.disable_mvrp_global() == True:
return self.disable_stp_mode_flat()
else:
return False
def enable_unp(self, slotport):
ret = False
if self.connect() == False:
return False
ifindex = self._get_ifindex_from_slotport(slotport)
results = self.aosapi.put('mib', 'alaDaUNPPortTable',
{'mibObject0':'alaDaUNPPortIfIndex:'+str(ifindex),
'mibObject1':'alaDaUNPPortClassificationFlag:'+str(1)})['result']
if self.aosapi.success():
LOG.info("unp enable on %s %s success!", slotport, self.switch_ip)
ret = True
else:
LOG.info("unp enable on %s %s failed! [%s]", slotport, self.switch_ip, results)
self.disconnect()
return ret
def disable_unp(self, slotport):
ret = False
if self.connect() == False:
return False
ifindex = self._get_ifindex_from_slotport(slotport)
results = self.aosapi.delete('mib', 'alaDaUNPPortTable',
{'mibObject0':'alaDaUNPPortIfIndex:'+str(ifindex),
'mibObject1':'alaDaUNPPortClassificationFlag:'+str(1)})['result']
if self.aosapi.success():
LOG.info("unp disable on %s %s success!", slotport, self.switch_ip)
ret = True
else:
LOG.info("unp disable on %s %s failed! [%s]", slotport, self.switch_ip, results)
#print results
self.disconnect()
return ret
def write_memory(self):
ret = False
if self.connect() == False:
return False
results = self.aosapi.post('mib', 'configManager',
{'mibObject0':'configWriteMemory:'+str(1)})['result']
if self.aosapi.success():
LOG.info("write memory success on %s", self.switch_ip)
ret = True
else:
LOG.info("write memory failed on %s [%s]", self.switch_ip, results)
self.disconnect()
return ret
def copy_running_certified(self):
ret = False
if self.connect() == False:
return False
results = self.aosapi.post('mib', 'chasControlModuleTable',
{'mibObject0':'entPhysicalIndex:'+str(65),
'mibObject1':'chasControlVersionMngt:'+str(2)})['result']
if self.aosapi.success():
LOG.info("copy running certified success on %s", self.switch_ip)
ret = True
else:
results = self.aosapi.post('mib', 'chasControlModuleTable',
{'mibObject0':'entPhysicalIndex:'+str(66),
'mibObject1':'chasControlVersionMngt:'+str(2)})['result']
if self.aosapi.success():
LOG.info("copy running certified success on %s", self.switch_ip)
ret = True
else:
LOG.info("copy running certified failed on %s [%s]", self.switch_ip, results)
self.disconnect()
return ret
##### OneTouch functions for OpenStack APIs #####
def create_network(self, vlan_id, net_name=''):
self.threadLock.acquire(1)
ret = 0
if self.create_vlan(vlan_id, net_name) == True :
ret = self.create_unp_vlan(vlan_id)
self.threadLock.release()
return ret
def delete_network(self, vlan_id):
self.threadLock.acquire(1)
ret = 0
if self.delete_unp_vlan(vlan_id) == True :
ret = self.delete_vlan(vlan_id)
self.threadLock.release()
return ret
def create_port(self, vlan_id, mac=None):
self.threadLock.acquire(1)
if mac :
ret = self.create_unp_macrule(vlan_id, mac)
else :
ret = self.create_unp_vlanrule(vlan_id)
self.threadLock.release()
return ret
def delete_port(self, vlan_id, mac=None):
self.threadLock.acquire(1)
if mac :
ret = self.delete_unp_macrule(vlan_id, mac)
else :
ret = self.delete_unp_vlanrule(vlan_id)
self.threadLock.release()
return ret
def save_config(self):
self.threadLock.acquire(1)
ret = 0
if self.write_memory():
time.sleep(1)
ret = self.copy_running_certified()
time.sleep(2)
self.threadLock.release()
return ret
else:
ret = False
self.threadLock.release()
return ret
##### Internal Utility functions #####
def _get_ifindex_from_slotport(self, slotport):
""" convert slot/port = '1/2' to ifIndex = 1002 """
""" convert chassis/slot/port = '1/2/3' to ifIndex = 102003 """
""" convert linkagg id = '5' to ifIndex = 40000005 """
if len(slotport.split('/')) == 3 :
chassis = int(slotport.split('/')[0])
if chassis == 0:
chassis = 1
slot = int(slotport.split('/')[1])
port = int(slotport.split('/')[2])
return(str(((chassis-1)*100000) + (slot*1000) + port))
elif len(slotport.split('/')) == 2 :
slot = int(slotport.split('/')[0])
port = int(slotport.split('/')[1])
return(str((slot*1000) + port))
elif len(slotport.split('/')) == 1 :
linkagg = int(slotport.split('/')[0])
return(str(40000000+linkagg))
else:
LOG.info("Error: ifIndex calc: invalid slotport %s",slotport)
return 0
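    # Illustrative conversions produced by the arithmetic above:
    #   '1/2'   -> '1002'     (slot 1, port 2)
    #   '1/2/3' -> '2003'     (chassis 1: (1-1)*100000 + 2*1000 + 3)
    #   '2/3/4' -> '103004'   (chassis 2: (2-1)*100000 + 3*1000 + 4)
    #   '5'     -> '40000005' (link aggregate id 5)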
##### functions used by scripts outside of neutron-server
# dont use lock in this API as it uses one-touch api which already has lock
def clear_config(self, vlan_ids):
# delete mac_rules
results = self.get_unp_macrule()
if len(results['data']):
for i in vlan_ids:
for key, value in results['data']['rows'].items():
if 'OpenStack-UNP-'+str(i) == value['alaDaUNPCustDomainMacRuleProfileName'] :
self.delete_port(i, value['alaDaUNPCustDomainMacRuleAddr'])
# delete vlan_rules and vlans
for i in vlan_ids:
self.delete_port(i)
self.delete_unp_vlan(i)
self.delete_vlan(i)
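    # Minimal usage sketch (the management IP, credentials, VLAN id and MAC
    # address below are placeholders, not values from any real deployment):
    #   driver = OmniSwitchRestfulDriver('192.0.2.10', 'admin', 'switch')
    #   driver.set_config('OpenStack')
    #   if driver.create_network(100, 'demo-net'):
    #       driver.create_port(100, mac='00:11:22:33:44:55')
    #       driver.save_config()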
| 37.135048 | 117 | 0.57295 | ["Apache-2.0"] | Alcatel-LucentEnterpriseData/ALUe-OONP_H_R01 | omniswitch/omniswitch_restful_driver.py | 23,098 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ManagedServiceIdentityClientConfiguration(Configuration):
"""Configuration for ManagedServiceIdentityClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Id of the Subscription to which the identity belongs.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(ManagedServiceIdentityClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2021-09-30-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-msi/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
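# Minimal construction sketch (the subscription id is a placeholder; any
# async azure.core token credential works here):
#   from azure.identity.aio import DefaultAzureCredential
#   config = ManagedServiceIdentityClientConfiguration(
#       credential=DefaultAzureCredential(),
#       subscription_id="00000000-0000-0000-0000-000000000000",
#   )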
| 48.397059 | 130 | 0.703434 | ["MIT"] | MichaelZp0/azure-sdk-for-python | sdk/resources/azure-mgmt-msi/azure/mgmt/msi/v2019_09_01_preview/aio/_configuration.py | 3,291 | Python |
import os
import sys
sys.path.append(os.getcwd())
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import beta
from restools.plotting import rasterise_and_save
from papers.jfm2020_probabilistic_protocol.data import Summary as SummaryProbProto
from papers.jfm2020_probabilistic_protocol.extensions import LaminarisationProbabilityFittingFunction2020JFM
from comsdk.comaux import load_from_json
def plot_pretty_tick(ax, val, on_x=False, on_y=False):
if on_x:
ax.plot([val, val], [0.0, 0.025], 'k-', linewidth=2)
if on_y:
ax.plot([-0.002, -0.0018], [val, val], 'k-', linewidth=2)
def plot_pretty_annotation_on_axis(ax, val, up_lim, text='', on_x=False, on_y=False, shift=None):
if on_x:
ax.plot([val, val], [0.04, up_lim], 'k--', linewidth=1)
shift = 0.0002 if shift is None else shift
ax.text(val + shift, 0.03, text, fontsize=14)
if on_y:
ax.plot([-0.0016, up_lim], [val, val], 'k--', linewidth=1)
shift = -0.05 if shift is None else shift
ax.text(-0.0017, val + shift, text, fontsize=14)
plot_pretty_tick(ax, val, on_x=on_x, on_y=on_y)
def plot_posterior_distribution(ax, N_lam, N_turb, obj_to_rasterize, simplified=False):
a = N_lam + 1./2
b = N_turb + 1./2
ax.plot(x, beta.pdf(x, a, b), label=r'$N = ' + str(N_lam + N_turb) + '$\n$l = ' + str(N_lam) + r'$',
linewidth=4 if simplified else 2)
ax.plot([beta.mean(a, b)], [beta.pdf(beta.mean(a, b), a, b)], 'ro',
markersize=12 if simplified else 8)
_, y_max = ax.get_ylim()
ax.set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
if simplified:
ax.set_xticklabels([])
ax.set_yticklabels([])
else:
obj = ax.fill_between([beta.ppf(0.1, a, b), beta.ppf(0.9, a, b)], [0., 0.], [y_max, y_max], alpha=0.3)
obj_to_rasterize.append(obj)
ax.legend(fontsize=12)
ax.set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
ax.set_xlabel(r'$p$')
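# Note on the construction above: with N_lam laminarising and N_turb turbulent
# events, Beta(N_lam + 1/2, N_turb + 1/2) is the posterior of the laminarisation
# probability under a Jeffreys Beta(1/2, 1/2) prior; the red dot marks the
# posterior mean and the shaded band the 10%-90% credible interval.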
if __name__ == '__main__':
plt.style.use('resources/default.mplstyle')
# Plot fitting sketch
summary_prob_proto = load_from_json(SummaryProbProto)
energies = 0.5 * np.r_[[0.], summary_prob_proto.energy_levels]
energies = energies[0:-20]
print(summary_prob_proto.confs[1].description)
p_lam = np.r_[[1.], summary_prob_proto.confs[1].p_lam]
p_lam = p_lam[0:-20]
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
bar_width = 0.0004
# plot p_lam bars
obj = ax.bar(energies, p_lam, 2*bar_width, alpha=0.75, color='lightpink', zorder=0)
# plot fitting
fitting = LaminarisationProbabilityFittingFunction2020JFM.from_data(energies, p_lam)
e = np.linspace(energies[0], energies[-1], 200)
ax.plot(e, fitting(e), color='blue', zorder=0, linewidth=3)
E_99 = fitting.energy_with_99_lam_prob()
E_flex = fitting.energy_at_inflection_point()
E_a = fitting.energy_close_to_asymptote()
ax.plot([E_99], [fitting(E_99)], 'ko', markersize=10)
ax.plot([E_flex], [fitting(E_flex)], 'ko', markersize=10)
ax.plot([E_a], [fitting(E_a)], 'ko', markersize=10)
ax.set_xlim((-0.002, energies[-1]))
ax.set_ylim((0, 1.05))
energies_ticks = [None for _ in range(len(energies))]
for i in range(3):
energies_ticks[i] = r'$E^{(' + str(i + 1) + ')}$'
energies_ticks[3] = r'...'
energies_ticks[10] = r'$E^{(j)}$'
# ax.annotate(r'$\mathbb{E} P_{lam}(E^{(j)})$', xy=(energies[10], p_lam[10]), xytext=(energies[10] - 0.002, p_lam[10] + 0.2),
# arrowprops=dict(arrowstyle='->'), fontsize=16)
ax.annotate(r'$\bar{P}_{lam}(E^{(j)})$', xy=(energies[10], p_lam[10]), xytext=(energies[10] - 0.002, p_lam[10] + 0.2),
arrowprops=dict(arrowstyle='->'), fontsize=16)
ax.annotate(r'$p(E)$', xy=(energies[4], fitting(energies[4])), xytext=(energies[4] + 0.001, fitting(energies[4]) + 0.2),
arrowprops=dict(arrowstyle='->'), fontsize=16)
col_labels = ['Definition']
row_labels = [
r'$E_{99\%}$',
r'$E_{flex}$',
r'$E_{a}$',
r'$a$'
]
table_vals = [
[r'$p(E_{99\%})=0.99$'],
[r'$E_{flex} = \dfrac{\alpha - 1}{\beta}$'],
[r'$|p(E_{a}) - a| = 0.01$'],
[r'$a = \lim_{E \to \infty} p(E)$']
]
# the rectangle is where I want to place the table
table = plt.table(cellText=table_vals,
colWidths=[0.2]*3,
rowLabels=row_labels,
colLabels=col_labels,
loc='upper right')
#table.auto_set_font_size(False)
#table.set_fontsize(16)
table.scale(1, 3)
ax.set_xticks(energies)
ax.set_xticklabels(energies_ticks)
plot_pretty_annotation_on_axis(ax, fitting.asymp, energies[-1], text=r'$a$', on_y=True)
plot_pretty_annotation_on_axis(ax, 0.99, E_99, text=r'$0.99$', on_y=True)
plot_pretty_annotation_on_axis(ax, E_99, 0.99, text=r'$E_{99\%}$', on_x=True)
plot_pretty_annotation_on_axis(ax, E_flex, fitting(E_flex), text=r'$E_{flex}$', on_x=True)
plot_pretty_annotation_on_axis(ax, E_a, fitting(E_a), text=r'$E_{a}$', on_x=True)
# ax_secondary_yaxis = ax.twinx()
# ax_secondary_yaxis.tick_params(axis="y", direction="in", pad=-25.)
# ax_secondary_yaxis.yaxis.tick_left()
# #ax_secondary_yaxis = ax.secondary_yaxis("left", xticks=[fitting.asymp, 0.99],
# # xticklabels=[r'$a$', 0.99])
# ax_secondary_yaxis.set_yticks([fitting.asymp, 0.99])
# ax_secondary_yaxis.set_yticklabels([r'$a$', r'$0.99$'])
# for ax_ in (ax, ax_secondary_yaxis):
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
plt.tight_layout()
plt.savefig('fitting_sketch.eps')
plt.show()
# Plot posterior distribution examples
fig, axes = plt.subplots(1, 3, figsize=(12, 3.5))
small_sample_cases = [
(0, 10), # first number = number of lam events; second number = number of turb events
(3, 7),
(9, 1),
]
large_sample_cases = [
(0, 30), # first number = number of lam events; second number = number of turb events
(9, 21),
(27, 3),
]
x = np.linspace(0., 1., 200)
obj_to_rasterize = []
for ax, (N_lam, N_turb) in zip(axes, small_sample_cases):
plot_posterior_distribution(ax, N_lam, N_turb, obj_to_rasterize)
for ax, (N_lam, N_turb) in zip(axes, large_sample_cases):
plot_posterior_distribution(ax, N_lam, N_turb, obj_to_rasterize)
for ax in axes:
ax.grid()
axes[0].set_ylabel(r'$f_{P_{lam}}(p | \boldsymbol{S} = \boldsymbol{s})$')
plt.tight_layout()
fname = 'posterior_examples.eps'
rasterise_and_save(fname, rasterise_list=obj_to_rasterize, fig=fig, dpi=300)
plt.show()
# Plot small posterior distributions for large schema
sample_cases = [
(0, 10), # first number = number of lam events; second number = number of turb events
(2, 8),
(7, 3),
]
for N_lam, N_turb in sample_cases:
fig, ax = plt.subplots(1, 1, figsize=(3, 2.3))
x = np.linspace(0., 1., 200)
obj_to_rasterize = []
plot_posterior_distribution(ax, N_lam, N_turb, obj_to_rasterize, simplified=True)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
plt.tight_layout()
fname = 'posterior_example_{}_{}.eps'.format(N_lam, N_turb)
rasterise_and_save(fname, rasterise_list=obj_to_rasterize, fig=fig, dpi=300)
plt.show()
# Plot fitting sketch
fig, ax = plt.subplots(1, 1, figsize=(10, 3))
bar_width = 0.0004
# plot p_lam bars
obj = ax.bar(energies, p_lam, 2*bar_width, alpha=0.75, color='lightpink', zorder=0)
# plot fitting
fitting = LaminarisationProbabilityFittingFunction2020JFM.from_data(energies, p_lam)
e = np.linspace(energies[0], energies[-1], 200)
ax.plot(e, fitting(e), color='blue', zorder=0, linewidth=3)
ax.set_xlim((-0.002, energies[-1]))
ax.set_ylim((0, 1.05))
energies_ticks = [None for _ in range(len(energies))]
for i in range(3):
energies_ticks[i] = r'$E^{(' + str(i + 1) + ')}$'
energies_ticks[3] = r'...'
energies_ticks[10] = r'$E^{(j)}$'
ax.set_xticks(energies)
ax.set_xticklabels(energies_ticks)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
plt.tight_layout()
plt.savefig('fitting_sketch_simplified.eps')
plt.show()
| 40.903382 | 128 | 0.619228 | [
"MIT"
] | anton-pershin/restools | papers/jfm2022_optimizing_control_bayesian_method/views/sketches.py | 8,467 | Python |
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from .misc import *
__all__ = ['make_image', 'show_batch', 'show_mask', 'show_mask_single']
# functions to show an image
def make_image(img, mean=(0,0,0), std=(1,1,1)):
for i in range(0, 3):
img[i] = img[i] * std[i] + mean[i] # unnormalize
npimg = img.numpy()
return np.transpose(npimg, (1, 2, 0))
def gauss(x,a,b,c):
return torch.exp(-torch.pow(torch.add(x,-b),2).div(2*c*c)).mul(a)
def colorize(x):
''' Converts a one-channel grayscale image to a color heatmap image '''
if x.dim() == 2:
torch.unsqueeze(x, 0, out=x)
if x.dim() == 3:
cl = torch.zeros([3, x.size(1), x.size(2)])
cl[0] = gauss(x,.5,.6,.2) + gauss(x,1,.8,.3)
cl[1] = gauss(x,1,.5,.3)
cl[2] = gauss(x,1,.2,.3)
cl[cl.gt(1)] = 1
elif x.dim() == 4:
cl = torch.zeros([x.size(0), 3, x.size(2), x.size(3)])
cl[:,0,:,:] = gauss(x,.5,.6,.2) + gauss(x,1,.8,.3)
cl[:,1,:,:] = gauss(x,1,.5,.3)
cl[:,2,:,:] = gauss(x,1,.2,.3)
return cl
def show_batch(images, Mean=(2, 2, 2), Std=(0.5,0.5,0.5)):
images = make_image(torchvision.utils.make_grid(images), Mean, Std)
plt.imshow(images)
plt.show()
def show_mask_single(images, mask, Mean=(2, 2, 2), Std=(0.5,0.5,0.5)):
im_size = images.size(2)
# save for adding mask
im_data = images.clone()
for i in range(0, 3):
im_data[:,i,:,:] = im_data[:,i,:,:] * Std[i] + Mean[i] # unnormalize
images = make_image(torchvision.utils.make_grid(images), Mean, Std)
plt.subplot(2, 1, 1)
plt.imshow(images)
plt.axis('off')
# for b in range(mask.size(0)):
# mask[b] = (mask[b] - mask[b].min())/(mask[b].max() - mask[b].min())
mask_size = mask.size(2)
# print('Max %f Min %f' % (mask.max(), mask.min()))
mask = (upsampling(mask, scale_factor=im_size/mask_size))
# mask = colorize(upsampling(mask, scale_factor=im_size/mask_size))
# for c in range(3):
# mask[:,c,:,:] = (mask[:,c,:,:] - Mean[c])/Std[c]
# print(mask.size())
mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask.expand_as(im_data)))
# mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask), Mean, Std)
plt.subplot(2, 1, 2)
plt.imshow(mask)
plt.axis('off')
def show_mask(images, masklist, Mean=(2, 2, 2), Std=(0.5,0.5,0.5)):
im_size = images.size(2)
# save for adding mask
im_data = images.clone()
for i in range(0, 3):
im_data[:,i,:,:] = im_data[:,i,:,:] * Std[i] + Mean[i] # unnormalize
images = make_image(torchvision.utils.make_grid(images), Mean, Std)
plt.subplot(1+len(masklist), 1, 1)
plt.imshow(images)
plt.axis('off')
for i in range(len(masklist)):
mask = masklist[i].data.cpu()
# for b in range(mask.size(0)):
# mask[b] = (mask[b] - mask[b].min())/(mask[b].max() - mask[b].min())
mask_size = mask.size(2)
# print('Max %f Min %f' % (mask.max(), mask.min()))
mask = (upsampling(mask, scale_factor=im_size/mask_size))
# mask = colorize(upsampling(mask, scale_factor=im_size/mask_size))
# for c in range(3):
# mask[:,c,:,:] = (mask[:,c,:,:] - Mean[c])/Std[c]
# print(mask.size())
mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask.expand_as(im_data)))
# mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask), Mean, Std)
plt.subplot(1+len(masklist), 1, i+2)
plt.imshow(mask)
plt.axis('off')
# x = torch.zeros(1, 3, 3)
# out = colorize(x)
# out_im = make_image(out)
# plt.imshow(out_im)
# plt.show() | 33.610619 | 95 | 0.575303 | [
"MIT"
] | 1337Eddy/BirdRecognitionPruning | rethinking-network-pruning/cifar/weight-level/utils/visualize.py | 3,798 | Python |
from django.db import models
from django.urls import reverse
from nautobot.core.models import BaseModel
from nautobot.extras.utils import extras_features
from nautobot.extras.models import ObjectChange
from nautobot.utilities.utils import serialize_object
@extras_features("graphql")
class DummyModel(BaseModel):
name = models.CharField(max_length=20, help_text="The name of this Dummy.")
number = models.IntegerField(default=100, help_text="The number of this Dummy.")
csv_headers = ["name", "number"]
class Meta:
ordering = ["name"]
def __str__(self):
return f"{self.name} - {self.number}"
def get_absolute_url(self):
return reverse("plugins:dummy_plugin:dummymodel", kwargs={"pk": self.pk})
def to_objectchange(self, action):
return ObjectChange(
changed_object=self,
object_repr=str(self),
action=action,
# related_object=self.virtual_machine,
object_data=serialize_object(self),
)
def to_csv(self):
return (
self.name,
self.number,
)
class AnotherDummyModel(BaseModel):
name = models.CharField(max_length=20)
number = models.IntegerField(default=100)
class Meta:
ordering = ["name"]
| 26.916667 | 84 | 0.666409 | [
"Apache-2.0"
] | FloLaco/nautobot | examples/dummy_plugin/dummy_plugin/models.py | 1,292 | Python |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.0.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown] {"_uuid": "6f06de1b48e35853f80eb1f3384baae8f8536b3c"}
# <h1><center><font size="6">Santander EDA, PCA and Light GBM Classification Model</font></center></h1>
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4a/Another_new_Santander_bank_-_geograph.org.uk_-_1710962.jpg/640px-Another_new_Santander_bank_-_geograph.org.uk_-_1710962.jpg"></img>
#
# <br>
# <b>
# In this challenge, Santander invites Kagglers to help them identify which customers will make a specific transaction in the future, irrespective of the amount of money transacted. The data provided for this competition has the same structure as the real data they have available to solve this problem.
# The data is anonymized, each row containing 200 numerical values identified just with a number.</b>
#
# <b>Inspired by Jiwei Liu's Kernel. I added Data Augmentation Segment to my kernel</b>
#
# <pre>
# <a id='0'><b>Content</b></a>
# - <a href='#1'><b>Import the Data</b></a>
# - <a href='#11'><b>Data Exploration</b></a>
# - <a href='#2'><b>Check for the missing values</b></a>
# - <a href='#3'><b>Visualizing the Santander Customer Transactions Data</b></a>
# - <a href='#31'><b>Check for Class Imbalance</b></a>
# - <a href='#32'><b>Distribution of Mean and Standard Deviation</b></a>
# - <a href='#33'><b>Distribution of Skewness</b></a>
# - <a href='#34'><b>Distribution of Kurtosis</b></a>
# - <a href='#4'><b>Principal Component Analysis</b></a>
# - <a href='#41'><b>Kernel PCA</b></a>
# - <a href = "#16"><b>Data Augmentation</b></a>
# - <a href='#6'><b>Build the Light GBM Model</b></a></pre>
# %% {"_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5"}
import numpy as np
import pandas as pd
import lightgbm as lgb
import matplotlib
from sklearn.metrics import mean_squared_error
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold,KFold
import warnings
from six.moves import urllib
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
warnings.filterwarnings('ignore')
# %matplotlib inline
plt.style.use('seaborn')
from scipy.stats import norm, skew
# %% [markdown] {"_uuid": "d150ae0e24acf7d0107ec64ccea13d9745ce45fc"}
# <a id=1><pre><b>Import the Data</b></pre></a>
# %% {"_cell_guid": "79c7e3d0-c299-4dcb-8224-4455121ee9b0", "_uuid": "d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"}
#Load the Data
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
features = [c for c in train.columns if c not in ['ID_code', 'target']]
# %% [markdown] {"_uuid": "e711ea5576a8672fce378ede726be247aa789ef1"}
# <a id=11><pre><b>Data Exploration</b></pre></a>
# %% {"_uuid": "0ad0660223a680a8cc777c7526258759fface7a6"}
train.describe()
# %% {"_uuid": "217907a226a7e9425b4445805cde80c5de4feaca"}
train.info()
# %% {"_uuid": "90ca407e625a961a635fde6a21c9f524f024d654"}
train.shape
# %% {"_uuid": "089309dd0b32db21b44152f4bb15b2c7765dfd87"}
train.head(5)
# %% [markdown] {"_uuid": "3548150c4ae4ccd847d84baea5cba641f4fdc0bb"}
# <a id=2><b><pre>Check for the Missing Values.</pre></b></a>
# %% {"_uuid": "906ec8c811e2d415d47c7f67d8ac23bed0d8699b"}
# Check for Missing Values after Concatenation
obs = train.isnull().sum().sort_values(ascending = False)
percent = round(train.isnull().sum().sort_values(ascending = False)/len(train)*100, 2)
pd.concat([obs, percent], axis = 1,keys= ['Number of Observations', 'Percent'])
# %% [markdown] {"_uuid": "bfe81109ea380b1210a3a6d50547058a4ee0e9b5"}
# <pre>There are no missing values in the dataset</pre>
# %% [markdown] {"_uuid": "8d28011134ff59dc25080e743e028bb487b8c366"}
# <pre><a id = 3><b>Visualizing the Santander Customer Transactions Data</b></a></pre>
# %% [markdown] {"_uuid": "6abbb24cafc26afb4c6f8c52ab6b0353e2698f2e"}
# <pre><a id = 31 ><b>Check for Class Imbalance</b></a></pre>
# %% {"_uuid": "ada8973ebb427bbf9934a911095c1338b9036b35"}
target = train['target']
train = train.drop(["ID_code", "target"], axis=1)
sns.set_style('whitegrid')
sns.countplot(target)
# %% [markdown] {"_uuid": "9bcb709f47ab634bd7ebaa7a9f0574d571e2b30e"}
# <pre><a id = 32 ><b>Distribution of Mean and Standard Deviation</b></a></pre>
#
# <pre>EDA Reference : https://www.kaggle.com/gpreda/santander-eda-and-prediction</pre>
# %% {"_uuid": "60077579a9b2e2b92119d2cebbf29c301c3ee279"}
plt.figure(figsize=(16,6))
plt.title("Distribution of mean values per row in the train and test set")
sns.distplot(train[features].mean(axis=1),color="black", kde=True,bins=120, label='train')
sns.distplot(test[features].mean(axis=1),color="red", kde=True,bins=120, label='test')
plt.legend()
plt.show()
# %% [markdown] {"_uuid": "c5f90ed3f3e3a6c21fd21e7891dd131a981e1f24"}
# <pre>Let's check the distribution of the mean of values per columns in the train and test datasets.</pre>
# %% {"_uuid": "4589fe2bb6b38c8f490057b6c2734aa1c8cf57a5"}
plt.figure(figsize=(16,6))
plt.title("Distribution of mean values per column in the train and test set")
sns.distplot(train[features].mean(axis=0),color="black", kde=True,bins=120, label='train')
sns.distplot(test[features].mean(axis=0),color="red", kde=True,bins=120, label='test')
plt.legend();plt.show()
# %% [markdown] {"_uuid": "17a1f1bd380a50f59f2293071f1fd1cb85d4cace"}
# <pre>Distribution for Standard Deviation</pre>
# %% {"_uuid": "1119bbd9854b60c53eff0f5c024df241cf99a4ff"}
plt.figure(figsize=(16,6))
plt.title("Distribution of std values per rows in the train and test set")
sns.distplot(train[features].std(axis=1),color="blue",kde=True,bins=120, label='train')
sns.distplot(test[features].std(axis=1),color="green", kde=True,bins=120, label='test')
plt.legend(); plt.show()
# %% [markdown] {"_uuid": "2e23ffd37c255be7b01aab8ef6b25d0bd4d2563f"}
# <pre>Let's check the distribution of the standard deviation of values per columns in the train and test datasets.</pre>
# %% {"_uuid": "734b96fd6a8aba302513797962498c906e299653"}
plt.figure(figsize=(16,6))
plt.title("Distribution of mean values per column in the train and test set")
sns.distplot(train[features].mean(axis=0),color="blue", kde=True,bins=120, label='train')
sns.distplot(test[features].mean(axis=0),color="green", kde=True,bins=120, label='test')
plt.legend();plt.show()
# %% [markdown] {"_uuid": "1200ca154b1928043b67fb114d7d0eb93bfbd7e7"}
# <pre>Let's check now the distribution of the mean value per row in the train dataset, grouped by value of target</pre>
# %% {"_uuid": "802622e99a858e7e1be8a56a0dcb32c217769736"}
t0 = train.loc[target == 0]
t1 = train.loc[target == 1]
plt.figure(figsize=(16,6))
plt.title("Distribution of mean values per row in the train set")
sns.distplot(t0[features].mean(axis=1),color="red", kde=True,bins=120, label='target = 0')
sns.distplot(t1[features].mean(axis=1),color="green", kde=True,bins=120, label='target = 1')
plt.legend(); plt.show()
# %% [markdown] {"_uuid": "bae148d9255104a14c07b0075dbe67084039ada9"}
# <pre>Let's check now the distribution of the mean values per columns in the train and test datasets.</pre>
# %% {"_uuid": "5778c9b5a5b82264a02907471c98aba55e753cf9"}
t0 = train.loc[target == 0]
t1 = train.loc[target == 1]
plt.figure(figsize=(16,6))
plt.title("Distribution of mean values per column in the train set")
sns.distplot(t0[features].mean(axis=0),color="red", kde=True,bins=120, label='target = 0')
sns.distplot(t1[features].mean(axis=0),color="green", kde=True,bins=120, label='target = 1')
plt.legend(); plt.show()
# %% [markdown] {"_uuid": "dfe2e017dbe64a93c707785b77a2f018c55d2a92"}
# <pre>Let's check now the distribution of the standard deviation per row in the train dataset, grouped by value of target</pre>
# %% {"_uuid": "03d83a9f09460a7e0e64de7cff618fb903511eb5"}
t0 = train.loc[target == 0]
t1 = train.loc[target == 1]
plt.figure(figsize=(16,6))
plt.title("Distribution of standard deviation values per row in the train set")
sns.distplot(t0[features].std(axis=1),color="blue", kde=True,bins=120, label='target = 0')
sns.distplot(t1[features].std(axis=1),color="red", kde=True,bins=120, label='target = 1')
plt.legend(); plt.show()
# %% [markdown] {"_uuid": "0796b8aa04186d551ae3d92d28e18a548dc09e51"}
# <pre>Let's check now the distribution of standard deviation per columns in the train and test datasets.</pre>
# %% {"_uuid": "8fe584abb584e77e654eb6c768b42eeafda6b784"}
t0 = train.loc[target == 0]
t1 = train.loc[target == 1]
plt.figure(figsize=(16,6))
plt.title("Distribution of standard deviation values per column in the train set")
sns.distplot(t0[features].std(axis=0),color="blue", kde=True,bins=120, label='target = 0')
sns.distplot(t1[features].std(axis=0),color="red", kde=True,bins=120, label='target = 1')
plt.legend(); plt.show()
# %% [markdown] {"_uuid": "61fb22a8fdac069232e1584d97e02ca6348c7eea"}
# <pre><a id = 33 ><b>Distribution of Skewness</b></a></pre>
#
# <pre>Let's see now the distribution of skewness on rows in train separated for values of target 0 and 1. We found the distribution to be left-skewed</pre>
# %% {"_uuid": "a353fcf6b2ce7db7d6c693a2761bc8ac0e005309"}
t0 = train.loc[target == 0]
t1 = train.loc[target == 1]
plt.figure(figsize=(16,6))
plt.title("Distribution of skew values per row in the train set")
sns.distplot(t0[features].skew(axis=1),color="red", kde=True,bins=120, label='target = 0')
sns.distplot(t1[features].skew(axis=1),color="blue", kde=True,bins=120, label='target = 1')
plt.legend(); plt.show()
# %% [markdown] {"_uuid": "3a0d204c325a9b78ff5b242e3b23043645040499"}
# <pre>Let's see now the distribution of skewness on columns in train separated for values of target 0 and 1.</pre>
# %% {"_uuid": "e47c1c00db66e3f43c65efad776bd2bcbea8117d"}
t0 = train.loc[target == 0]
t1 = train.loc[target == 1]
plt.figure(figsize=(16,6))
plt.title("Distribution of skew values per column in the train set")
sns.distplot(t0[features].skew(axis=0),color="red", kde=True,bins=120, label='target = 0')
sns.distplot(t1[features].skew(axis=0),color="blue", kde=True,bins=120, label='target = 1')
plt.legend(); plt.show()
# %% [markdown] {"_uuid": "52dc95b188e82d5e55503348b8db57abfb385ca2"}
# <pre><a id = 34 ><b>Distribution of Kurtosis</b></a></pre>
# %% [markdown] {"_uuid": "b3d635fc2ccd5d0ad662413ccff46e062a01a13c"}
# <pre>Let's see now the distribution of kurtosis on rows in train separated for values of target 0 and 1. We found the distribution to be Leptokurtic</pre>
# %% {"_uuid": "a0785f3344f18166d838b50ecfb05901ad2180c8"}
t0 = train.loc[target == 0]
t1 = train.loc[target == 1]
plt.figure(figsize=(16,6))
plt.title("Distribution of kurtosis values per row in the train set")
sns.distplot(t0[features].kurtosis(axis=1),color="red", kde=True,bins=120, label='target = 0')
sns.distplot(t1[features].kurtosis(axis=1),color="green", kde=True,bins=120, label='target = 1')
plt.legend(); plt.show()
# %% [markdown] {"_kg_hide-input": true, "_kg_hide-output": true, "_uuid": "736f0bde864b3bf327be491a0d820593415aa3f5"}
# <pre>Let's see now the distribution of kurtosis on columns in train separated for values of target 0 and 1.</pre>
# %% {"_uuid": "8b72cdd5a6f9b1db419fdd35e44974e219a9d376"}
t0 = train.loc[target == 0]
t1 = train.loc[target == 1]
plt.figure(figsize=(16,6))
plt.title("Distribution of kurtosis values per column in the train set")
sns.distplot(t0[features].kurtosis(axis=0),color="red", kde=True,bins=120, label='target = 0')
sns.distplot(t1[features].kurtosis(axis=0),color="green", kde=True,bins=120, label='target = 1')
plt.legend(); plt.show()
# %% [markdown] {"_uuid": "374e9be094d1adaf17888cb16aea2f10093edd9e"}
# <a id=4><pre><b>Principal Component Analysis to check Dimensionality Reduction</b></pre></a>
# %% {"_uuid": "0af73d37cc75d3685fcb5f8c2702ad8758070b94"}
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train_scaled = scaler.fit_transform(train)
PCA_train_x = PCA(2).fit_transform(train_scaled)
plt.scatter(PCA_train_x[:, 0], PCA_train_x[:, 1], c=target, cmap="copper_r")
plt.axis('off')
plt.colorbar()
plt.show()
# %% [markdown] {"_uuid": "2482fcb3497bcc3b7fe7f27256e408ff98324de2"}
# <pre><a id = 41><b>Kernel PCA (since the graph above doesn't reveal meaningful structure)</b></a></pre>
# %% {"_uuid": "9206e909ab4be625c94811af6bd0b676f626de22"}
from sklearn.decomposition import KernelPCA
lin_pca = KernelPCA(n_components = 2, kernel="linear", fit_inverse_transform=True)
rbf_pca = KernelPCA(n_components = 2, kernel="rbf", gamma=0.0433, fit_inverse_transform=True)
sig_pca = KernelPCA(n_components = 2, kernel="sigmoid", gamma=0.001, coef0=1, fit_inverse_transform=True)
plt.figure(figsize=(11, 4))
for subplot, pca, title in ((131, lin_pca, "Linear kernel"), (132, rbf_pca, "RBF kernel, $\gamma=0.04$"),
(133, sig_pca, "Sigmoid kernel, $\gamma=10^{-3}, r=1$")):
    PCA_train_x = pca.fit_transform(train_scaled)  # use the KernelPCA instance selected by the loop
plt.subplot(subplot)
plt.title(title, fontsize=14)
plt.scatter(PCA_train_x[:, 0], PCA_train_x[:, 1], c=target, cmap="nipy_spectral_r")
plt.xlabel("$z_1$", fontsize=18)
if subplot == 131:
plt.ylabel("$z_2$", fontsize=18, rotation=0)
plt.grid(True)
plt.show()
# %% [markdown] {"_uuid": "5b7a96339294daeedba94abaee4fbe6f16e69f2e"}
# <pre>Since PCA hasn't been useful, I decided to proceed with the existing dataset</pre>
# %% [markdown] {"_uuid": "96861473dd6cb2de3377a47684ece1714e1ab072"}
# <pre><a id = 16><b>Data Augmentation</b></a></pre>
# %% {"_uuid": "dfd26c446ff80f323791fbdbbbf158d355ee7267"}
def augment(x,y,t=2):
    # Oversample by shuffling each feature column independently within a class:
    # t shuffled copies of the positive rows and t//2 shuffled copies of the
    # negative rows are stacked onto the original data. Column-wise shuffling
    # keeps each feature's per-class distribution while breaking cross-feature
    # correlations.
xs,xn = [],[]
for i in range(t):
mask = y>0
x1 = x[mask].copy()
ids = np.arange(x1.shape[0])
for c in range(x1.shape[1]):
np.random.shuffle(ids)
x1[:,c] = x1[ids][:,c]
xs.append(x1)
for i in range(t//2):
mask = y==0
x1 = x[mask].copy()
ids = np.arange(x1.shape[0])
for c in range(x1.shape[1]):
np.random.shuffle(ids)
x1[:,c] = x1[ids][:,c]
xn.append(x1)
xs = np.vstack(xs)
xn = np.vstack(xn)
ys = np.ones(xs.shape[0])
yn = np.zeros(xn.shape[0])
x = np.vstack([x,xs,xn])
y = np.concatenate([y,ys,yn])
return x,y
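# Rough usage sketch (hypothetical sizes, only to illustrate the effect): with t=2,
# a training fold of ~180k rows holding ~18k positives comes back as roughly
# 180k + 2*18k + 1*162k ~ 378k rows, so the positive class share increases.
# The actual call happens inside the CV loop below:
#   X_tr, y_tr = augment(X_train.values, y_train.values)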
# %% [markdown] {"_uuid": "a37f046be743d0086a2fc6094d78d7b9cab78055"}
# <pre><a id = 6><b>Build the Light GBM Model</b></a></pre>
# %% {"_uuid": "d418b9c44ef2f96b02db44d70aacbca61fe0952f"}
param = {
'bagging_freq': 5,
'bagging_fraction': 0.335,
'boost_from_average':'false',
'boost': 'gbdt',
'feature_fraction': 0.041,
'learning_rate': 0.0083,
'max_depth': -1,
'metric':'auc',
'min_data_in_leaf': 80,
'min_sum_hessian_in_leaf': 10.0,
'num_leaves': 13,
'num_threads': 8,
'tree_learner': 'serial',
'objective': 'binary',
'verbosity': -1
}
# %% {"_uuid": "fc22f099688ce4928a44f1c68cd16d6b8473e207"}
train.shape
# %% {"_uuid": "8b4f1d5f4aef4730673a8a6bbb2e828c2f92e2a5"}
num_folds = 11
features = [c for c in train.columns if c not in ['ID_code', 'target']]
folds = KFold(n_splits=num_folds, random_state=2319)
oof = np.zeros(len(train))
getVal = np.zeros(len(train))
predictions = np.zeros(len(target))
feature_importance_df = pd.DataFrame()
print('Light GBM Model')
for fold_, (trn_idx, val_idx) in enumerate(folds.split(train.values, target.values)):
X_train, y_train = train.iloc[trn_idx][features], target.iloc[trn_idx]
X_valid, y_valid = train.iloc[val_idx][features], target.iloc[val_idx]
X_tr, y_tr = augment(X_train.values, y_train.values)
X_tr = pd.DataFrame(X_tr)
print("Fold idx:{}".format(fold_ + 1))
trn_data = lgb.Dataset(X_tr, label=y_tr)
val_data = lgb.Dataset(X_valid, label=y_valid)
clf = lgb.train(param, trn_data, 1000000, valid_sets = [trn_data, val_data], verbose_eval=5000, early_stopping_rounds = 4000)
oof[val_idx] = clf.predict(train.iloc[val_idx][features], num_iteration=clf.best_iteration)
getVal[val_idx]+= clf.predict(train.iloc[val_idx][features], num_iteration=clf.best_iteration) / folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = features
fold_importance_df["importance"] = clf.feature_importance()
fold_importance_df["fold"] = fold_ + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
predictions += clf.predict(test[features], num_iteration=clf.best_iteration) / folds.n_splits
print("CV score: {:<8.5f}".format(roc_auc_score(target, oof)))
# %% {"_uuid": "f9dc76139cb15edf957be0a8400e6de33c14e655"}
cols = (feature_importance_df[["feature", "importance"]]
.groupby("feature")
.mean()
.sort_values(by="importance", ascending=False)[:1000].index)
best_features = feature_importance_df.loc[feature_importance_df.feature.isin(cols)]
plt.figure(figsize=(14,26))
sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance",ascending=False))
plt.title('LightGBM Features (averaged over folds)')
plt.tight_layout()
plt.savefig('lgbm_importances.png')
# %% {"_uuid": "137cf3c3924422e1a15ac63f4e259b86db86c2c5"}
num_sub = 26
print('Saving the Submission File')
sub = pd.DataFrame({"ID_code": test.ID_code.values})
sub["target"] = predictions
sub.to_csv('submission{}.csv'.format(num_sub), index=False)
getValue = pd.DataFrame(getVal)
getValue.to_csv("Validation_kfold.csv")
| 42.78744 | 304 | 0.705995 | [
"MIT"
] | MarcusJones/kaggle_petfinder_adoption | reference_kernels/FORK EDA, PCA + Simple LGBM on KFold Technique.py | 17,714 | Python |
from pybfm.irr import IRR
# define annual cash flows
multiple_irr = IRR(
[0, 1, 2], # years [this year, first year, second year]
[-3000, 15000, -13000], # cash flows
[None, None, None], # kind of cash flow (None, perpetuity)
)
# find irr
irr = multiple_irr.find(initial_guess=0.05)
print(f"Internal Rate of Return (IRR) = {irr}")
# find modified irr
mirr = multiple_irr.find(initial_guess=0.05)
print(f"Internal Rate of Return (MIRR) = {mirr}")
# find all irr
irrs = multiple_irr.find_all([0.05,1])
print(f"All Internal Rate of Return (IRR) = {irrs}")
# check formula
formula_string = multiple_irr.formula_string
print(f"Formula = {formula_string}")
# get yield curve data
formula_string = multiple_irr.get_yield_curve(min_r=0, max_r=5, points=50)
print(f"Formula = {formula_string}")
# plot
fig, ax = multiple_irr.plot(
min_r=0,
max_r=5,
points=100,
figsize=(10,5),
color='blue',
)
# save image
fig.savefig('irr.png')
| 23.585366 | 74 | 0.684592 | [
"BSD-3-Clause"
] | javadebadi/pybfm | example/irr.py | 967 | Python |
from . import views
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', views.index, name='home'),
path('login/', views.login, name='login_user'),
path('logout/', views.logout, name='logout_user'),
path('signup/', views.signup, name='signup_user'),
path('description/', views.deskripsi, name='desc'),
path('about/', views.tentang, name='tentang'),
path('user/<slug:id>/', views.detail_user, name='detail_user'),
path('api/user/', include('login.urls')),
]
| 35.533333 | 67 | 0.662289 | [
"MIT"
] | mrizky17042000/DealIn | deal.in/deal_in/urls.py | 533 | Python |
from cppimport import import_hook
from .xbin import *
from .xbin_util import *
from .smear import *
Xbin = Xbin_float
create_Xbin_nside = create_Xbin_nside_float
| 20.375 | 43 | 0.809816 | [
"Apache-2.0"
] | chelseanfries/rpxdock | rpxdock/xbin/__init__.py | 163 | Python |
from datetime import datetime
from xattr import listxattr, getxattr
import falcon
import hashlib
import logging
from certidude import const, config
from certidude.common import cert_to_dn
from certidude.decorators import serialize, csrf_protection
from certidude.user import User
from .utils import AuthorityHandler
from .utils.firewall import login_required, authorize_admin
logger = logging.getLogger(__name__)
class CertificateAuthorityResource(object):
def on_get(self, req, resp):
logger.info("Served CA certificate to %s", req.context.get("remote_addr"))
resp.stream = open(config.AUTHORITY_CERTIFICATE_PATH, "rb")
resp.append_header("Content-Type", "application/x-x509-ca-cert")
resp.append_header("Content-Disposition", "attachment; filename=%s.crt" %
const.HOSTNAME)
class SessionResource(AuthorityHandler):
def __init__(self, authority, token_manager):
AuthorityHandler.__init__(self, authority)
self.token_manager = token_manager
@csrf_protection
@serialize
@login_required
@authorize_admin
def on_get(self, req, resp):
def serialize_requests(g):
for common_name, path, buf, req, submitted, server in g():
try:
submission_address = getxattr(path, "user.request.address").decode("ascii") # TODO: move to authority.py
except IOError:
submission_address = None
try:
submission_hostname = getxattr(path, "user.request.hostname").decode("ascii") # TODO: move to authority.py
except IOError:
submission_hostname = None
yield dict(
submitted = submitted,
common_name = common_name,
address = submission_address,
hostname = submission_hostname if submission_hostname != submission_address else None,
md5sum = hashlib.md5(buf).hexdigest(),
sha1sum = hashlib.sha1(buf).hexdigest(),
sha256sum = hashlib.sha256(buf).hexdigest(),
sha512sum = hashlib.sha512(buf).hexdigest()
)
def serialize_revoked(g):
for common_name, path, buf, cert, signed, expired, revoked, reason in g(limit=5):
yield dict(
serial = "%x" % cert.serial_number,
common_name = common_name,
# TODO: key type, key length, key exponent, key modulo
signed = signed,
expired = expired,
revoked = revoked,
reason = reason,
sha256sum = hashlib.sha256(buf).hexdigest())
def serialize_certificates(g):
for common_name, path, buf, cert, signed, expires in g():
# Extract certificate tags from filesystem
try:
tags = []
for tag in getxattr(path, "user.xdg.tags").decode("utf-8").split(","):
if "=" in tag:
k, v = tag.split("=", 1)
else:
k, v = "other", tag
tags.append(dict(id=tag, key=k, value=v))
except IOError: # No such attribute(s)
tags = None
attributes = {}
for key in listxattr(path):
if key.startswith(b"user.machine."):
attributes[key[13:].decode("ascii")] = getxattr(path, key).decode("ascii")
# Extract lease information from filesystem
try:
last_seen = datetime.strptime(getxattr(path, "user.lease.last_seen").decode("ascii"), "%Y-%m-%dT%H:%M:%S.%fZ")
lease = dict(
inner_address = getxattr(path, "user.lease.inner_address").decode("ascii"),
outer_address = getxattr(path, "user.lease.outer_address").decode("ascii"),
last_seen = last_seen,
age = datetime.utcnow() - last_seen
)
except IOError: # No such attribute(s)
lease = None
try:
signer_username = getxattr(path, "user.signature.username").decode("ascii")
except IOError:
signer_username = None
# TODO: dedup
serialized = dict(
serial = "%x" % cert.serial_number,
organizational_unit = cert.subject.native.get("organizational_unit_name"),
common_name = common_name,
# TODO: key type, key length, key exponent, key modulo
signed = signed,
expires = expires,
sha256sum = hashlib.sha256(buf).hexdigest(),
signer = signer_username,
lease = lease,
tags = tags,
attributes = attributes or None,
responder_url = None
)
for e in cert["tbs_certificate"]["extensions"].native:
if e["extn_id"] == "key_usage":
serialized["key_usage"] = e["extn_value"]
elif e["extn_id"] == "extended_key_usage":
serialized["extended_key_usage"] = e["extn_value"]
elif e["extn_id"] == "basic_constraints":
serialized["basic_constraints"] = e["extn_value"]
elif e["extn_id"] == "crl_distribution_points":
for c in e["extn_value"]:
serialized["revoked_url"] = c["distribution_point"]
break
serialized["extended_key_usage"] = e["extn_value"]
elif e["extn_id"] == "authority_information_access":
for a in e["extn_value"]:
if a["access_method"] == "ocsp":
serialized["responder_url"] = a["access_location"]
else:
raise NotImplementedError("Don't know how to handle AIA access method %s" % a["access_method"])
elif e["extn_id"] == "authority_key_identifier":
pass
elif e["extn_id"] == "key_identifier":
pass
elif e["extn_id"] == "subject_alt_name":
serialized["subject_alt_name"], = e["extn_value"]
else:
raise NotImplementedError("Don't know how to handle extension %s" % e["extn_id"])
yield serialized
logger.info("Logged in authority administrator %s from %s with %s" % (
req.context.get("user"), req.context.get("remote_addr"), req.context.get("user_agent")))
return dict(
user = dict(
name=req.context.get("user").name,
gn=req.context.get("user").given_name,
sn=req.context.get("user").surname,
mail=req.context.get("user").mail
),
request_submission_allowed = config.REQUEST_SUBMISSION_ALLOWED,
service = dict(
protocols = config.SERVICE_PROTOCOLS,
routers = [j[0] for j in self.authority.list_signed(
common_name=config.SERVICE_ROUTERS)]
),
builder = dict(
profiles = config.IMAGE_BUILDER_PROFILES or None
),
authority = dict(
hostname = const.FQDN,
tokens = self.token_manager.list() if self.token_manager else None,
tagging = [dict(name=t[0], type=t[1], title=t[2]) for t in config.TAG_TYPES],
lease = dict(
offline = 600, # Seconds from last seen activity to consider lease offline, OpenVPN reneg-sec option
dead = 604800 # Seconds from last activity to consider lease dead, X509 chain broken or machine discarded
),
certificate = dict(
algorithm = self.authority.public_key.algorithm,
common_name = self.authority.certificate.subject.native["common_name"],
distinguished_name = cert_to_dn(self.authority.certificate),
md5sum = hashlib.md5(self.authority.certificate_buf).hexdigest(),
blob = self.authority.certificate_buf.decode("ascii"),
organization = self.authority.certificate["tbs_certificate"]["subject"].native.get("organization_name"),
signed = self.authority.certificate["tbs_certificate"]["validity"]["not_before"].native.replace(tzinfo=None),
expires = self.authority.certificate["tbs_certificate"]["validity"]["not_after"].native.replace(tzinfo=None)
),
mailer = dict(
name = config.MAILER_NAME,
address = config.MAILER_ADDRESS
) if config.MAILER_ADDRESS else None,
user_enrollment_allowed=config.USER_ENROLLMENT_ALLOWED,
user_multiple_certificates=config.USER_MULTIPLE_CERTIFICATES,
events = config.EVENT_SOURCE_SUBSCRIBE % config.EVENT_SOURCE_TOKEN,
requests=serialize_requests(self.authority.list_requests),
signed=serialize_certificates(self.authority.list_signed),
revoked=serialize_revoked(self.authority.list_revoked),
signature = dict(
revocation_list_lifetime=config.REVOCATION_LIST_LIFETIME,
profiles = sorted([p.serialize() for p in config.PROFILES.values()], key=lambda p:p.get("slug")),
)
),
authorization = dict(
admin_users = User.objects.filter_admins(),
user_subnets = config.USER_SUBNETS or None,
autosign_subnets = config.AUTOSIGN_SUBNETS or None,
request_subnets = config.REQUEST_SUBNETS or None,
machine_enrollment_subnets=config.MACHINE_ENROLLMENT_SUBNETS or None,
admin_subnets=config.ADMIN_SUBNETS or None,
ocsp_subnets = config.OCSP_SUBNETS or None,
crl_subnets = config.CRL_SUBNETS or None,
scep_subnets = config.SCEP_SUBNETS or None,
),
features=dict(
token=bool(config.TOKEN_URL),
tagging=True,
leases=True,
logging=config.LOGGING_BACKEND)
)
| 50.023041 | 130 | 0.539383 | [
"MIT"
] | dresslerit/certidude | certidude/api/session.py | 10,855 | Python |
val1 = 5
val2 = 2
suma = val1 + val2
print(suma)
resta = val2 - val1
print(resta)
promedio = (val1+val2)/2
print(promedio)
producto = ((val1-val2)/2)*365
print(producto)
centroamerica = ("Guatemala, Belice, El salvador, Honduras, Nicaragua, Costa Rica, Panamá")
print(len(centroamerica))
print(centroamerica[53])
| 16.9 | 92 | 0.674556 | [
"CC0-1.0"
] | Salazar769/Ciclo-1-python | cadenas y variables.py | 339 | Python |
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
@app.get("/qr/{query}", response_class=HTMLResponse)
async def read_item(request: Request, query: str):
    # the query string has a strict format: FdIdOdDDMMYYYYddmmyyyy
    # F, I, O - first letter of the last name, first name and patronymic
    # d - number of asterisks (length of the name minus 1), e.g. Kharitonova Ulyana Yorkovna => Х9У5Й7 is displayed as
    # Х********* У***** Й*******
    # DDMMYYYY - date of birth
    # ddmmyyyy - expiration date
fio = query[0] + '*' * int(query[1]) + ' '\
+ query[2] + '*' * int(query[3]) + ' '\
+ query[4] + '*' * int(query[5])
birthday = query[6:8] + '.' + query[8:10] + '.' + query[10:14]
expired = query[14:16] + '.' + query[16:18] + '.' + query[18:22]
return templates.TemplateResponse("qr.j2.html", {"request": request, "fio": fio, "birthday": birthday, "expired": expired})
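# A rough worked example (hypothetical input, not taken from the original repo):
# GET /qr/Х9У5Й70101199031122021 is parsed as
#   fio      -> "Х********* У***** Й*******"
#   birthday -> "01.01.1990"
#   expired  -> "31.12.2021"
# and rendered through the qr.j2.html template.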
| 38.066667 | 127 | 0.65324 | [
"Apache-2.0"
] | SKDCO/SKDCO | files/fake-QR-code-goskulugi/qr.py | 1,304 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from bedrock.mozorg.util import page
from bedrock.redirects.util import redirect
urlpatterns = (
# Issue 9727 /foundation/annualreport/2019/
redirect(r'^annualreport/$', 'foundation.annualreport.2019.index',
name='foundation.annualreport', locale_prefix=False),
# Older annual report financial faqs - these are linked from blog posts
# was e.g.: http://www.mozilla.org/foundation/documents/mozilla-2008-financial-faq.html
page('documents/mozilla-2006-financial-faq', 'foundation/documents/mozilla-2006-financial-faq.html'),
page('documents/mozilla-2007-financial-faq', 'foundation/documents/mozilla-2007-financial-faq.html'),
page('documents/mozilla-2008-financial-faq', 'foundation/documents/mozilla-2008-financial-faq.html'),
# ported from PHP in Bug 960689
page('documents/bylaws-amendment-1', 'foundation/documents/bylaws-amendment-1.html'),
page('documents/bylaws-amendment-2', 'foundation/documents/bylaws-amendment-2.html'),
page('documents/articles-of-incorporation', 'foundation/documents/articles-of-incorporation.html'),
page('documents/articles-of-incorporation/amendment', 'foundation/documents/articles-of-incorporation-amendment.html'),
page('documents/bylaws', 'foundation/documents/bylaws.html'),
# was https://www.mozilla.org/foundation/annualreport/2009/
page('annualreport/2009', 'foundation/annualreport/2009/index.html'),
# was .html
page('annualreport/2009/a-competitive-world', 'foundation/annualreport/2009/a-competitive-world.html'),
# was .html
page('annualreport/2009/broadening-our-scope', 'foundation/annualreport/2009/broadening-our-scope.html'),
# was .html
page('annualreport/2009/sustainability', 'foundation/annualreport/2009/sustainability.html'),
# was https://www.mozilla.org/foundation/annualreport/2009/faq.html
# changing to https://www.mozilla.org/foundation/annualreport/2009/faq/
page('annualreport/2009/faq', 'foundation/annualreport/2009/faq.html'),
page('annualreport/2010', 'foundation/annualreport/2010/index.html'),
page('annualreport/2010/ahead', 'foundation/annualreport/2010/ahead.html'),
page('annualreport/2010/opportunities', 'foundation/annualreport/2010/opportunities.html'),
page('annualreport/2010/people', 'foundation/annualreport/2010/people.html'),
page('annualreport/2010/faq', 'foundation/annualreport/2010/faq.html'),
page('annualreport/2011', 'foundation/annualreport/2011.html'),
page('annualreport/2011/faq', 'foundation/annualreport/2011faq.html'),
page('annualreport/2012', 'foundation/annualreport/2012/index.html'),
page('annualreport/2012/faq', 'foundation/annualreport/2012/faq.html'),
page('annualreport/2013', 'foundation/annualreport/2013/index.html'),
page('annualreport/2013/faq', 'foundation/annualreport/2013/faq.html'),
page('annualreport/2014', 'foundation/annualreport/2014/index.html'),
page('annualreport/2014/faq', 'foundation/annualreport/2014/faq.html'),
page('annualreport/2015', 'foundation/annualreport/2015/index.html'),
page('annualreport/2015/faq', 'foundation/annualreport/2015/faq.html'),
page('annualreport/2016', 'foundation/annualreport/2016/index.html'),
page('annualreport/2017', 'foundation/annualreport/2017/index.html'),
page('annualreport/2018', 'foundation/annualreport/2018/index.html'),
page('annualreport/2019', 'foundation/annualreport/2019/index.html'),
page('feed-icon-guidelines', 'foundation/feed-icon-guidelines/index.html'),
page('feed-icon-guidelines/faq', 'foundation/feed-icon-guidelines/faq.html'),
page('licensing', 'foundation/licensing.html'),
page('licensing/website-content', 'foundation/licensing/website-content.html'),
page('licensing/website-markup', 'foundation/licensing/website-markup.html'),
page('licensing/binary-components', 'foundation/licensing/binary-components/index.html'),
page('licensing/binary-components/rationale', 'foundation/licensing/binary-components/rationale.html'),
page('moco', 'foundation/moco.html'),
page('openwebfund/more', 'foundation/openwebfund/more.html'),
page('openwebfund/thanks', 'foundation/openwebfund/thanks.html'),
page('trademarks/policy', 'foundation/trademarks/policy.html'),
page('trademarks/list', 'foundation/trademarks/list.html'),
page('trademarks/distribution-policy', 'foundation/trademarks/distribution-policy.html'),
page('trademarks/community-edition-permitted-changes', 'foundation/trademarks/community-edition-permitted-changes.html'),
page('trademarks/community-edition-policy', 'foundation/trademarks/community-edition-policy.html'),
)
| 55.659091 | 125 | 0.746019 | [
"MPL-2.0"
] | amychurchwell/bedrock | bedrock/foundation/urls.py | 4,898 | Python |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
from pants.engine.addressable import (MutationError, NotSerializableError, addressable,
addressable_dict, addressable_list)
from pants.engine.objects import Resolvable, Serializable
from pants.util.objects import Exactly, TypeConstraintError
class SimpleSerializable(Serializable):
def __init__(self, **kwargs):
self._kwargs = kwargs
def _asdict(self):
return self._kwargs
class CountingResolvable(Resolvable):
def __init__(self, address, value):
self._address = address
self._value = value
self._resolutions = 0
@property
def address(self):
return self._address
def resolve(self):
try:
return self._value
finally:
self._resolutions += 1
@property
def resolutions(self):
return self._resolutions
class AddressableDescriptorTest(unittest.TestCase):
def test_inappropriate_application(self):
class NotSerializable:
def __init__(self, count):
super().__init__()
self.count = count
@addressable(Exactly(int))
def count(self):
pass
with self.assertRaises(NotSerializableError):
NotSerializable(42)
class AddressableTest(unittest.TestCase):
class Person(SimpleSerializable):
def __init__(self, age):
super(AddressableTest.Person, self).__init__()
self.age = age
@addressable(Exactly(int))
def age(self):
"""Return the person's age in years.
:rtype int
"""
def test_none(self):
person = self.Person(None)
self.assertIsNone(person.age, None)
def test_value(self):
person = self.Person(42)
self.assertEqual(42, person.age)
def test_address(self):
person = self.Person('//:meaning-of-life')
self.assertEqual('//:meaning-of-life', person.age)
def test_resolvable(self):
resolvable_age = CountingResolvable('//:meaning-of-life', 42)
person = self.Person(resolvable_age)
self.assertEqual(0, resolvable_age.resolutions)
self.assertEqual(42, person.age)
self.assertEqual(1, resolvable_age.resolutions)
self.assertEqual(42, person.age)
self.assertEqual(2, resolvable_age.resolutions)
def test_type_mismatch_value(self):
with self.assertRaises(TypeConstraintError):
self.Person(42.0)
def test_type_mismatch_resolvable(self):
resolvable_age = CountingResolvable('//:meaning-of-life', 42.0)
person = self.Person(resolvable_age)
with self.assertRaises(TypeConstraintError):
person.age
def test_single_assignment(self):
person = self.Person(42)
with self.assertRaises(MutationError):
person.age = 37
class AddressableListTest(unittest.TestCase):
class Series(SimpleSerializable):
def __init__(self, values):
super(AddressableListTest.Series, self).__init__()
self.values = values
@addressable_list(Exactly(int, float))
def values(self):
"""Return this series' values.
:rtype list of int or float
"""
def test_none(self):
series = self.Series(None)
self.assertEqual([], series.values)
def test_values(self):
series = self.Series([42, 1 / 137.0])
self.assertEqual([42, 1 / 137.0], series.values)
def test_addresses(self):
series = self.Series(['//:meaning-of-life'])
self.assertEqual(['//:meaning-of-life'], series.values)
def test_resolvables(self):
resolvable_value = CountingResolvable('//:fine-structure-constant', 1 / 137.0)
series = self.Series([resolvable_value])
self.assertEqual([1 / 137.0], series.values)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, series.values[0])
self.assertEqual(2, resolvable_value.resolutions)
def test_mixed(self):
resolvable_value = CountingResolvable('//:fine-structure-constant', 1 / 137.0)
series = self.Series([42, '//:meaning-of-life', resolvable_value])
self.assertEqual(0, resolvable_value.resolutions)
self.assertEqual([42, '//:meaning-of-life', 1 / 137.0], series.values)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, series.values[2])
self.assertEqual(2, resolvable_value.resolutions)
def test_type_mismatch_container(self):
with self.assertRaises(TypeError):
self.Series({42, 1 / 137.0})
def test_type_mismatch_value(self):
with self.assertRaises(TypeConstraintError):
self.Series([42, False])
def test_type_mismatch_resolvable(self):
resolvable_value = CountingResolvable('//:meaning-of-life', True)
series = self.Series([42, resolvable_value])
with self.assertRaises(TypeConstraintError):
series.values
def test_single_assignment(self):
series = self.Series([42])
with self.assertRaises(MutationError):
series.values = [37]
class AddressableDictTest(unittest.TestCase):
class Varz(SimpleSerializable):
def __init__(self, varz):
super(AddressableDictTest.Varz, self).__init__()
self.varz = varz
@addressable_dict(Exactly(int, float))
def varz(self):
"""Return a snapshot of the current /varz.
:rtype dict of string -> int or float
"""
def test_none(self):
varz = self.Varz(None)
self.assertEqual({}, varz.varz)
def test_values(self):
varz = self.Varz({'meaning of life': 42, 'fine structure constant': 1 / 137.0})
self.assertEqual({'meaning of life': 42, 'fine structure constant': 1 / 137.0}, varz.varz)
def test_addresses(self):
varz = self.Varz({'meaning of life': '//:meaning-of-life'})
self.assertEqual({'meaning of life': '//:meaning-of-life'}, varz.varz)
def test_resolvables(self):
resolvable_value = CountingResolvable('//:fine-structure-constant', 1 / 137.0)
varz = self.Varz({'fine structure constant': resolvable_value})
self.assertEqual({'fine structure constant': 1 / 137.0}, varz.varz)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, varz.varz['fine structure constant'])
self.assertEqual(2, resolvable_value.resolutions)
def test_mixed(self):
resolvable_value = CountingResolvable('//:fine-structure-constant', 1 / 137.0)
varz = self.Varz({'prime': 37,
'meaning of life': '//:meaning-of-life',
'fine structure constant': resolvable_value})
self.assertEqual(0, resolvable_value.resolutions)
self.assertEqual({'prime': 37,
'meaning of life': '//:meaning-of-life',
'fine structure constant': 1 / 137.0},
varz.varz)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, varz.varz['fine structure constant'])
self.assertEqual(2, resolvable_value.resolutions)
def test_type_mismatch_container(self):
with self.assertRaises(TypeError):
self.Varz([42, 1 / 137.0])
def test_type_mismatch_value(self):
with self.assertRaises(TypeConstraintError):
self.Varz({'meaning of life': 42, 'fine structure constant': False})
def test_type_mismatch_resolvable(self):
resolvable_item = CountingResolvable('//:fine-structure-constant', True)
varz = self.Varz({'meaning of life': 42, 'fine structure constant': resolvable_item})
with self.assertRaises(TypeConstraintError):
varz.varz
def test_single_assignment(self):
varz = self.Varz({'meaning of life': 42})
with self.assertRaises(MutationError):
varz.varz = {'fine structure constant': 1 / 137.0}
| 29.503876 | 94 | 0.688255 | [
"Apache-2.0"
] | AHassanSOS/pants | tests/python/pants_test/engine/test_addressable.py | 7,612 | Python |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
from pathlib import Path
import os
import sys
import dj_database_url
from urllib.parse import urlparse
from django.core.management.utils import get_random_secret_key
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", get_random_secret_key())
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv("DEBUG", "False") == "True"
ALLOWED_HOSTS = os.getenv("DJANGO_ALLOWED_HOSTS", "127.0.0.1,localhost").split(",")
# Application definition
INSTALLED_APPS = [
"polls.apps.PollsConfig",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"polls.middleware.StatsdMetricsMiddleware"
]
ROOT_URLCONF = "mysite.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "mysite.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# if os.getenv("DEVELOPMENT_MODE", "False") == "True":
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# elif len(sys.argv) > 0 and sys.argv[1] != 'collectstatic':
# if os.getenv("DATABASE_URL", None) is None:
# raise Exception("DATABASE_URL environment variable not defined")
# DATABASES = {
# "default": dj_database_url.parse(os.environ.get("DATABASE_URL")),
# }
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
| 28.892086 | 91 | 0.707171 | [
"MIT"
] | SigNoz/sample-django | mysite/settings.py | 4,016 | Python |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This test requires a running CAS server. You must use an ~/.authinfo
# file to specify your username and password. The CAS host and port must
# be specified using the CASHOST and CASPORT environment variables.
# A specific protocol ('cas', 'http', 'https', or 'auto') can be set using
# the CASPROTOCOL environment variable.
import os
import swat
import swat.utils.testing as tm
import dlpy
from swat.cas.table import CASTable
from dlpy.model import Model, Optimizer, AdamSolver, Sequence
from dlpy.sequential import Sequential
from dlpy.timeseries import TimeseriesTable
from dlpy.layers import (InputLayer, Conv2d, Conv1d, Pooling, Dense, OutputLayer,
Recurrent, Keypoints, BN, Res, Concat, Reshape, GlobalAveragePooling1D)
from dlpy.utils import caslibify, caslibify_context, file_exist_on_server
from dlpy.applications import Tiny_YoloV2
import unittest
class TestModel(unittest.TestCase):
'''
Please locate the images.sashdat file under the datasources to the DLPY_DATA_DIR.
'''
server_type = None
s = None
server_sep = '/'
data_dir = None
data_dir_local = None
def setUp(self):
swat.reset_option()
swat.options.cas.print_messages = False
swat.options.interactive_mode = False
self.s = swat.CAS()
self.server_type = tm.get_cas_host_type(self.s)
self.server_sep = '\\'
if self.server_type.startswith("lin") or self.server_type.startswith("osx"):
self.server_sep = '/'
if 'DLPY_DATA_DIR' in os.environ:
self.data_dir = os.environ.get('DLPY_DATA_DIR')
if self.data_dir.endswith(self.server_sep):
self.data_dir = self.data_dir[:-1]
self.data_dir += self.server_sep
if 'DLPY_DATA_DIR_LOCAL' in os.environ:
self.data_dir_local = os.environ.get('DLPY_DATA_DIR_LOCAL')
if self.data_dir_local.endswith(self.server_sep):
self.data_dir_local = self.data_dir_local[:-1]
self.data_dir_local += self.server_sep
def test_model1(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_', lr=0.001)
if r.severity > 0:
for msg in r.messages:
print(msg)
self.assertTrue(r.severity <= 1)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model2(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_')
self.assertTrue(r.severity == 0)
r2 = model1.predict(data='eee')
self.assertTrue(r2.severity == 0)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model3(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_')
self.assertTrue(r.severity == 0)
r1 = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=3)
self.assertTrue(r1.severity == 0)
r2 = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=2)
self.assertTrue(r2.severity == 0)
r3 = model1.predict(data='eee')
self.assertTrue(r3.severity == 0)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model4(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_')
self.assertTrue(r.severity == 0)
r2 = model1.evaluate(data='eee')
self.assertTrue(r2.severity == 0)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model5(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_')
self.assertTrue(r.severity == 0)
r1 = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=3)
self.assertTrue(r1.severity == 0)
r2 = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=2)
self.assertTrue(r2.severity == 0)
r3 = model1.evaluate(data='eee')
self.assertTrue(r3.severity == 0)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model6(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_', save_best_weights=True)
        if r.severity > 0:
            for msg in r.messages:
                print(msg)
        self.assertTrue(r.severity == 0)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model7(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_', save_best_weights=True)
self.assertTrue(r.severity == 0)
r2 = model1.predict(data='eee', use_best_weights=True)
self.assertTrue(r2.severity == 0)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model8(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_', save_best_weights=True)
        if r.severity > 0:
            for msg in r.messages:
                print(msg)
        self.assertTrue(r.severity == 0)
        r2 = model1.predict(data='eee')
        if r2.severity > 0:
            for msg in r2.messages:
                print(msg)
        self.assertTrue(r2.severity == 0)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model9(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_', save_best_weights=True)
self.assertTrue(r.severity == 0)
r2 = model1.evaluate(data='eee', use_best_weights=True)
self.assertTrue(r2.severity == 0)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model10(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_', save_best_weights=True)
self.assertTrue(r.severity == 0)
r2 = model1.evaluate(data='eee')
self.assertTrue(r2.severity == 0)
model1.save_to_table(self.data_dir)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model11(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_', save_best_weights=True)
self.assertTrue(r.severity == 0)
r1 = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=3)
self.assertTrue(r1.severity == 0)
r2 = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=2)
self.assertTrue(r2.severity == 0)
r3 = model1.evaluate(data='eee', use_best_weights=True)
self.assertTrue(r3.severity == 0)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model12(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_', save_best_weights=True)
self.assertTrue(r.severity == 0)
r1 = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=3)
self.assertTrue(r1.severity == 0)
r2 = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=2, save_best_weights=True)
self.assertTrue(r2.severity == 0)
r3 = model1.predict(data='eee', use_best_weights=True)
self.assertTrue(r3.severity == 0)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model13(self):
model = Sequential(self.s, model_table='simple_cnn')
model.add(InputLayer(3, 224, 224))
model.add(Conv2d(2, 3))
model.add(Pooling(2))
model.add(Dense(4))
model.add(OutputLayer(n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
model.save_to_table(self.data_dir)
def test_model13a(self):
model = Sequential(self.s, model_table='simple_cnn')
model.add(InputLayer(3, 224, 224))
model.add(Conv2d(2, 3))
model.add(Pooling(2))
model.add(Dense(4))
model.add(OutputLayer(n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
model.save_to_table(self.data_dir)
def test_model13b(self):
model = Sequential(self.s, model_table='simple_cnn')
model.add(layer=InputLayer(n_channels=1, height=10, width=10))
model.add(layer=OutputLayer(n=10, full_connect=False))
self.assertTrue(model.summary.loc[1, 'Number of Parameters'] == (0, 0))
model1 = Sequential(self.s, model_table='simple_cnn')
model1.add(layer=InputLayer(n_channels=1, height=10, width=10))
model1.add(layer=OutputLayer(n=10, full_connect=True))
self.assertTrue(model1.summary.loc[1, 'Number of Parameters'] == (1000, 10))
model2 = Sequential(self.s, model_table='Simple_CNN')
model2.add(layer=InputLayer(n_channels=1, height=10, width=10))
model2.add(layer=OutputLayer(n=10, full_connect=True, include_bias=False))
self.assertTrue(model2.summary.loc[1, 'Number of Parameters'] == (1000, 0))
model3 = Sequential(self.s, model_table='Simple_CNN')
model3.add(layer=InputLayer(n_channels=1, height=10, width=10))
model3.add(layer=Conv2d(4, 3))
model3.add(layer=OutputLayer(n=10))
self.assertTrue(model3.summary.loc[2, 'Number of Parameters'] == (4000, 10))
model4 = Sequential(self.s, model_table='Simple_CNN')
model4.add(layer=InputLayer(n_channels=1, height=10, width=10))
model4.add(layer=Conv2d(4, 3))
model4.add(layer=OutputLayer(n=10, full_connect=False))
self.assertTrue(model4.summary.loc[2, 'Number of Parameters'] == (0, 0))
def test_model14(self):
model = Sequential(self.s, model_table='Simple_CNN')
model.add(layer=InputLayer(n_channels=1, height=10, width=10))
model.add(layer=OutputLayer())
model.summary
def test_model15(self):
model = Sequential(self.s, model_table='Simple_CNN')
model.add(layer=InputLayer(n_channels=1, height=10, width=10))
model.add(layer=Keypoints())
self.assertTrue(model.summary.loc[1, 'Number of Parameters'] == (0, 0))
def test_model16(self):
model = Sequential(self.s, model_table='Simple_CNN')
model.add(layer=InputLayer(n_channels=1, height=10, width=10))
model.add(layer=Keypoints(n=10, include_bias=False))
self.assertTrue(model.summary.loc[1, 'Number of Parameters'] == (1000, 0))
    def test_model17(self):
model = Sequential(self.s, model_table='Simple_CNN')
model.add(layer=InputLayer(n_channels=1, height=10, width=10))
model.add(layer=Keypoints(n=10))
self.assertTrue(model.summary.loc[1, 'Number of Parameters'] == (1000, 10))
def test_model18(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=1)
self.assertTrue(r.severity == 0)
model1.save_weights_csv(self.data_dir)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_evaluate_obj_det(self):
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path = self.data_dir + 'evaluate_obj_det_det.sashdat', task = 'load')
self.s.table.loadtable(caslib = caslib,
casout = {'name': 'evaluate_obj_det_det', 'replace': True},
path = path)
self.s.table.loadtable(caslib = caslib,
casout = {'name': 'evaluate_obj_det_gt', 'replace': True},
path = 'evaluate_obj_det_gt.sashdat')
yolo_anchors = (5.9838598901098905,
3.4326923076923075,
2.184993862520458,
1.9841448445171848,
1.0261752136752136,
1.2277777777777779)
yolo_model = Tiny_YoloV2(self.s, grid_number = 17, scale = 1.0 / 255,
n_classes = 1, height = 544, width = 544,
predictions_per_grid = 3,
anchors = yolo_anchors,
max_boxes = 100,
coord_type = 'yolo',
max_label_per_image = 100,
class_scale = 1.0,
coord_scale = 2.0,
prediction_not_a_object_scale = 1,
object_scale = 5,
detection_threshold = 0.05,
iou_threshold = 0.2)
metrics = yolo_model.evaluate_object_detection(ground_truth = 'evaluate_obj_det_gt', coord_type = 'yolo',
detection_data = 'evaluate_obj_det_det', iou_thresholds=0.5)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_model_forecast1(self):
import datetime
try:
import pandas as pd
except:
unittest.TestCase.skipTest(self, "pandas not found in the libraries")
import numpy as np
filename1 = os.path.join(os.path.dirname(__file__), 'datasources', 'timeseries_exp1.csv')
importoptions1 = dict(filetype='delimited', delimiter=',')
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
self.table1 = TimeseriesTable.from_localfile(self.s, filename1, importoptions=importoptions1)
self.table1.timeseries_formatting(timeid='datetime',
timeseries=['series', 'covar'],
timeid_informat='ANYDTDTM19.',
timeid_format='DATETIME19.')
self.table1.timeseries_accumlation(acc_interval='day',
groupby=['id1var', 'id2var'])
self.table1.prepare_subsequences(seq_len=2,
target='series',
predictor_timeseries=['series'],
missing_handling='drop')
valid_start = datetime.date(2015, 1, 4)
test_start = datetime.date(2015, 1, 7)
traintbl, validtbl, testtbl = self.table1.timeseries_partition(
validation_start=valid_start, testing_start=test_start)
model1 = Sequential(self.s, model_table='lstm_rnn')
model1.add(InputLayer(std='STD'))
model1.add(Recurrent(rnn_type='LSTM', output_type='encoding', n=15, reversed_=False))
model1.add(OutputLayer(act='IDENTITY'))
optimizer = Optimizer(algorithm=AdamSolver(learning_rate=0.01), mini_batch_size=32,
seed=1234, max_epochs=10)
seq_spec = Sequence(**traintbl.sequence_opt)
result = model1.fit(traintbl, valid_table=validtbl, optimizer=optimizer,
sequence=seq_spec, **traintbl.inputs_target)
self.assertTrue(result.severity == 0)
resulttbl1 = model1.forecast(horizon=1)
self.assertTrue(isinstance(resulttbl1, CASTable))
self.assertTrue(resulttbl1.shape[0]==15)
local_resulttbl1 = resulttbl1.to_frame()
unique_time = local_resulttbl1.datetime.unique()
self.assertTrue(len(unique_time)==1)
self.assertTrue(pd.Timestamp(unique_time[0])==datetime.datetime(2015,1,7))
resulttbl2 = model1.forecast(horizon=3)
self.assertTrue(isinstance(resulttbl2, CASTable))
self.assertTrue(resulttbl2.shape[0]==45)
local_resulttbl2 = resulttbl2.to_frame()
local_resulttbl2.sort_values(by=['id1var', 'id2var', 'datetime'], inplace=True)
unique_time = local_resulttbl2.datetime.unique()
self.assertTrue(len(unique_time)==3)
for i in range(3):
self.assertTrue(pd.Timestamp(unique_time[i])==datetime.datetime(2015,1,7+i))
series_lag1 = local_resulttbl2.loc[(local_resulttbl2.id1var==1) & (local_resulttbl2.id2var==1),
'series_lag1'].values
series_lag2 = local_resulttbl2.loc[(local_resulttbl2.id1var==1) & (local_resulttbl2.id2var==1),
'series_lag2'].values
DL_Pred = local_resulttbl2.loc[(local_resulttbl2.id1var==1) & (local_resulttbl2.id2var==1),
'_DL_Pred_'].values
self.assertTrue(np.array_equal(series_lag1[1:3], DL_Pred[0:2]))
self.assertTrue(series_lag2[2]==DL_Pred[0])
def test_model_forecast2(self):
import datetime
try:
import pandas as pd
except:
unittest.TestCase.skipTest(self, "pandas not found in the libraries")
import numpy as np
filename1 = os.path.join(os.path.dirname(__file__), 'datasources', 'timeseries_exp1.csv')
importoptions1 = dict(filetype='delimited', delimiter=',')
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
self.table2 = TimeseriesTable.from_localfile(self.s, filename1, importoptions=importoptions1)
self.table2.timeseries_formatting(timeid='datetime',
timeseries=['series', 'covar'],
timeid_informat='ANYDTDTM19.',
timeid_format='DATETIME19.')
self.table2.timeseries_accumlation(acc_interval='day',
groupby=['id1var', 'id2var'])
self.table2.prepare_subsequences(seq_len=2,
target='series',
predictor_timeseries=['series', 'covar'],
missing_handling='drop')
valid_start = datetime.date(2015, 1, 4)
test_start = datetime.date(2015, 1, 7)
traintbl, validtbl, testtbl = self.table2.timeseries_partition(
validation_start=valid_start, testing_start=test_start)
model1 = Sequential(self.s, model_table='lstm_rnn')
model1.add(InputLayer(std='STD'))
model1.add(Recurrent(rnn_type='LSTM', output_type='encoding', n=15, reversed_=False))
model1.add(OutputLayer(act='IDENTITY'))
optimizer = Optimizer(algorithm=AdamSolver(learning_rate=0.01), mini_batch_size=32,
seed=1234, max_epochs=10)
seq_spec = Sequence(**traintbl.sequence_opt)
result = model1.fit(traintbl, valid_table=validtbl, optimizer=optimizer,
sequence=seq_spec, **traintbl.inputs_target)
self.assertTrue(result.severity == 0)
resulttbl1 = model1.forecast(testtbl, horizon=1)
self.assertTrue(isinstance(resulttbl1, CASTable))
self.assertTrue(resulttbl1.shape[0]==testtbl.shape[0])
local_resulttbl1 = resulttbl1.to_frame()
unique_time = local_resulttbl1.datetime.unique()
self.assertTrue(len(unique_time)==4)
for i in range(4):
self.assertTrue(pd.Timestamp(unique_time[i])==datetime.datetime(2015,1,7+i))
resulttbl2 = model1.forecast(testtbl, horizon=3)
self.assertTrue(isinstance(resulttbl2, CASTable))
self.assertTrue(resulttbl2.shape[0]==45)
local_resulttbl2 = resulttbl2.to_frame()
local_resulttbl2.sort_values(by=['id1var', 'id2var', 'datetime'], inplace=True)
unique_time = local_resulttbl2.datetime.unique()
self.assertTrue(len(unique_time)==3)
for i in range(3):
self.assertTrue(pd.Timestamp(unique_time[i])==datetime.datetime(2015,1,7+i))
series_lag1 = local_resulttbl2.loc[(local_resulttbl2.id1var==1) & (local_resulttbl2.id2var==1),
'series_lag1'].values
series_lag2 = local_resulttbl2.loc[(local_resulttbl2.id1var==1) & (local_resulttbl2.id2var==1),
'series_lag2'].values
DL_Pred = local_resulttbl2.loc[(local_resulttbl2.id1var==1) & (local_resulttbl2.id2var==1),
'_DL_Pred_'].values
self.assertTrue(np.array_equal(series_lag1[1:3], DL_Pred[0:2]))
self.assertTrue(series_lag2[2]==DL_Pred[0])
def test_model_forecast3(self):
import datetime
try:
import pandas as pd
except:
unittest.TestCase.skipTest(self, "pandas not found in the libraries")
import numpy as np
filename1 = os.path.join(os.path.dirname(__file__), 'datasources', 'timeseries_exp1.csv')
importoptions1 = dict(filetype='delimited', delimiter=',')
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
self.table3 = TimeseriesTable.from_localfile(self.s, filename1, importoptions=importoptions1)
self.table3.timeseries_formatting(timeid='datetime',
timeseries=['series', 'covar'],
timeid_informat='ANYDTDTM19.',
timeid_format='DATETIME19.')
self.table3.timeseries_accumlation(acc_interval='day',
groupby=['id1var', 'id2var'])
self.table3.prepare_subsequences(seq_len=2,
target='series',
predictor_timeseries=['series', 'covar'],
missing_handling='drop')
valid_start = datetime.date(2015, 1, 4)
test_start = datetime.date(2015, 1, 7)
traintbl, validtbl, testtbl = self.table3.timeseries_partition(
validation_start=valid_start, testing_start=test_start)
sascode = '''
data {};
set {};
drop series_lag1;
run;
'''.format(validtbl.name, validtbl.name)
self.s.retrieve('dataStep.runCode', _messagelevel='error', code=sascode)
sascode = '''
data {};
set {};
drop series_lag1;
run;
'''.format(testtbl.name, testtbl.name)
self.s.retrieve('dataStep.runCode', _messagelevel='error', code=sascode)
model1 = Sequential(self.s, model_table='lstm_rnn')
model1.add(InputLayer(std='STD'))
model1.add(Recurrent(rnn_type='LSTM', output_type='encoding', n=15, reversed_=False))
model1.add(OutputLayer(act='IDENTITY'))
optimizer = Optimizer(algorithm=AdamSolver(learning_rate=0.01), mini_batch_size=32,
seed=1234, max_epochs=10)
seq_spec = Sequence(**traintbl.sequence_opt)
result = model1.fit(traintbl, optimizer=optimizer,
sequence=seq_spec, **traintbl.inputs_target)
self.assertTrue(result.severity == 0)
resulttbl1 = model1.forecast(validtbl, horizon=1)
self.assertTrue(isinstance(resulttbl1, CASTable))
self.assertTrue(resulttbl1.shape[0]==15)
local_resulttbl1 = resulttbl1.to_frame()
unique_time = local_resulttbl1.datetime.unique()
self.assertTrue(len(unique_time)==1)
self.assertTrue(pd.Timestamp(unique_time[0])==datetime.datetime(2015,1,4))
resulttbl2 = model1.forecast(validtbl, horizon=3)
self.assertTrue(isinstance(resulttbl2, CASTable))
self.assertTrue(resulttbl2.shape[0]==45)
local_resulttbl2 = resulttbl2.to_frame()
local_resulttbl2.sort_values(by=['id1var', 'id2var', 'datetime'], inplace=True)
unique_time = local_resulttbl2.datetime.unique()
self.assertTrue(len(unique_time)==3)
for i in range(3):
self.assertTrue(pd.Timestamp(unique_time[i])==datetime.datetime(2015,1,4+i))
series_lag1 = local_resulttbl2.loc[(local_resulttbl2.id1var==1) & (local_resulttbl2.id2var==1),
'series_lag1'].values
series_lag2 = local_resulttbl2.loc[(local_resulttbl2.id1var==1) & (local_resulttbl2.id2var==1),
'series_lag2'].values
DL_Pred = local_resulttbl2.loc[(local_resulttbl2.id1var==1) & (local_resulttbl2.id2var==1),
'_DL_Pred_'].values
self.assertTrue(np.array_equal(series_lag1[1:3], DL_Pred[0:2]))
self.assertTrue(series_lag2[2]==DL_Pred[0])
with self.assertRaises(RuntimeError):
resulttbl3 = model1.forecast(testtbl, horizon=3)
def test_load_reshape_detection(self):
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
yolo_model = Model(self.s)
yolo_model.load(self.data_dir + 'YOLOV2_MULTISIZE.sashdat')
model_df = self.s.fetch(table = dict(name = yolo_model.model_name,
where = '_DLKey0_ eq "detection1" or _DLKey0_ eq "reshape1"'),
to = 50).Fetch
anchors_5 = model_df['_DLNumVal_'][model_df['_DLKey1_'] == 'detectionopts.anchors.8'].tolist()[0]
self.assertAlmostEqual(anchors_5, 1.0907, 4)
depth = model_df['_DLNumVal_'][model_df['_DLKey1_'] == 'reshapeopts.depth'].tolist()[0]
self.assertEqual(depth, 256)
def test_plot_ticks(self):
model1 = Sequential(self.s, model_table='Simple_CNN1')
model1.add(InputLayer(3, 224, 224))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Conv2d(8, 7))
model1.add(Pooling(2))
model1.add(Dense(16))
model1.add(OutputLayer(act='softmax', n=2))
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')
self.s.table.loadtable(caslib=caslib,
casout={'name': 'eee', 'replace': True},
path=path)
r = model1.fit(data='eee', inputs='_image_', target='_label_', lr=0.001, max_epochs=5)
# Test default tick_frequency value of 1
ax = model1.plot_training_history()
self.assertEqual(len(ax.xaxis.majorTicks), model1.n_epochs)
# Test even
tick_frequency = 2
ax = model1.plot_training_history(tick_frequency=tick_frequency)
self.assertEqual(len(ax.xaxis.majorTicks), model1.n_epochs // tick_frequency + 1)
# Test odd
tick_frequency = 3
ax = model1.plot_training_history(tick_frequency=tick_frequency)
self.assertEqual(len(ax.xaxis.majorTicks), model1.n_epochs // tick_frequency + 1)
# Test max
tick_frequency = model1.n_epochs
ax = model1.plot_training_history(tick_frequency=tick_frequency)
self.assertEqual(len(ax.xaxis.majorTicks), model1.n_epochs // tick_frequency + 1)
# Test 0
tick_frequency = 0
ax = model1.plot_training_history(tick_frequency=tick_frequency)
self.assertEqual(len(ax.xaxis.majorTicks), model1.n_epochs)
if (caslib is not None) and tmp_caslib:
self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
def test_stride(self):
model = Sequential(self.s, model_table = 'Simple_CNN_3classes_cropped')
model.add(InputLayer(1, width = 36, height = 144, #offsets = myimage.channel_means,
name = 'input1',
random_mutation = 'random',
random_flip = 'HV'))
model.add(Conv2d(64, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Conv2d(64, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Conv2d(64, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Pooling(height = 2, width = 2, stride_vertical = 2, stride_horizontal = 1, pool = 'max')) # 72, 36
model.add(Conv2d(128, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Conv2d(128, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Conv2d(128, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Pooling(height = 2, width = 2, stride_vertical = 2, stride_horizontal = 1, pool = 'max')) # 36*36
model.add(Conv2d(256, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Conv2d(256, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Conv2d(256, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Pooling(2, pool = 'max')) # 18 * 18
model.add(Conv2d(512, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Conv2d(512, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Conv2d(512, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Pooling(2, pool = 'max')) # 9 * 9
model.add(Conv2d(1024, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Conv2d(1024, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Conv2d(1024, 3, 3, include_bias = False, act = 'identity'))
model.add(BN(act = 'relu'))
model.add(Pooling(9))
model.add(Dense(256, dropout = 0.5))
model.add(OutputLayer(act = 'softmax', n = 3, name = 'output1'))
self.assertEqual(model.summary['Output Size'].values[-3], (1, 1, 1024))
model.print_summary()
# 2d print summary numerical check
self.assertEqual(model.summary.iloc[1, -1], 2985984)
def test_heat_map_analysis(self):
if self.data_dir is None:
unittest.TestCase.skipTest(self, 'DLPY_DATA_DIR is not set in the environment variables')
if not file_exist_on_server(self.s, self.data_dir + 'ResNet-50-model.caffemodel.h5'):
unittest.TestCase.skipTest(self, "File, {}, not found.".format(self.data_dir
+ 'ResNet-50-model.caffemodel.h5'))
from dlpy.applications import ResNet50_Caffe
from dlpy.images import ImageTable
pre_train_weight_file = os.path.join(self.data_dir, 'ResNet-50-model.caffemodel.h5')
my_im = ImageTable.load_files(self.s, self.data_dir+'giraffe_dolphin_small')
my_im_r = my_im.resize(width=224, inplace=False)
model = ResNet50_Caffe(self.s, model_table='ResNet50_Caffe',
n_classes=2, n_channels=3, width=224, height=224, scale=1,
random_flip='none', random_crop='none',
offsets=my_im_r.channel_means, pre_trained_weights=True,
pre_trained_weights_file=pre_train_weight_file,
include_top=False)
model.fit(data=my_im_r, mini_batch_size=1, max_epochs=1)
model.heat_map_analysis(data=my_im_r, mask_width=None, mask_height=None, step_size=None,
max_display=1)
self.assertRaises(ValueError, lambda:model.heat_map_analysis(mask_width=56, mask_height=56,
step_size=8, display=False))
self.assertRaises(ValueError, lambda:model.heat_map_analysis(data=my_im, mask_width=56,
mask_height=56, step_size=8, display=False))
try:
from numpy import array
except:
unittest.TestCase.skipTest(self, 'numpy is not installed')
self.assertRaises(ValueError, lambda:model.heat_map_analysis(data=array([]), mask_width=56,
mask_height=56, step_size=8, display=False))
def test_load_padding(self):
if self.data_dir is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")
model5 = Model(self.s)
model5.load(path = self.data_dir + 'vgg16.sashdat')
def test_conv1d_model(self):
# a model from https://blog.goodaudience.com/introduction-to-1d-convolutional-neural-networks-in-keras-for-time-sequences-3a7ff801a2cf
Conv1D = Conv1d
MaxPooling1D=Pooling
model_m = Sequential(self.s)
model_m.add(InputLayer(width=80*3, height=1, n_channels=1))
model_m.add(Conv1D(100, 10, act='relu'))
model_m.add(Conv1D(100, 10, act='relu'))
model_m.add(MaxPooling1D(3))
model_m.add(Conv1D(160, 10, act='relu'))
model_m.add(Conv1D(160, 10, act='relu'))
model_m.add(GlobalAveragePooling1D(dropout=0.5))
model_m.add(OutputLayer(n=6, act='softmax'))
# use assertEqual to check whether the layer output size matches the expected value for MaxPooling1D
self.assertEqual(model_m.layers[3].output_size, (1, 80, 100))
model_m.print_summary()
# 1d print summary numerical check
self.assertEqual(model_m.summary.iloc[1, -1], 240000)
def test_load_weights_attr(self):
model = Model(self.s)
model.load(path=self.data_dir+'Simple_CNN1.sashdat')
# load_weights_attr table from server; expect to be clean
model.load_weights_attr(self.data_dir+'Simple_CNN1_weights_attr.sashdat')
def test_mobilenetv2(self):
try:
import onnx
from dlpy.model_conversion.onnx_transforms import (Transformer, OpTypePattern,
ConstToInitializer,
InitReshape, InitUnsqueeze,
FuseMulAddBN)
from dlpy.model_conversion.onnx_graph import OnnxGraph
from onnx import helper, numpy_helper
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
from dlpy.model import Model
if self.data_dir_local is None:
unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_LOCAL is not set in the environment variables")
path = os.path.join(self.data_dir_local, 'mobilenetv2-1.0.onnx')
onnx_model = onnx.load_model(path)
model1 = Model.from_onnx_model(self.s,
onnx_model,
output_model_table='mobilenetv2',
                                       offsets=[255 * x for x in [0.485, 0.456, 0.406]],
                                       norm_stds=[255 * x for x in [0.229, 0.224, 0.225]])
def test_model_crnn_bug(self):
model = Sequential(self.s, model_table='crnn')
model.add(InputLayer(3,256,16))
model.add(Reshape(height=16,width=256,depth=3))
model.add(Conv2d(64,3,3,stride=1,padding=1)) # size = 16x256x64
model.add(Pooling(2,2,2)) # size = 8x128x64
model.add(Conv2d(128,3,3,stride=1,padding=1)) # size = 8x128x128
model.add(Pooling(2,2,2)) # size = 4x64x128
model.add(Conv2d(256,3,3,stride=1,padding=1,act='IDENTITY')) # size = 4x64x256
model.add(BN(act='RELU')) # size = 4x64x256
model.add(Conv2d(256,3,3,stride=1,padding=1)) # size = 4x64x256
model.add(Pooling(1,2,stride_horizontal=1, stride_vertical=2))
#, padding=1)) # size = 2x64x256
#model.add(Pooling(1,2,stride=2,stride_horizontal=1, stride_vertical=2,)) # size = 2x64x256
model.add(Conv2d(512,3,3,stride=1,padding=1, act='IDENTITY')) # size = 2x64x512
model.add(BN(act='RELU'))
model.add(Conv2d(512,3,3,stride=1,padding=1)) # size = 2x64x512
model.add(Pooling(1,2,stride_horizontal=1, stride_vertical=2)) #, padding=1)) # size = 1x64x512
#model.add(Pooling(1,2,stride=2,stride_horizontal=1, stride_vertical=2,)) # size = 1x64x512
model.add(Conv2d(512,3,3,stride=1,padding=1, act='IDENTITY')) # size = 1x64x512
model.add(BN(act='RELU'))
model.add(Reshape(order='DWH',width=64, height=512, depth=1))
model.add(Recurrent(512,output_type='SAMELENGTH'))
model.add(OutputLayer(error='CTC'))
model.print_summary()
def tearDown(self):
# tear down tests
try:
self.s.terminate()
except swat.SWATError:
pass
del self.s
swat.reset_option()
if __name__ == '__main__':
unittest.main()
| 44.142986 | 142 | 0.591271 | [
"Apache-2.0"
] | Mentos05/python-dlpy | dlpy/tests/test_model.py | 48,778 | Python |
from pathlib import Path
from typing import *
from isolateparser.resultparser.parsers import gdtotable
import pytest
import itertools
from loguru import logger
data_folder = Path(__file__).parent / "data" / "sample_files"
@pytest.fixture
def parser() -> gdtotable.GDToTable:
return gdtotable.GDToTable()
def example_gd_file_contents() -> List[str]:
expected = [
"SNP 1 13 1 598843 G gene_name=JNAKOBFD_00523/JNAKOBFD_00524 gene_position=intergenic (+4/+95) gene_product=[locus_tag=Bcen2424_2337] [db_xref=InterPro:IPR001734] [protein=Na+/solute symporter][protein_id=ABK09088.1] [location=complement(2599928..2601478)][gbkey=CDS]/[locus_tag=Bcen2424_2336] [db_xref=InterPro:IPR004360,InterPro:IPR009725] [protein=Glyoxalase/bleomycin resistance protein/dioxygenase] [protein_id=ABK09087.1][location=2599374..2599829] [gbkey=CDS] genes_promoter=JNAKOBFD_00524 html_gene_name=<i>JNAKOBFD_00523</i> → / ← <i>JNAKOBFD_00524</i> locus_tag=–/– mutation_category=snp_intergenic snp_type=intergenic",
"INS 2 16 1 1137319 C gene_name=JNAKOBFD_01012 gene_position=coding (1836/1887 nt) gene_product=[locus_tag=Bcen2424_6766] [db_xref=InterPro:IPR002202] [protein=PE-PGRS family protein][protein_id=ABK13495.1] [location=complement(1015331..1019638)][gbkey=CDS] gene_strand=< genes_overlapping=JNAKOBFD_01012 html_gene_name=<i>JNAKOBFD_01012</i> ← mutation_category=small_indel repeat_length=1 repeat_new_copies=6 repeat_ref_copies=5 repeat_seq=C",
"SNP 3 38 1 1301313 A aa_new_seq=L aa_position=759 aa_ref_seq=L codon_new_seq=TTG codon_number=759 codon_position=1 codon_ref_seq=CTG gene_name=JNAKOBFD_01153 gene_position=2275 gene_product=[locus_tag=Bcen2424_5983] [db_xref=InterPro:IPR005594,InterPro:IPR006162,InterPro:IPR008635,InterPro:IPR008640][protein=YadA C-terminal domain protein][protein_id=ABK12716.1] [location=complement(124247..128662)][gbkey=CDS] gene_strand=< genes_overlapping=JNAKOBFD_01153 html_gene_name=<i>JNAKOBFD_01153</i> ← mutation_category=snp_synonymous snp_type=synonymous transl_table=1",
"SNP 4 39 1 1301317 C aa_new_seq=T aa_position=757 aa_ref_seq=T codon_new_seq=ACG codon_number=757 codon_position=3 codon_ref_seq=ACC gene_name=JNAKOBFD_01153 gene_position=2271 gene_product=[locus_tag=Bcen2424_5983] [db_xref=InterPro:IPR005594,InterPro:IPR006162,InterPro:IPR008635,InterPro:IPR008640][protein=YadA C-terminal domain protein][protein_id=ABK12716.1] [location=complement(124247..128662)][gbkey=CDS] gene_strand=< genes_overlapping=JNAKOBFD_01153 html_gene_name=<i>JNAKOBFD_01153</i> ← mutation_category=snp_synonymous snp_type=synonymous transl_table=1",
"SNP 5 56 2 783559 G gene_name=JNAKOBFD_03573/JNAKOBFD_03574 gene_position=intergenic (+102/-470) gene_product=[locus_tag=Bcen2424_0544] [db_xref=InterPro:IPR001753] [protein=short chain enoyl-CoA hydratase] [protein_id=ABK07298.1] [location=605087..605863][gbkey=CDS]/16S ribosomal RNA genes_promoter=JNAKOBFD_03574 html_gene_name=<i>JNAKOBFD_03573</i> → / → <i>JNAKOBFD_03574</i> locus_tag=–/– mutation_category=snp_intergenic snp_type=intergenic",
"SNP 6 58 2 1595621 T aa_new_seq=V aa_position=111 aa_ref_seq=G codon_new_seq=GTG codon_number=111 codon_position=2 codon_ref_seq=GGG gene_name=JNAKOBFD_04336 gene_position=332 gene_product=[locus_tag=Bcen2424_2424] [db_xref=InterPro:IPR003754,InterPro:IPR007470] [protein=protein of unknown function DUF513, hemX] [protein_id=ABK09174.1][location=complement(2688435..2690405)] [gbkey=CDS] gene_strand=> genes_overlapping=JNAKOBFD_04336 html_gene_name=<i>JNAKOBFD_04336</i> → mutation_category=snp_nonsynonymous snp_type=nonsynonymous transl_table=1",
"SNP 7 59 2 1595623 A aa_new_seq=I aa_position=112 aa_ref_seq=F codon_new_seq=ATC codon_number=112 codon_position=1 codon_ref_seq=TTC gene_name=JNAKOBFD_04336 gene_position=334 gene_product=[locus_tag=Bcen2424_2424] [db_xref=InterPro:IPR003754,InterPro:IPR007470] [protein=protein of unknown function DUF513, hemX] [protein_id=ABK09174.1][location=complement(2688435..2690405)] [gbkey=CDS] gene_strand=> genes_overlapping=JNAKOBFD_04336 html_gene_name=<i>JNAKOBFD_04336</i> → mutation_category=snp_nonsynonymous snp_type=nonsynonymous transl_table=1",
"SNP 8 61 2 2478091 A gene_name=JNAKOBFD_05108/JNAKOBFD_05109 gene_position=intergenic (-161/+83) gene_product=[locus_tag=Bcen2424_4005] [db_xref=InterPro:IPR007138] [protein=Antibiotic biosynthesis monooxygenase] [protein_id=ABK10741.1] [location=893139..893474][gbkey=CDS]/[locus_tag=Bcen2424_4004] [db_xref=InterPro:IPR002198,InterPro:IPR002347] [protein=short-chain dehydrogenase/reductase SDR] [protein_id=ABK10740.1][location=892176..892895] [gbkey=CDS] html_gene_name=<i>JNAKOBFD_05108</i> ← / ← <i>JNAKOBFD_05109</i> locus_tag=–/– mutation_category=snp_intergenic snp_type=intergenic",
"SNP 9 62 2 2478103 C gene_name=JNAKOBFD_05108/JNAKOBFD_05109 gene_position=intergenic (-173/+71) gene_product=[locus_tag=Bcen2424_4005] [db_xref=InterPro:IPR007138] [protein=Antibiotic biosynthesis monooxygenase] [protein_id=ABK10741.1] [location=893139..893474][gbkey=CDS]/[locus_tag=Bcen2424_4004] [db_xref=InterPro:IPR002198,InterPro:IPR002347] [protein=short-chain dehydrogenase/reductase SDR] [protein_id=ABK10740.1][location=892176..892895] [gbkey=CDS] html_gene_name=<i>JNAKOBFD_05108</i> ← / ← <i>JNAKOBFD_05109</i> locus_tag=–/– mutation_category=snp_intergenic snp_type=intergenic",
"SNP 10 75 3 181113 A aa_new_seq=G aa_position=368 aa_ref_seq=G codon_new_seq=GGT codon_number=368 codon_position=3 codon_ref_seq=GGC gene_name=JNAKOBFD_05752 gene_position=1104 gene_product=[locus_tag=Bcen2424_5364] [db_xref=InterPro:IPR000014,InterPro:IPR003660,InterPro:IPR004089,InterPro:IPR004090,InterPro:IPR005829,InterPro:IPR013655][protein=methyl-accepting chemotaxis sensory transducer with Pas/Pac sensor] [protein_id=ABK12097.1] [location=complement(2460320..2461864)][gbkey=CDS] gene_strand=< genes_overlapping=JNAKOBFD_05752 html_gene_name=<i>JNAKOBFD_05752</i> ← mutation_category=snp_synonymous snp_type=synonymous transl_table=1",
"DEL 11 95 5 1 1248 gene_name=JNAKOBFD_06387 gene_product=JNAKOBFD_06387 genes_inactivated=JNAKOBFD_06387 html_gene_name=<i>JNAKOBFD_06387</i> mutation_category=large_deletion",
"RA 13 . 1 598843 0 A G consensus_score=20.7 frequency=1 gene_name=JNAKOBFD_00523/JNAKOBFD_00524 gene_position=intergenic (+4/+95) gene_product=[locus_tag=Bcen2424_2337] [db_xref=InterPro:IPR001734] [protein=Na+/solute symporter][protein_id=ABK09088.1] [location=complement(2599928..2601478)][gbkey=CDS]/[locus_tag=Bcen2424_2336] [db_xref=InterPro:IPR004360,InterPro:IPR009725] [protein=Glyoxalase/bleomycin resistance protein/dioxygenase] [protein_id=ABK09087.1][location=2599374..2599829] [gbkey=CDS] html_gene_name=<i>JNAKOBFD_00523</i> → / ← <i>JNAKOBFD_00524</i> locus_tag=–/– major_base=G major_cov=4/3 major_frequency=1.000e+00 minor_base=N minor_cov=0/0 new_cov=4/3 polymorphism_frequency=1.000e+00 polymorphism_score=NA prediction=consensus ref_cov=0/0 snp_type=intergenic total_cov=4/3",
"RA 16 . 1 1137314 1 . C aa_new_seq=G aa_position=614 aa_ref_seq=V bias_e_value=5670690 bias_p_value=0.804858 codon_new_seq=GGG codon_number=614 codon_position=2 codon_ref_seq=GTG consensus_score=13.5 fisher_strand_p_value=0.444444 frequency=1 gene_name=JNAKOBFD_01012 gene_position=1841 gene_product=[locus_tag=Bcen2424_6766] [db_xref=InterPro:IPR002202] [protein=PE-PGRS family protein][protein_id=ABK13495.1] [location=complement(1015331..1019638)][gbkey=CDS] gene_strand=< html_gene_name=<i>JNAKOBFD_01012</i> ← ks_quality_p_value=1 major_base=C major_cov=5/3 major_frequency=8.889e-01 minor_base=G minor_cov=0/1 new_cov=5/3 new_seq=C polymorphism_frequency=8.889e-01 polymorphism_score=-5.8 prediction=consensus ref_cov=0/0 ref_seq=A snp_type=nonsynonymous total_cov=5/4 transl_table=1",
"RA 38 . 1 1301313 0 G A aa_new_seq=L aa_position=759 aa_ref_seq=L codon_new_seq=TTG codon_number=759 codon_position=1 codon_ref_seq=CTG consensus_score=15.7 frequency=1 gene_name=JNAKOBFD_01153 gene_position=2275 gene_product=[locus_tag=Bcen2424_5983] [db_xref=InterPro:IPR005594,InterPro:IPR006162,InterPro:IPR008635,InterPro:IPR008640][protein=YadA C-terminal domain protein][protein_id=ABK12716.1] [location=complement(124247..128662)][gbkey=CDS] gene_strand=< html_gene_name=<i>JNAKOBFD_01153</i> ← major_base=A major_cov=4/4 major_frequency=1.000e+00 minor_base=N minor_cov=0/0 new_cov=4/4 new_seq=A polymorphism_frequency=1.000e+00 polymorphism_score=NA prediction=consensus ref_cov=0/0 ref_seq=G snp_type=synonymous total_cov=4/4 transl_table=1",
"RA 39 . 1 1301317 0 G C aa_new_seq=T aa_position=757 aa_ref_seq=T codon_new_seq=ACG codon_number=757 codon_position=3 codon_ref_seq=ACC consensus_score=14.0 frequency=1 gene_name=JNAKOBFD_01153 gene_position=2271 gene_product=[locus_tag=Bcen2424_5983] [db_xref=InterPro:IPR005594,InterPro:IPR006162,InterPro:IPR008635,InterPro:IPR008640][protein=YadA C-terminal domain protein][protein_id=ABK12716.1] [location=complement(124247..128662)][gbkey=CDS] gene_strand=< html_gene_name=<i>JNAKOBFD_01153</i> ← major_base=C major_cov=4/4 major_frequency=1.000e+00 minor_base=N minor_cov=0/0 new_cov=4/4 new_seq=C polymorphism_frequency=1.000e+00 polymorphism_score=NA prediction=consensus ref_cov=0/0 ref_seq=G snp_type=synonymous total_cov=4/4 transl_table=1",
"RA 56 . 2 783559 0 A G consensus_score=16.5 frequency=1 gene_name=JNAKOBFD_03573/JNAKOBFD_03574 gene_position=intergenic (+102/-470) gene_product=[locus_tag=Bcen2424_0544] [db_xref=InterPro:IPR001753] [protein=short chain enoyl-CoA hydratase] [protein_id=ABK07298.1] [location=605087..605863][gbkey=CDS]/16S ribosomal RNA html_gene_name=<i>JNAKOBFD_03573</i> → / → <i>JNAKOBFD_03574</i> locus_tag=–/– major_base=G major_cov=4/2 major_frequency=1.000e+00 minor_base=N minor_cov=0/0 new_cov=4/2 polymorphism_frequency=1.000e+00 polymorphism_score=NA prediction=consensus ref_cov=0/0 snp_type=intergenic total_cov=4/2",
"RA 58 . 2 1595621 0 G T aa_new_seq=V aa_position=111 aa_ref_seq=G bias_e_value=7045580 bias_p_value=1 codon_new_seq=GTG codon_number=111 codon_position=2 codon_ref_seq=GGG consensus_score=20.2 fisher_strand_p_value=1 frequency=1 gene_name=JNAKOBFD_04336 gene_position=332 gene_product=[locus_tag=Bcen2424_2424] [db_xref=InterPro:IPR003754,InterPro:IPR007470] [protein=protein of unknown function DUF513, hemX] [protein_id=ABK09174.1][location=complement(2688435..2690405)] [gbkey=CDS] gene_strand=> html_gene_name=<i>JNAKOBFD_04336</i> → ks_quality_p_value=1 major_base=T major_cov=6/2 major_frequency=8.889e-01 minor_base=G minor_cov=1/0 new_cov=6/2 new_seq=T polymorphism_frequency=8.889e-01 polymorphism_score=-5.9 prediction=consensus ref_cov=1/0 ref_seq=G snp_type=nonsynonymous total_cov=7/2 transl_table=1",
"RA 59 . 2 1595623 0 T A aa_new_seq=I aa_position=112 aa_ref_seq=F bias_e_value=7045580 bias_p_value=1 codon_new_seq=ATC codon_number=112 codon_position=1 codon_ref_seq=TTC consensus_score=18.7 fisher_strand_p_value=1 frequency=1 gene_name=JNAKOBFD_04336 gene_position=334 gene_product=[locus_tag=Bcen2424_2424] [db_xref=InterPro:IPR003754,InterPro:IPR007470] [protein=protein of unknown function DUF513, hemX] [protein_id=ABK09174.1][location=complement(2688435..2690405)] [gbkey=CDS] gene_strand=> html_gene_name=<i>JNAKOBFD_04336</i> → ks_quality_p_value=1 major_base=A major_cov=6/2 major_frequency=8.889e-01 minor_base=T minor_cov=1/0 new_cov=6/2 new_seq=A polymorphism_frequency=8.889e-01 polymorphism_score=-5.4 prediction=consensus ref_cov=1/0 ref_seq=T snp_type=nonsynonymous total_cov=7/2 transl_table=1",
"RA 61 . 2 2478091 0 T A bias_e_value=4534860 bias_p_value=0.643647 consensus_score=14.8 fisher_strand_p_value=0.285714 frequency=1 gene_name=JNAKOBFD_05108/JNAKOBFD_05109 gene_position=intergenic (-161/+83) gene_product=[locus_tag=Bcen2424_4005] [db_xref=InterPro:IPR007138] [protein=Antibiotic biosynthesis monooxygenase] [protein_id=ABK10741.1] [location=893139..893474][gbkey=CDS]/[locus_tag=Bcen2424_4004] [db_xref=InterPro:IPR002198,InterPro:IPR002347] [protein=short-chain dehydrogenase/reductase SDR] [protein_id=ABK10740.1][location=892176..892895] [gbkey=CDS] html_gene_name=<i>JNAKOBFD_05108</i> ← / ← <i>JNAKOBFD_05109</i> ks_quality_p_value=1 locus_tag=–/– major_base=A major_cov=1/5 major_frequency=8.571e-01 minor_base=T minor_cov=1/0 new_cov=1/5 polymorphism_frequency=8.571e-01 polymorphism_score=-5.3 prediction=consensus ref_cov=1/0 snp_type=intergenic total_cov=2/5",
"RA 62 . 2 2478103 0 G C bias_e_value=4928640 bias_p_value=0.699537 consensus_score=12.3 fisher_strand_p_value=0.333333 frequency=1 gene_name=JNAKOBFD_05108/JNAKOBFD_05109 gene_position=intergenic (-173/+71) gene_product=[locus_tag=Bcen2424_4005] [db_xref=InterPro:IPR007138] [protein=Antibiotic biosynthesis monooxygenase] [protein_id=ABK10741.1] [location=893139..893474][gbkey=CDS]/[locus_tag=Bcen2424_4004] [db_xref=InterPro:IPR002198,InterPro:IPR002347] [protein=short-chain dehydrogenase/reductase SDR] [protein_id=ABK10740.1][location=892176..892895] [gbkey=CDS] html_gene_name=<i>JNAKOBFD_05108</i> ← / ← <i>JNAKOBFD_05109</i> ks_quality_p_value=1 locus_tag=–/– major_base=C major_cov=1/4 major_frequency=8.333e-01 minor_base=G minor_cov=1/0 new_cov=1/4 polymorphism_frequency=8.333e-01 polymorphism_score=-5.0 prediction=consensus ref_cov=1/0 snp_type=intergenic total_cov=2/4",
"RA 75 . 3 181113 0 G A aa_new_seq=G aa_position=368 aa_ref_seq=G codon_new_seq=GGT codon_number=368 codon_position=3 codon_ref_seq=GGC consensus_score=15.0 frequency=1 gene_name=JNAKOBFD_05752 gene_position=1104 gene_product=[locus_tag=Bcen2424_5364] [db_xref=InterPro:IPR000014,InterPro:IPR003660,InterPro:IPR004089,InterPro:IPR004090,InterPro:IPR005829,InterPro:IPR013655][protein=methyl-accepting chemotaxis sensory transducer with Pas/Pac sensor] [protein_id=ABK12097.1] [location=complement(2460320..2461864)][gbkey=CDS] gene_strand=< html_gene_name=<i>JNAKOBFD_05752</i> ← major_base=A major_cov=4/2 major_frequency=1.000e+00 minor_base=N minor_cov=0/0 new_cov=4/2 new_seq=A polymorphism_frequency=1.000e+00 polymorphism_score=NA prediction=consensus ref_cov=0/0 ref_seq=G snp_type=synonymous total_cov=4/2 transl_table=1",
"MC 95 . 5 1 1248 0 0 gene_name=JNAKOBFD_06387 gene_product=JNAKOBFD_06387 html_gene_name=<i>JNAKOBFD_06387</i> left_inside_cov=0 left_outside_cov=NA right_inside_cov=0 right_outside_cov=NA"
]
return expected
def test_read_gd_file(parser):
filename = data_folder / "SC1360.annotated.filtered.gd"
result = parser.read_gd_file(filename)
assert result == example_gd_file_contents()
def test_is_mutation(parser):
expected = [True] * 11 + [False] * 11
result = [parser.is_mutation(i) for i in example_gd_file_contents()]
assert result == expected
def test_is_evidence(parser):
expected = [False] * 11 + [True] * 11
result = [parser.is_evidence(i) for i in example_gd_file_contents()]
assert result == expected
sample_gd_lines = [
(
"SNP 1 13 1 598843 G gene_name=JNAKOBFD_00523/JNAKOBFD_00524 gene_position=intergenic (+4/+95) gene_product=[locus_tag=Bcen2424_2337] [db_xref=InterPro:IPR001734] [protein=Na+/solute symporter][protein_id=ABK09088.1] genes_promoter=JNAKOBFD_00524 html_gene_name=<i>JNAKOBFD_00523</i> → / ← <i>JNAKOBFD_00524</i> locus_tag=–/– mutation_category=snp_intergenic snp_type=intergenic",
{
'category_id': 'SNP', 'evidence_id': '1', 'parent_ids': '13', 'seq_id': '1', 'position': '598843',
'new_seq': 'G',
'gene_name': "JNAKOBFD_00523/JNAKOBFD_00524", 'gene_position': "intergenic (+4/+95)",
'gene_product': "[locus_tag=Bcen2424_2337] [db_xref=InterPro:IPR001734] [protein=Na+/solute symporter][protein_id=ABK09088.1]",
'genes_promoter': "JNAKOBFD_00524",
'html_gene_name': "<i>JNAKOBFD_00523</i> → / ← <i>JNAKOBFD_00524</i>",
'locus_tag': "–/–", 'mutation_category': 'snp_intergenic', 'snp_type': 'intergenic'
}
),
(
"SNP 4 39 1 1301317 C aa_new_seq=T aa_position=757 aa_ref_seq=T codon_new_seq=ACG codon_number=757 codon_position=3 codon_ref_seq=ACC gene_name=JNAKOBFD_01153 gene_position=2271 gene_product=[locus_tag=Bcen2424_5983] gene_strand=< genes_overlapping=JNAKOBFD_01153 html_gene_name=<i>JNAKOBFD_01153</i> ← mutation_category=snp_synonymous snp_type=synonymous transl_table=1",
{
'category_id': 'SNP', 'evidence_id': '4', 'parent_ids': '39', 'seq_id': '1', 'position': '1301317',
'new_seq': 'C',
'aa_new_seq': 'T', 'aa_position': '757', 'aa_ref_seq': 'T', 'codon_new_seq': 'ACG',
'codon_number': '757', 'codon_position': '3', 'codon_ref_seq': 'ACC',
'gene_name': "JNAKOBFD_01153", 'gene_position': '2271',
'gene_product': '[locus_tag=Bcen2424_5983]', 'gene_strand': '<',
'genes_overlapping': 'JNAKOBFD_01153', 'html_gene_name': "<i>JNAKOBFD_01153</i> ←",
'mutation_category': 'snp_synonymous', 'snp_type': 'synonymous', 'transl_table': '1'
}
),
(
"INS 2 16 1 1137319 C gene_name=JNAKOBFD_01012 gene_position=coding (1836/1887 nt) gene_product=[locus_tag=Bcen2424_6766] gene_strand=< genes_overlapping=JNAKOBFD_01012 html_gene_name=<i>JNAKOBFD_01012</i> ← mutation_category=small_indel repeat_length=1 repeat_new_copies=6 repeat_ref_copies=5 repeat_seq=C",
{
'category_id': 'INS', 'evidence_id': '2', 'parent_ids': '16', 'seq_id': '1', 'position': '1137319',
'new_seq': 'C',
'gene_name': "JNAKOBFD_01012", 'gene_position': "coding (1836/1887 nt)",
'gene_product': "[locus_tag=Bcen2424_6766]", 'gene_strand': '<',
'genes_overlapping': 'JNAKOBFD_01012', 'html_gene_name': "<i>JNAKOBFD_01012</i> ←",
'mutation_category': 'small_indel', 'repeat_length': '1', 'repeat_new_copies': '6',
'repeat_ref_copies': '5', 'repeat_seq': 'C'
}
),
(
"DEL 11 95 5 1 1248 gene_name=JNAKOBFD_06387 gene_product=JNAKOBFD_06387 genes_inactivated=JNAKOBFD_06387 html_gene_name=<i>JNAKOBFD_06387</i> mutation_category=large_deletion",
{
'category_id': 'DEL', 'evidence_id': '11', 'parent_ids': '95', 'seq_id': '5', 'position': '1',
'size': '1248',
'gene_name': 'JNAKOBFD_06387', 'gene_product': 'JNAKOBFD_06387',
'genes_inactivated': 'JNAKOBFD_06387', 'html_gene_name': "<i>JNAKOBFD_06387</i>",
'mutation_category': 'large_deletion'
}
),
(
"RA 13 . 1 598843 0 A G consensus_score=20.7 frequency=1 gene_name=JNAKOBFD_00523/JNAKOBFD_00524 gene_position=intergenic (+4/+95) gene_product=[locus_tag=Bcen2424_2337] html_gene_name=<i>JNAKOBFD_00523</i> → / ← <i>JNAKOBFD_00524</i> locus_tag=–/– major_base=G major_cov=4/3 major_frequency=1.000e+00 minor_base=N minor_cov=0/0 new_cov=4/3 polymorphism_frequency=1.000e+00 polymorphism_score=NA prediction=consensus ref_cov=0/0 snp_type=intergenic total_cov=4/3",
{
'category_id': "RA", 'evidence_id': '13', 'parent_ids': '.', 'seq_id': '1', 'position': '598843',
'insert_position': '0', 'ref_base': 'A', 'new_base': 'G', 'consensus_score': '20.7', 'frequency': '1',
'gene_name': 'JNAKOBFD_00523/JNAKOBFD_00524', 'gene_position': 'intergenic (+4/+95)',
'gene_product': '[locus_tag=Bcen2424_2337]',
'html_gene_name': '<i>JNAKOBFD_00523</i> → / ← <i>JNAKOBFD_00524</i>',
'locus_tag': "–/–", 'major_base': 'G', 'major_frequency': '1.000e+00',
'minor_base': 'N', 'polymorphism_frequency': '1.000e+00',
'polymorphism_score': 'NA', 'prediction': 'consensus', 'snp_type': 'intergenic',
#'total_cov': '4/3',
'total_cov_forward': '4',
'total_cov_reverse': '3',
#'major_cov': '4/3',
'major_cov_forward': '4',
'major_cov_reverse': '3',
#'new_cov': '4/3',
'new_cov_forward': '4',
'new_cov_reverse': '3',
#'minor_cov': '0/0',
'minor_cov_forward': '0',
'minor_cov_reverse': '0',
#'ref_cov': '0/0'
'ref_cov_forward': '0',
'ref_cov_reverse': '0'
}
),
(
"RA 39 . 1 1301317 0 G C aa_new_seq=T aa_position=757 aa_ref_seq=T codon_new_seq=ACG codon_number=757 codon_position=3 codon_ref_seq=ACC consensus_score=14.0 frequency=1 gene_name=JNAKOBFD_01153 gene_position=2271 gene_product=[locus_tag=Bcen2424_5983] gene_strand=< html_gene_name=<i>JNAKOBFD_01153</i> ← major_base=C major_cov=4/4 major_frequency=1.000e+00 minor_base=N minor_cov=0/0 new_cov=4/4 new_seq=C polymorphism_frequency=1.000e+00 polymorphism_score=NA prediction=consensus ref_cov=0/0 ref_seq=G snp_type=synonymous total_cov=4/4 transl_table=1",
{
'category_id': 'RA', 'evidence_id': '39', 'parent_ids': '.', 'seq_id': '1',
'position': '1301317',
'insert_position': '0', 'ref_base': 'G', 'new_base': 'C', 'aa_new_seq': 'T', 'aa_position': '757',
'aa_ref_seq': 'T',
'codon_new_seq': 'ACG', 'codon_number': '757', 'codon_position': '3', 'codon_ref_seq': 'ACC',
'consensus_score': '14.0',
'frequency': '1', 'gene_name': 'JNAKOBFD_01153', 'gene_position': '2271',
'gene_product': '[locus_tag=Bcen2424_5983]',
'gene_strand': '<', 'html_gene_name': '<i>JNAKOBFD_01153</i> ←', 'major_base': 'C',
'major_frequency': '1.000e+00', 'minor_base': 'N',
'new_seq': 'C',
'polymorphism_frequency': '1.000e+00', 'polymorphism_score': 'NA', 'prediction': 'consensus',
'ref_seq': 'G', 'snp_type': 'synonymous', 'transl_table': '1',
#'major_cov': '4/4',
'major_cov_forward': '4',
'major_cov_reverse': '4',
#'ref_cov': '0/0',
'ref_cov_forward': '0',
'ref_cov_reverse': '0',
#'minor_cov': '0/0',
'minor_cov_forward': '0',
'minor_cov_reverse': '0',
#'new_cov': '4/4',
'new_cov_forward': '4',
'new_cov_reverse': '4',
#'total_cov': '4/4',
'total_cov_forward': '4',
'total_cov_reverse': '4'
}
),
(
"MC 95 . 5 1 1248 0 0 gene_name=JNAKOBFD_06387 gene_product=JNAKOBFD_06387 html_gene_name=<i>JNAKOBFD_06387</i> left_inside_cov=0 left_outside_cov=NA right_inside_cov=0 right_outside_cov=NA",
{'category_id': 'MC', 'evidence_id': '95', 'parent_ids': '.', 'seq_id': '5', 'start': '1',
'end': '1248', 'start_range': '0', 'end_range': '0', 'gene_name': 'JNAKOBFD_06387',
'gene_product': "JNAKOBFD_06387", 'html_gene_name': '<i>JNAKOBFD_06387</i>', 'left_inside_cov': '0',
'left_outside_cov': 'NA', 'right_inside_cov': '0', 'right_outside_cov': 'NA'
}
)
]
@pytest.mark.parametrize(
"line, expected",
sample_gd_lines
)
def test_split_fields(parser, line, expected):
line = line.split('\t')
number_of_positional_arguments = len([i for i in line if '=' not in i])
line = line[number_of_positional_arguments:]
result = parser.parse_keyword_fields(line)
# Need to remove the empty fields since this method is supposed to be given a truncated line anyway.
result = {k: v for k, v in result.items() if v}
_to_remove = ['category_id', 'evidence_id', 'parent_ids', 'seq_id', 'position', 'new_seq', 'size']
# Need to remove the positional arguments
result = {k: v for k, v in result.items() if k not in _to_remove}
    # Filter the expected values down to the keyword fields the parser can actually return
    # (drops the positional arguments and any fields not present in the parsed result).
    expected = {k: v for k, v in expected.items() if k in result.keys()}
    # parse_keyword_fields is only ever given the keyword portion of the line.
assert result == expected
@pytest.mark.parametrize(
"line, expected",
[
("SNP 1 13 1 598843 G", {
'category_id': 'SNP', 'evidence_id': '1', 'parent_ids': '13', 'seq_id': '1', 'position': '598843',
'new_seq': 'G'
}),
("SNP 4 39 1 1301317 C", {
'category_id': 'SNP', 'evidence_id': '4', 'parent_ids': '39', 'seq_id': '1', 'position': '1301317',
'new_seq': 'C'
}),
("INS 2 16 1 1137319 C", {
'category_id': 'INS', 'evidence_id': '2', 'parent_ids': '16', 'seq_id': '1', 'position': '1137319',
'new_seq': 'C'
}),
("DEL 11 95 5 1 1248", {
'category_id': 'DEL', 'evidence_id': '11', 'parent_ids': '95', 'seq_id': '5', 'position': '1',
'size': '1248'
}),
("MOB 1 117,118 REL606 16972 IS150 -1 3", {
'category_id': 'MOB', 'evidence_id': '1', 'parent_ids': '117,118', 'seq_id': 'REL606', 'position': '16972',
'repeat_name': 'IS150', 'strand': '-1', 'duplication_size': '3'
})
]
)
def test_parse_positional_fields(parser, line, expected):
line = line.split("\t")
result = parser.parse_positional_fields(line)
assert result == expected
@pytest.mark.parametrize(
"line, expected",
sample_gd_lines
)
def test_parse_mutation(parser, line, expected):
result = parser.parse_line(line)
assert result == expected
| 99.019763 | 919 | 0.757624 | [
"MIT"
] | cdeitrick/isolate_parsers | tests/test_gd_to_table.py | 25,100 | Python |
#!/usr/bin/env python3
import argparse
import os
import pprint
import struct
import sys
import traceback
def read_uint32(fp, pos):
"""Read 4 little-endian bytes into an unsigned 32-bit integer. Return value, position + 4."""
fp.seek(pos)
val = struct.unpack("<I", fp.read(4))[0]
return val, pos + 4
def read_strn(fp, pos, size):
"""Read N null-padded bytes into an ascii encoded string. Return value, position + N."""
fp.seek(pos)
val = struct.unpack("<" + str(size) + "s", fp.read(size))[0]
return val.decode("ascii").strip("\0"), pos + size
def write_into(fp, fmt, *args):
b = struct.pack(fmt, *args)
fp.write(b)
return len(b)
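# Illustrative use of the helpers above (`fp` and `out` are already-open binary
# file objects):
#   magic, pos = read_uint32(fp, 0)          # little-endian uint32 at offset 0
#   name, pos = read_strn(fp, pos, 8)        # 8 bytes, trailing NULs stripped
#   n = write_into(out, "<II", 12345678, 2)  # packs two uint32s, returns 8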
def pack(file_array, args):
"""Packs files or folders given into the first argument: a target file name or a directory (archive will be named the same as directory).
If archive name exists, appends number and tries again."""
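    # Illustrative calls (hypothetical paths): pack(["DATA"], args) walks the DATA/
    # folder and writes DATA.dat (or DATA-1.dat if that name is taken), while
    # pack(["out.dat", "DATA", "EXTRA"], args) packs the contents of those folders
    # into out.dat. Inputs are walked recursively; only directories are traversed.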
output_target = file_array[0]
input_set = file_array.copy() # shallow copy the array
if os.path.isdir(output_target):
# passed in a folder: name is our dat file name, input is full array (default)
output_target = os.path.basename(os.path.realpath(output_target.rstrip("\\/")))
if len(output_target) == 0:
print("Error: Archive name invalid.", file=sys.stderr)
return
    elif len(output_target) > 4 and output_target[-4:].upper() == ".DAT":
        # passed in a .dat name: keep the name (the extension is re-added below), input is everything else in array
        output_target = output_target[:-4]
        input_set = file_array[1:]
else:
# passed in just a set of files where [0] not being .dat so no name hint: error
print("Error: Unknown file(s). Please provide a .DAT file or existing folder to pack it.", file=sys.stderr)
return
# traverse directories now to get all file paths going in
input_files = []
for f in input_set:
for dirpath, dirnames, filenames in os.walk(f):
# skip dirs that start with . (like .git, ._DS_STORE, etc) or __ (like __MACOSX)
for dirname in dirnames:
if dirname.startswith(".") or dirname.startswith("__"):
dirnames.remove(dirname)
input_files += [os.path.join(dirpath, name) for name in filenames if not name.startswith(".") and not name.startswith("__")]
# look for path of output and if conflicts add numbers
try_count = 0
alt = ""
while os.path.exists("{}{}.dat".format(output_target, alt)):
try_count += 1
alt = "-{}".format(try_count)
if try_count >= 100:
print("Error: Archive output file exists and no alternative.", file=sys.stderr)
return
output_target = "{}{}.dat".format(output_target, alt)
if not args.quiet:
print("Packing {} files into {}...".format(len(input_files), output_target))
# iterate files for file_tables
file_tables = []
for file_path in input_files:
try:
#print(file_path)
# start by creating file table objects
name, ext = os.path.splitext(os.path.basename(file_path).lower())
# sanity checking
try:
name = name.encode("ascii")
ext = ext.encode("ascii")
except UnicodeError:
print("Error: Input file names must be valid ASCII. {} skipped.".format(file_path), file=sys.stderr)
continue
if len(ext) != 4:
print("Error: Input file names must have 3 character extensions. {} skipped.".format(file_path), file=sys.stderr)
continue
ext = ext[1:]
if len(name) < 1 or len(name) > 8:
print("Error: Input file names must be <= 8 characters in length. {} skipped.".format(file_path), file=sys.stderr)
continue
if b"." in name:
print("Error: Input file names cannot have multiple extensions or additional dots. {} skipped.".format(file_path), file=sys.stderr)
continue
# create fd object (pos TBD)
this_name_obj = {"name": name, "size": os.path.getsize(file_path), "pos": None, "full_name": file_path}
# create ft object, or use existing (pos and count TBD)
this_ext_table = None
for table in file_tables:
if table["name"] == ext:
this_ext_table = table
break
if this_ext_table is None:
this_ext_table = {"name": ext, "count": None, "pos": None, "files": [this_name_obj]}
file_tables.append(this_ext_table)
else:
this_ext_table["files"].append(this_name_obj)
except Exception as err:
print("Error: Uncaught exception locating file: " + file_path, file=sys.stderr)
print("{}".format(err), file=sys.stderr)
if not args.quiet:
traceback.print_exc()
return
try:
# determine offsets of tables
pos = 8 # header size
ft_count = len(file_tables)
pos += ft_count * 12 # end of ft tables
for ft in file_tables:
fd_count = len(ft["files"])
ft["count"] = fd_count
ft["pos"] = pos
pos += fd_count * 16 # size of fd entries for this ext
# determine offsets of files
for ft in file_tables:
for fd in ft["files"]:
fd["pos"] = pos
pos += fd["size"]
# start writing archive
with open(output_target, "wb") as f:
pos = 0
pos += write_into(f, "<II", 12345678, ft_count)
for ft in file_tables:
f.write(ft["name"].ljust(4, b"\0"))
pos += 4
pos += write_into(f, "<II", ft["pos"], ft["count"])
for ft in file_tables:
for fd in ft["files"]:
f.write(fd["name"].ljust(8, b"\0"))
pos += 8
pos += write_into(f, "<II", fd["size"], fd["pos"])
for ft in file_tables:
for fd in ft["files"]:
file_path = fd["full_name"]
if not args.quiet:
print(file_path)
try:
with open(file_path, "rb") as fi:
f.write(fi.read(fd["size"]))
except Exception as err:
print("Error: Uncaught exception writing file to archive: " + output_target + " <- " + file_path, file=sys.stderr)
print("{}".format(err), file=sys.stderr)
if not args.quiet:
traceback.print_exc()
return
except Exception as err2:
print("Error: Uncaught exception writing archive: " + output_target, file=sys.stderr)
print("{}".format(err2), file=sys.stderr)
if not args.quiet:
traceback.print_exc()
def unpack(file_array, args):
"""Unpacks one or more files given the provided arguments.
If contents exist in the target output folder, they will be overwritten."""
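    # Illustrative call (hypothetical archive): unpack(["GAME.DAT"], args) creates a
    # GAME/ folder in the current working directory and writes each entry as
    # GAME/<name>.<ext>; with args.test set, only the parsed file tables are printed.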
if not args.quiet:
print("Unpacking {} files...".format(len(file_array)))
# var used for not spamming errors about files of the wrong type, assumes at least some are valid...
is_multiple = (len(file_array) != 1)
for file_path in file_array:
try:
if not os.path.isfile(file_path):
print("Error: File not found: " + file_path, file=sys.stderr)
continue
# base file name (folder name we'll be using)
basename = os.path.basename(os.path.realpath(file_path))
if len(basename) <= 4 or basename[-4:].upper() != ".DAT":
if not is_multiple:
print("Error: Not an archive of the correct format [file name error]: " + file_path, file=sys.stderr)
continue
# make dirname
dirname = basename[:-4] + "/"
# print bare archive name
if not args.quiet or args.test:
print(basename)
# start reading
with open(file_path, "rb") as f:
pos = 0
magic, pos = read_uint32(f, pos)
# must start with magic
if magic != 12345678:
if not is_multiple:
print("Error: Not an archive of the correct format [magic number error]: " + file_path, file=sys.stderr)
continue
# read file table count
ft_count, pos = read_uint32(f, pos)
# read each file table
file_tables = []
for x in range(0, ft_count):
ftext, pos = read_strn (f, pos, 4)
ftpos, pos = read_uint32(f, pos)
ftnum, pos = read_uint32(f, pos)
ft = {"name": ftext, "count": ftnum, "pos": ftpos, "files": []}
# subtables
for y in range(0, ftnum):
fdnam, ftpos = read_strn (f, ftpos, 8)
fdsiz, ftpos = read_uint32(f, ftpos)
fdpos, ftpos = read_uint32(f, ftpos)
fd = {"name": fdnam, "size": fdsiz, "pos": fdpos}
ft["files"].append(fd)
file_tables.append(ft)
#print("{}".format(file_tables))
if args.test:
# -t: test mode, print contents and exit
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(file_tables)
else:
# normal, write to files in dirname
if not os.path.isdir(dirname):
# DNE
os.mkdir(dirname)
# iterate through structures
for ft in file_tables:
for fd in ft["files"]:
# print "folder/file"
out_path = os.path.join(dirname, fd["name"] + "." + ft["name"])
if not args.quiet:
print(out_path)
# write to "folder/file"
with open(out_path, "wb") as fo:
f.seek(fd["pos"])
fo.write(f.read(fd["size"]))
except Exception as err:
print("Error: Uncaught exception parsing file: " + file_path, file=sys.stderr)
print("{}".format(err), file=sys.stderr)
if not args.quiet:
traceback.print_exc()
continue
def main():
MODE_UNSET = 0
MODE_UNPACK = 1
MODE_PACK = 2
parser = argparse.ArgumentParser(description="Packs and unpacks Zwei!! [Zwei: The Arges Adventure] DAT archives.")
parser.add_argument("-u", "--unpack", help="try unpacking", action="store_true")
parser.add_argument("-p", "--pack", help="try packing", action="store_true")
parser.add_argument("-t", "--test", help="do not write output and print what would happen instead (test)", action="store_true")
parser.add_argument("-q", "--quiet", help="suppress per-file output", action="store_true")
parser.add_argument("-s", "--from-sh", help="suppress pause on completion, if invoked from CLI", action="store_true")
parser.add_argument("infile", nargs="*", help="file(s) to read")
args = parser.parse_args()
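    # Illustrative invocations (hypothetical names; zw1_pack.py is this script):
    #   python zw1_pack.py GAME.DAT        -> unpack into ./GAME/
    #   python zw1_pack.py GAME            -> pack the GAME/ folder into GAME.dat
    #   python zw1_pack.py -t GAME.DAT     -> print archive contents without writing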
mode = MODE_UNSET
if args.pack and args.unpack:
print("Error: You cannot both pack and unpack the input.", file=sys.stderr)
return args.from_sh
count = len(args.infile)
if count == 0:
print("Error: No files. Please provide a .DAT file to unpack or a folder to pack.", file=sys.stderr)
return args.from_sh
if args.pack:
# -p
mode = MODE_PACK
elif args.unpack:
# -u
mode = MODE_UNPACK
if mode == MODE_UNSET and os.path.isdir(args.infile[0]):
# first file is dir -> pack
mode = MODE_PACK
if mode == MODE_UNSET and os.path.isfile(args.infile[0]) and args.infile[0][-4:].upper() == ".DAT":
# first file is .dat -> unpack
mode = MODE_UNPACK
if mode == MODE_UNSET:
print("Error: Unknown file(s). Please provide a .DAT file to unpack or a folder to pack.", file=sys.stderr)
elif mode == MODE_UNPACK:
unpack(args.infile, args)
elif mode == MODE_PACK:
pack(args.infile, args)
return args.from_sh
if __name__ == "__main__":
if not main():
input("Press Enter to close...")
| 39.705882 | 147 | 0.538012 | [
"MIT"
] | nmbook/zw1-pack | zw1_pack.py | 12,825 | Python |
class Spam:
def eggs(self):
assert False
def eggs_and_ham(self):
assert False
| 14.714286 | 27 | 0.592233 | [
"Apache-2.0"
] | 06needhamt/intellij-community | python/testData/create_tests/create_tst_class.expected_pytest_3k.py | 103 | Python |
# Count how many distinct remainders the ten input numbers leave when divided by 42.
remainders = set()
for _ in range(10):
    remainders.add(int(input()) % 42)
print(len(remainders)) | 12 | 31 | 0.55 | [
"MIT"
] | Lumia1108/TIL | baekjoon/Python/3052.py | 120 | Python |
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Generates a Group Policy admx/adml template file for Google Update policies.
The resulting strings and files use CRLF as required by gpedit.msc.
To unit test this module, just run the file from the command line.
"""
from __future__ import print_function
import codecs
import filecmp
import os
import re
import sys
MAIN_POLICY_KEY = r'Software\Policies\HuhiSoftware\Update'
ADMX_HEADER = '<policyDefinitions revision="1.0" schemaVersion="1.0">'
ADMX_ENVIRONMENT = '''
<policyNamespaces>
<target namespace="Google.Policies.Update" prefix="update"/>
<using namespace="Google.Policies" prefix="Google"/>
<using prefix="windows" namespace="Microsoft.Policies.Windows" />
</policyNamespaces>
<supersededAdm fileName="GoogleUpdate.adm" />
<resources minRequiredRevision="1.0" />
<supportedOn>
<definitions>
<definition name="Sup_GoogleUpdate1_2_145_5"
displayName="$(string.Sup_GoogleUpdate1_2_145_5)" />
<definition name="Sup_GoogleUpdate1_3_21_81"
displayName="$(string.Sup_GoogleUpdate1_3_21_81)" />
<definition name="Sup_GoogleUpdate1_3_26_0"
displayName="$(string.Sup_GoogleUpdate1_3_26_0)" />
<definition name="Sup_GoogleUpdate1_3_33_5"
displayName="$(string.Sup_GoogleUpdate1_3_33_5)" />
<definition name="Sup_GoogleUpdate1_3_34_3"
displayName="$(string.Sup_GoogleUpdate1_3_34_3)" />
</definitions>
</supportedOn>
'''
ADMX_CATEGORIES = r'''
<categories>
<category name="Cat_GoogleUpdate" displayName="$(string.Cat_GoogleUpdate)"
explainText="$(string.Explain_GoogleUpdate)">
<parentCategory ref="Google:Cat_Google" />
</category>
<category name="Cat_Preferences" displayName="$(string.Cat_Preferences)"
explainText="$(string.Explain_Preferences)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
<category name="Cat_ProxyServer" displayName="$(string.Cat_ProxyServer)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
<category name="Cat_Applications" displayName="$(string.Cat_Applications)"
explainText="$(string.Explain_Applications)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
%(AppCategorList)s
</categories>
'''
ADMX_POLICIES = r'''
<policies>
<policy name="Pol_AutoUpdateCheckPeriod" class="Machine"
displayName="$(string.Pol_AutoUpdateCheckPeriod)"
explainText="$(string.Explain_AutoUpdateCheckPeriod)"
presentation="$(presentation.Pol_AutoUpdateCheckPeriod)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<decimal id="Part_AutoUpdateCheckPeriod"
key="%(RootPolicyKey)s"
valueName="AutoUpdateCheckPeriodMinutes"
required="true" minValue="0" maxValue="43200" />
</elements>
</policy>
<policy name="Pol_DownloadPreference" class="Machine"
displayName="$(string.Pol_DownloadPreference)"
explainText="$(string.Explain_DownloadPreference)"
presentation="$(presentation.Pol_DownloadPreference)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_3_26_0" />
<elements>
<enum id="Part_DownloadPreference" key="%(RootPolicyKey)s"
valueName="DownloadPreference">
<item displayName="$(string.DownloadPreference_DropDown)">
<value>
<string>cacheable</string>
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_UpdateCheckSuppressedPeriod" class="Machine"
displayName="$(string.Pol_UpdateCheckSuppressedPeriod)"
explainText="$(string.Explain_UpdateCheckSuppressedPeriod)"
presentation="$(presentation.Pol_UpdateCheckSuppressedPeriod)"
key="Software\Policies\Google\Update">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_3_33_5" />
<elements>
<decimal id="Part_UpdateCheckSuppressedStartHour"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedStartHour"
required="true" minValue="0" maxValue="23" />
<decimal id="Part_UpdateCheckSuppressedStartMin"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedStartMin"
required="true" minValue="0" maxValue="59" />
<decimal id="Part_UpdateCheckSuppressedDurationMin"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedDurationMin"
required="true" minValue="1" maxValue="960" />
</elements>
</policy>
<policy name="Pol_ProxyMode" class="Machine"
displayName="$(string.Pol_ProxyMode)"
explainText="$(string.Explain_ProxyMode)"
presentation="$(presentation.Pol_ProxyMode)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<enum id="Part_ProxyMode" key="%(RootPolicyKey)s"
valueName="ProxyMode">
<item displayName="$(string.ProxyDisabled_DropDown)">
<value>
<string>direct</string>
</value>
</item>
<item displayName="$(string.ProxyAutoDetect_DropDown)">
<value>
<string>auto_detect</string>
</value>
</item>
<item displayName="$(string.ProxyPacScript_DropDown)">
<value>
<string>pac_script</string>
</value>
</item>
<item displayName="$(string.ProxyFixedServers_DropDown)">
<value>
<string>fixed_servers</string>
</value>
</item>
<item displayName="$(string.ProxyUseSystem_DropDown)">
<value>
<string>system</string>
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_ProxyServer" class="Machine"
displayName="$(string.Pol_ProxyServer)"
explainText="$(string.Explain_ProxyServer)"
presentation="$(presentation.Pol_ProxyServer)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<text id="Part_ProxyServer" valueName="ProxyServer" />
</elements>
</policy>
<policy name="Pol_ProxyPacUrl" class="Machine"
displayName="$(string.Pol_ProxyPacUrl)"
explainText="$(string.Explain_ProxyPacUrl)"
presentation="$(presentation.Pol_ProxyPacUrl)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<text id="Part_ProxyPacUrl" valueName="ProxyPacUrl" />
</elements>
</policy>
<policy name="Pol_DefaultAllowInstallation" class="Machine"
displayName="$(string.Pol_DefaultAllowInstallation)"
explainText="$(string.Explain_DefaultAllowInstallation)"
presentation="$(presentation.Pol_DefaultAllowInstallation)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Applications" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_InstallPolicy" key="%(RootPolicyKey)s"
valueName="InstallDefault" required="true">
<item displayName="$(string.Name_InstallsEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_InstallsEnabledMachineOnly)">
<value>
<decimal value="4" />
</value>
</item>
<item displayName="$(string.Name_InstallsDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_DefaultUpdatePolicy" class="Machine"
displayName="$(string.Pol_DefaultUpdatePolicy)"
explainText="$(string.Explain_DefaultUpdatePolicy)"
presentation="$(presentation.Pol_DefaultUpdatePolicy)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Applications" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_UpdatePolicy" key="%(RootPolicyKey)s"
valueName="UpdateDefault" required="true">
<item displayName="$(string.Name_UpdatesEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_ManualUpdatesOnly)">
<value>
<decimal value="2" />
</value>
</item>
<item displayName="$(string.Name_AutomaticUpdatesOnly)">
<value>
<decimal value="3" />
</value>
</item>
<item displayName="$(string.Name_UpdatesDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
%(AppPolicyList)s
</policies>
'''
ADMX_APP_POLICY_TEMPLATE = '''\
<policy name="Pol_AllowInstallation%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_AllowInstallation)"
explainText="$(string.Explain_Install%(AppLegalId)s)"
presentation="$(presentation.Pol_AllowInstallation)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_InstallPolicy"
valueName="Install%(AppGuid)s" required="true">
<item displayName="$(string.Name_InstallsEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_InstallsEnabledMachineOnly)">
<value>
<decimal value="4" />
</value>
</item>
<item displayName="$(string.Name_InstallsDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_UpdatePolicy%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_UpdatePolicy)"
explainText="$(string.Explain_AutoUpdate%(AppLegalId)s)"
presentation="$(presentation.Pol_UpdatePolicy)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_UpdatePolicy"
valueName="Update%(AppGuid)s" required="true">
<item displayName="$(string.Name_UpdatesEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_ManualUpdatesOnly)">
<value>
<decimal value="2" />
</value>
</item>
<item displayName="$(string.Name_AutomaticUpdatesOnly)">
<value>
<decimal value="3" />
</value>
</item>
<item displayName="$(string.Name_UpdatesDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_TargetVersionPrefix%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_TargetVersionPrefix)"
explainText="$(string.Explain_TargetVersionPrefix%(AppLegalId)s)"
presentation="$(presentation.Pol_TargetVersionPrefix)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_3_33_5" />
<elements>
<text id="Part_TargetVersionPrefix"
valueName="TargetVersionPrefix%(AppGuid)s" />
</elements>
</policy>
<policy name="Pol_RollbackToTargetVersion%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_RollbackToTargetVersion)"
explainText="$(string.Explain_RollbackToTargetVersion%(AppLegalId)s)"
presentation="$(presentation.Pol_RollbackToTargetVersion)"
key="%(RootPolicyKey)s"
valueName="RollbackToTargetVersion%(AppGuid)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_3_34_3" />
<enabledValue><decimal value="1" /></enabledValue>
<disabledValue><decimal value="0" /></disabledValue>
</policy>'''
ADMX_FOOTER = '</policyDefinitions>'
def _CreateLegalIdentifier(input_string):
"""Converts input_string to a legal identifier for ADMX/ADML files.
  Also removes some characters that would not necessarily cause problems, and may
  not handle all cases.
Args:
input_string: Text to convert to a legal identifier.
Returns:
String containing a legal identifier based on input_string.
"""
return re.sub(r'[\W_]', '', input_string)
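# For example, _CreateLegalIdentifier('Google Test Foo') returns 'GoogleTestFoo' and
# _CreateLegalIdentifier('Foo_Bar 1.2') returns 'FooBar12': every character that is
# not a letter or digit (underscores included) is stripped.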
def GenerateGroupPolicyTemplateAdmx(apps):
"""Generates a Group Policy template (ADMX format)for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
          * optional string to append to the auto-update explanation
            - Should start with a space or double new line.
          * optional rollback disclaimer string
            - Pass an empty string to use the default disclaimer.
Returns:
String containing the contents of the .ADMX file.
"""
def _GenerateCategories(apps):
"""Generates category string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the category string for each app
in apps, each populated with the appropriate app-specific strings.
"""
admx_app_category_template = (
' <category name="Cat_%(AppLegalId)s"\n'
' displayName="$(string.Cat_%(AppLegalId)s)">\n'
' <parentCategory ref="Cat_Applications" />\n'
' </category>')
app_category_list = []
for app in apps:
app_name = app[0]
app_category_list.append(admx_app_category_template % {
'AppLegalId': _CreateLegalIdentifier(app_name)
})
return ADMX_CATEGORIES % {'AppCategorList': '\n'.join(app_category_list)}
def _GeneratePolicies(apps):
"""Generates policy string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the policy template for each app
in apps, each populated with the appropriate app-specific strings.
"""
app_policy_list = []
for app in apps:
app_name, app_guid, _, _ = app
app_policy_list.append(ADMX_APP_POLICY_TEMPLATE % {
'AppLegalId': _CreateLegalIdentifier(app_name),
'AppGuid': app_guid,
'RootPolicyKey': MAIN_POLICY_KEY,
})
return ADMX_POLICIES % {
'AppPolicyList': '\n'.join(app_policy_list),
'RootPolicyKey': MAIN_POLICY_KEY,
}
target_contents = [
ADMX_HEADER,
ADMX_ENVIRONMENT,
_GenerateCategories(apps),
_GeneratePolicies(apps),
ADMX_FOOTER,
]
return ''.join(target_contents)
ADML_HEADER = '''\
<policyDefinitionResources revision="1.0" schemaVersion="1.0">
'''
ADML_ENVIRONMENT = '''\
<displayName>
</displayName>
<description>
</description>
'''
ADML_DEFAULT_ROLLBACK_DISCLAIMER = (
'This policy is meant to serve as temporary measure when Enterprise '
'Administrators need to downgrade for business reasons. To ensure '
'users are protected by the latest security updates, the most recent '
'version should be used. When versions are downgraded to older '
'versions, there could be incompatibilities.')
ADML_DOMAIN_REQUIREMENT_EN = (
'This policy is available only on Windows instances that are joined to a '
'Microsoft® Active Directory® domain.')
ADML_PREDEFINED_STRINGS_TABLE_EN = [
('Sup_GoogleUpdate1_2_145_5', 'At least Google Update 1.2.145.5'),
('Sup_GoogleUpdate1_3_21_81', 'At least Google Update 1.3.21.81'),
('Sup_GoogleUpdate1_3_26_0', 'At least Google Update 1.3.26.0'),
('Sup_GoogleUpdate1_3_33_5', 'At least Google Update 1.3.33.5'),
('Sup_GoogleUpdate1_3_34_3', 'At least Google Update 1.3.34.3'),
('Cat_GoogleUpdate', 'Google Update'),
('Cat_Preferences', 'Preferences'),
('Cat_ProxyServer', 'Proxy Server'),
('Cat_Applications', 'Applications'),
('Pol_AutoUpdateCheckPeriod', 'Auto-update check period override'),
('Pol_UpdateCheckSuppressedPeriod',
'Time period in each day to suppress auto-update check'),
('Pol_DownloadPreference', 'Download URL class override'),
('DownloadPreference_DropDown', 'Cacheable download URLs'),
('Pol_ProxyMode', 'Choose how to specify proxy server settings'),
('Pol_ProxyServer', 'Address or URL of proxy server'),
('Pol_ProxyPacUrl', 'URL to a proxy .pac file'),
('Pol_DefaultAllowInstallation', 'Allow installation default'),
('Pol_AllowInstallation', 'Allow installation'),
('Pol_DefaultUpdatePolicy', 'Update policy override default'),
('Pol_UpdatePolicy', 'Update policy override'),
('Pol_TargetVersionPrefix', 'Target version prefix override'),
('Pol_RollbackToTargetVersion', 'Rollback to Target version'),
('Part_AutoUpdateCheckPeriod', 'Minutes between update checks'),
('Part_UpdateCheckSuppressedStartHour',
'Hour in a day that start to suppress update check'),
('Part_UpdateCheckSuppressedStartMin',
'Minute in hour that starts to suppress update check'),
('Part_UpdateCheckSuppressedDurationMin',
'Number of minutes to suppress update check each day'),
('Part_ProxyMode', 'Choose how to specify proxy server settings'),
('Part_ProxyServer', 'Address or URL of proxy server'),
('Part_ProxyPacUrl', 'URL to a proxy .pac file'),
('Part_InstallPolicy', 'Policy'),
('Name_InstallsEnabled', 'Always allow Installs (recommended)'),
('Name_InstallsEnabledMachineOnly',
'Always allow Machine-Wide Installs, but not Per-User Installs.'),
('Name_InstallsDisabled', 'Installs disabled'),
('Part_UpdatePolicy', 'Policy'),
('Part_TargetVersionPrefix', 'Target version prefix'),
('Name_UpdatesEnabled', 'Always allow updates (recommended)'),
('Name_ManualUpdatesOnly', 'Manual updates only'),
('Name_AutomaticUpdatesOnly', 'Automatic silent updates only'),
('Name_UpdatesDisabled', 'Updates disabled'),
('ProxyDisabled_DropDown', 'Never use a proxy'),
('ProxyAutoDetect_DropDown', 'Auto detect proxy settings'),
('ProxyPacScript_DropDown', 'Use a .pac proxy script'),
('ProxyFixedServers_DropDown', 'Use fixed proxy servers'),
('ProxyUseSystem_DropDown', 'Use system proxy settings'),
('Explain_GoogleUpdate',
'Policies to control the installation and updating of Google applications '
'that use Google Update/Google Installer.'),
('Explain_Preferences', 'General policies for Google Update.'),
('Explain_AutoUpdateCheckPeriod',
'Minimum number of minutes between automatic update checks.\n\n'
'Set the value to 0 if you want to disable all auto-update checks '
'(not recommended).\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_DownloadPreference',
'If enabled, the Google Update server will attempt to provide '
'cache-friendly URLs for update payloads in its responses.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_UpdateCheckSuppressedPeriod',
'If this setting is enabled, update checks will be suppressed during '
'each day starting from Hour:Minute for a period of Duration (in minutes).'
' Duration does not account for daylight savings time. So for instance, '
'if the start time is 22:00, and with a duration of 480 minutes, the '
'updates will be suppressed for 8 hours regardless of whether daylight '
'savings time changes happen in between.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyMode',
'Allows you to specify the proxy server used by Google Update.\n\n'
'If you choose to never use a proxy server and always connect directly, '
'all other options are ignored.\n\n'
'If you choose to use system proxy settings or auto detect the proxy '
'server, all other options are ignored.\n\n'
'If you choose fixed server proxy mode, you can specify further options '
'in \'Address or URL of proxy server\'.\n\n'
'If you choose to use a .pac proxy script, you must specify the URL to '
'the script in \'URL to a proxy .pac file\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyServer',
'You can specify the URL of the proxy server here.\n\n'
'This policy only takes effect if you have selected manual proxy settings '
'at \'Choose how to specify proxy server settings\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyPacUrl',
'You can specify a URL to a proxy .pac file here.\n\n'
'This policy only takes effect if you have selected manual proxy settings '
'at \'Choose how to specify proxy server settings\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_Applications', 'Policies for individual applications.\n\n'
'An updated ADMX/ADML template will be required to support '
'Google applications released in the future.'),
('Explain_DefaultAllowInstallation',
'Specifies the default behavior for whether Google software can be '
'installed using Google Update/Google Installer.\n\n'
'Can be overridden by the "Allow installation" for individual '
'applications.\n\n'
'Only affects installation of Google software using Google Update/Google '
'Installer. Cannot prevent running the application installer directly or '
'installation of Google software that does not use Google Update/Google '
'Installer for installation.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_DefaultUpdatePolicy',
'Specifies the default policy for software updates from Google.\n\n'
'Can be overridden by the "Update policy override" for individual '
'applications.\n\n'
'Options:\n'
' - Always allow updates: Updates are always applied when found, either '
'by periodic update check or by a manual update check.\n'
' - Manual updates only: Updates are only applied when the user does a '
'manual update check. (Not all apps provide an interface for this.)\n'
' - Automatic silent updates only: Updates are only applied when they are '
'found via the periodic update check.\n'
' - Updates disabled: Never apply updates.\n\n'
'If you select manual updates, you should periodically check for updates '
'using each application\'s manual update mechanism if available. If you '
'disable updates, you should periodically check for updates and '
'distribute them to users.\n\n'
'Only affects updates for Google software that uses Google Update for '
'updates. Does not prevent auto-updates of Google software that does not '
'use Google Update for updates.\n\n'
'Updates for Google Update are not affected by this setting; Google '
'Update will continue to update itself while it is installed.\n\n'
   'WARNING: Disabling updates will also prevent updates of any new Google '
'applications released in the future, possibly including dependencies for '
'future versions of installed applications.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
]
ADML_PRESENTATIONS = '''\
<presentation id="Pol_AutoUpdateCheckPeriod">
<decimalTextBox refId="Part_AutoUpdateCheckPeriod" defaultValue="1400"
spinStep="60">Minutes between update checks</decimalTextBox>
</presentation>
<presentation id="Pol_UpdateCheckSuppressedPeriod">
<decimalTextBox refId="Part_UpdateCheckSuppressedStartHour"
defaultValue="0" spinStep="1">Hour</decimalTextBox>
<decimalTextBox refId="Part_UpdateCheckSuppressedStartMin"
defaultValue="0" spinStep="1">Minute</decimalTextBox>
<decimalTextBox refId="Part_UpdateCheckSuppressedDurationMin"
defaultValue="60">Duration</decimalTextBox>
</presentation>
<presentation id="Pol_DownloadPreference">
<dropdownList refId="Part_DownloadPreference"
defaultItem="0">Type of download URL to request</dropdownList>
</presentation>
<presentation id="Pol_ProxyMode">
<dropdownList refId="Part_ProxyMode"
defaultItem="0">Choose how to specify proxy server settings
</dropdownList>
</presentation>
<presentation id="Pol_ProxyServer">
<textBox refId="Part_ProxyServer">
<label>Address or URL of proxy server</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_ProxyPacUrl">
<textBox refId="Part_ProxyPacUrl">
<label>URL to a proxy .pac file</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_DefaultAllowInstallation">
<dropdownList refId="Part_InstallPolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_DefaultUpdatePolicy">
<dropdownList refId="Part_UpdatePolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_AllowInstallation">
<dropdownList refId="Part_InstallPolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_UpdatePolicy">
<dropdownList refId="Part_UpdatePolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_TargetVersionPrefix">
<textBox refId="Part_TargetVersionPrefix">
<label>Target version prefix</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_RollbackToTargetVersion" />
'''
ADML_RESOURCE_TABLE_TEMPLATE = '''
<resources>
<stringTable>
%s
</stringTable>
<presentationTable>
%s
</presentationTable>
</resources>
'''
ADML_FOOTER = '</policyDefinitionResources>'
def GenerateGroupPolicyTemplateAdml(apps):
"""Generates a Group Policy template (ADML format)for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
          * optional string to append to the auto-update explanation
            - Should start with a space or double new line.
          * optional rollback disclaimer string
            - Pass an empty string to use the default disclaimer.
Returns:
String containing the contents of the .ADML file.
"""
string_definition_list = ADML_PREDEFINED_STRINGS_TABLE_EN[:]
for app in apps:
app_name = app[0]
app_legal_id = _CreateLegalIdentifier(app_name)
app_additional_help_msg = app[2]
rollback_disclaimer = app[3]
if not rollback_disclaimer:
rollback_disclaimer = ADML_DEFAULT_ROLLBACK_DISCLAIMER
app_category = ('Cat_' + app_legal_id, app_name)
string_definition_list.append(app_category)
app_install_policy_explanation = (
'Explain_Install' + app_legal_id,
'Specifies whether %s can be installed using Google Update/Google '
'Installer.\n\n'
'If this policy is not configured, %s can be installed as specified '
'by "Allow installation default".\n\n'
'%s' % (app_name, app_name, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_install_policy_explanation)
app_auto_update_policy_explanation = (
'Explain_AutoUpdate' + app_legal_id,
'Specifies how Google Update handles available %s updates '
'from Google.\n\n'
'If this policy is not configured, Google Update handles available '
'updates as specified by "Update policy override default".\n\n'
'Options:\n'
' - Always allow updates: Updates are always applied when found, '
'either by periodic update check or by a manual update check.\n'
' - Manual updates only: Updates are only applied when the user '
'does a manual update check. (Not all apps provide an interface '
' for this.)\n'
' - Automatic silent updates only: Updates are only applied when '
'they are found via the periodic update check.\n'
' - Updates disabled: Never apply updates.\n\n'
'If you select manual updates, you should periodically check for '
'updates using the application\'s manual update mechanism if '
'available. If you disable updates, you should periodically check '
'for updates and distribute them to users.%s\n\n'
'%s' %
(app_name, app_additional_help_msg, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_auto_update_policy_explanation)
app_target_version_prefix_explanation = (
'Explain_TargetVersionPrefix' + app_legal_id,
'Specifies which version %s should be updated to.\n\n'
'When this policy is enabled, the app will be updated to the version '
'prefixed with this policy value.\n\nSome examples:\n'
'1) Not configured: app will be updated to the latest version '
'available.\n'
'2) Policy value is set to "55.": the app will be updated to any minor '
'version of 55 (e.g., 55.24.34 or 55.60.2).\n'
'3) Policy value is "55.2.": the app will be updated to any minor '
'version of 55.2 (e.g., 55.2.34 or 55.2.2).\n'
'4) Policy value is "55.24.34": the app will be updated to this '
'specific version only.\n\n'
'%s' % (app_name, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_target_version_prefix_explanation)
app_rollback_to_target_version_explanation = (
'Explain_RollbackToTargetVersion' + app_legal_id,
'Specifies that Google Update should roll installations of %s back to '
'the version indicated by "Target version prefix override".\n\n'
'This policy setting has no effect unless "Target version prefix '
'override" is set.\n\n'
'If this policy is not configured or is disabled, installs that have a '
'version higher than that specified by "Target version prefix '
'override" will be left as-is.\n\n'
'If this policy is enabled, installs that have a version higher than '
'that specified by "Target version prefix override" will be downgraded '
'to the highest available version that matches the target version.\n\n'
'%s\n\n'
'%s' % (app_name, rollback_disclaimer, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_rollback_to_target_version_explanation)
app_resource_strings = []
for entry in string_definition_list:
app_resource_strings.append(' <string id="%s">%s</string>' %
(entry[0], entry[1]))
app_resource_tables = (ADML_RESOURCE_TABLE_TEMPLATE %
('\n'.join(app_resource_strings), ADML_PRESENTATIONS))
target_contents = [
ADML_HEADER,
ADML_ENVIRONMENT,
app_resource_tables,
ADML_FOOTER,
]
return ''.join(target_contents)
def WriteGroupPolicyTemplateAdmx(target_path, apps):
"""Writes a Group Policy template (ADM format)for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
target_path: Output path of the .ADM template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
          * optional string to append to the auto-update explanation
            - Should start with a space or double new line.
          * optional rollback disclaimer string
            - Pass an empty string to use the default disclaimer.
"""
contents = GenerateGroupPolicyTemplateAdmx(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close()
def WriteGroupPolicyTemplateAdml(target_path, apps):
"""Writes a Group Policy template (ADM format)for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
target_path: Output path of the .ADM template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
          * optional string to append to the auto-update explanation
            - Should start with a space or double new line.
          * optional rollback disclaimer string
            - Pass an empty string to use the default disclaimer.
"""
contents = GenerateGroupPolicyTemplateAdml(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close()
# Run a unit test when the module is run directly.
if __name__ == '__main__':
TEST_APPS = [
('Google Test Foo', '{D6B08267-B440-4c85-9F79-E195E80D9937}',
' Check http://www.google.com/test_foo/.',
'Disclaimer'),
(u'Google User Test Foo\u00a9\u00ae\u2122',
'{104844D6-7DDA-460b-89F0-FBF8AFDD0A67}',
' Check http://www.google.com/user_test_foo/.',
''),
]
module_dir = os.path.abspath(os.path.dirname(__file__))
gold_path = os.path.join(module_dir, 'test_gold.admx')
output_path = os.path.join(module_dir, 'test_out.admx')
WriteGroupPolicyTemplateAdmx(output_path, TEST_APPS)
admx_files_equal = filecmp.cmp(gold_path, output_path, shallow=False)
if not admx_files_equal:
print('FAIL: ADMX files are not equal.')
gold_path = os.path.join(module_dir, 'test_gold.adml')
output_path = os.path.join(module_dir, 'test_out.adml')
WriteGroupPolicyTemplateAdml(output_path, TEST_APPS)
adml_files_equal = filecmp.cmp(gold_path, output_path, shallow=False)
if not adml_files_equal:
print('FAIL: ADML files are not equal.')
if admx_files_equal and adml_files_equal:
print('SUCCESS. contents are equal')
else:
sys.exit(-1)
| 40.820722 | 80 | 0.665336 | [
"Apache-2.0"
] | huhisoftware/omaha | omaha/enterprise/generate_group_policy_template_admx.py | 35,065 | Python |
from pytest_bdd import scenario, given, when, then, parsers
import contacts
@given("I have a contact book", target_fixture="contactbook")
def contactbook():
return contacts.Application()
@given(parsers.parse("I have a \"{contactname}\" contact"))
def have_a_contact(contactbook, contactname):
contactbook.add(contactname, "000")
@when(parsers.parse("I run the \"{command}\" command"))
def runcommand(contactbook, command):
contactbook.run(command)
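# A "then" step would normally complete these scenarios. A sketch, commented out
# because it assumes a lookup API on contacts.Application that is not shown here:
# @then(parsers.parse("The contact book contains \"{contactname}\""))
# def book_contains(contactbook, contactname):
#     assert contactbook.get(contactname) is not None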
| 24.578947 | 61 | 0.738758 | [
"MIT"
] | PacktPublishing/Crafting-Test-Driven-Software-with-Python | Chapter08/tests/acceptance/steps.py | 467 | Python |
from layer import *
class LogisticLayer(Layer):
def __init__(self, *args, **kwargs):
super(LogisticLayer, self).__init__(*args, **kwargs)
@classmethod
def IsLayerType(cls, proto):
return proto.hyperparams.activation == deepnet_pb2.Hyperparams.LOGISTIC
def ApplyActivation(self):
cm.sigmoid(self.state)
def Sample(self):
self.state.sample_bernoulli(target=self.sample)
def ComputeDeriv(self):
"""Compute derivative w.r.t input given derivative w.r.t output."""
self.deriv.apply_logistic_deriv(self.state)
def GetLoss(self, get_deriv=False, acc_deriv=False, **kwargs):
"""Compute loss and also deriv w.r.t to it if asked for.
Compute the loss function. Targets should be in self.data, predictions
should be in self.state.
Args:
get_deriv: If True, compute the derivative w.r.t the loss function and put
it in self.deriv.
"""
perf = deepnet_pb2.Metrics()
perf.MergeFrom(self.proto.performance_stats)
perf.count = self.batchsize
tiny = self.tiny
if self.loss_function == deepnet_pb2.Layer.CROSS_ENTROPY:
data = self.data
state = self.state
temp1 = self.statesize
cm.cross_entropy_bernoulli(data, state, target=temp1, tiny=self.tiny)
perf.cross_entropy = temp1.sum()
cm.correct_preds(data, state, target=temp1, cutoff=0.5)
perf.correct_preds = temp1.sum()
if get_deriv:
self.state.subtract(self.data, target=self.deriv)
elif self.loss_function == deepnet_pb2.Layer.SQUARED_LOSS:
target = self.statesize
self.state.subtract(self.data, target=target)
error = target.euclid_norm()**2
perf.error = error
if acc_deriv:
self.deriv.add_mult(target, alpha=self.loss_weight)
else:
self.deriv.assign(target)
if get_deriv:
self.ComputeDeriv()
else:
raise Exception('Unknown loss function for logistic units.')
return perf
def GetSparsityDivisor(self):
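    # Computes means * (1 - means), the logistic derivative evaluated at the mean
    # activations, in means_temp2 and returns it.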
self.means_temp2.assign(1)
self.means_temp2.subtract(self.means)
self.means_temp2.mult(self.means)
return self.means_temp2
| 30.4 | 80 | 0.68891 | [
"BSD-3-Clause"
] | Corvalius/deepnet | package/deepnet/logistic_layer.py | 2,128 | Python |
import os
from pathlib import Path
from typing import Any, Text, Dict
import pytest
import rasa.shared.utils.io
import rasa.utils.io
from rasa.core.test import (
_create_data_generator,
_collect_story_predictions,
test as evaluate_stories,
FAILED_STORIES_FILE,
CONFUSION_MATRIX_STORIES_FILE,
REPORT_STORIES_FILE,
SUCCESSFUL_STORIES_FILE,
_clean_entity_results,
)
from rasa.core.policies.memoization import MemoizationPolicy
# we need this import to ignore the warning...
# noinspection PyUnresolvedReferences
from rasa.nlu.test import run_evaluation
from rasa.core.agent import Agent
from tests.core.conftest import (
DEFAULT_STORIES_FILE,
E2E_STORY_FILE_UNKNOWN_ENTITY,
END_TO_END_STORY_FILE,
E2E_STORY_FILE_TRIPS_CIRCUIT_BREAKER,
STORY_FILE_TRIPS_CIRCUIT_BREAKER,
)
async def test_evaluation_file_creation(tmpdir: Path, default_agent: Agent):
failed_stories_path = str(tmpdir / FAILED_STORIES_FILE)
success_stories_path = str(tmpdir / SUCCESSFUL_STORIES_FILE)
report_path = str(tmpdir / REPORT_STORIES_FILE)
confusion_matrix_path = str(tmpdir / CONFUSION_MATRIX_STORIES_FILE)
await evaluate_stories(
stories=DEFAULT_STORIES_FILE,
agent=default_agent,
out_directory=str(tmpdir),
max_stories=None,
e2e=False,
errors=True,
successes=True,
)
assert os.path.isfile(failed_stories_path)
assert os.path.isfile(success_stories_path)
assert os.path.isfile(report_path)
assert os.path.isfile(confusion_matrix_path)
@pytest.mark.parametrize(
"test_file", [END_TO_END_STORY_FILE, "data/test_evaluations/end_to_end_story.yml"]
)
async def test_end_to_end_evaluation_script(default_agent: Agent, test_file: Text):
generator = await _create_data_generator(test_file, default_agent, use_e2e=True)
completed_trackers = generator.generate_story_trackers()
story_evaluation, num_stories = await _collect_story_predictions(
completed_trackers, default_agent, use_e2e=True
)
serialised_store = [
"utter_greet",
"action_listen",
"utter_greet",
"action_listen",
"utter_default",
"action_listen",
"utter_goodbye",
"action_listen",
"utter_greet",
"action_listen",
"utter_default",
"action_listen",
"greet",
"greet",
"default",
"goodbye",
"greet",
"default",
'[{"name": "Max"}]{"entity": "name", "value": "Max"}',
]
assert story_evaluation.evaluation_store.serialise()[0] == serialised_store
assert not story_evaluation.evaluation_store.has_prediction_target_mismatch()
assert len(story_evaluation.failed_stories) == 0
assert num_stories == 3
async def test_end_to_end_evaluation_script_unknown_entity(default_agent: Agent):
generator = await _create_data_generator(
E2E_STORY_FILE_UNKNOWN_ENTITY, default_agent, use_e2e=True
)
completed_trackers = generator.generate_story_trackers()
story_evaluation, num_stories = await _collect_story_predictions(
completed_trackers, default_agent, use_e2e=True
)
assert story_evaluation.evaluation_store.has_prediction_target_mismatch()
assert len(story_evaluation.failed_stories) == 1
assert num_stories == 1
async def test_end_to_evaluation_with_forms(form_bot_agent: Agent):
generator = await _create_data_generator(
"data/test_evaluations/form-end-to-end-stories.md", form_bot_agent, use_e2e=True
)
test_stories = generator.generate_story_trackers()
story_evaluation, num_stories = await _collect_story_predictions(
test_stories, form_bot_agent, use_e2e=True
)
assert not story_evaluation.evaluation_store.has_prediction_target_mismatch()
async def test_source_in_failed_stories(tmpdir: Path, default_agent: Agent):
stories_path = str(tmpdir / FAILED_STORIES_FILE)
await evaluate_stories(
stories=E2E_STORY_FILE_UNKNOWN_ENTITY,
agent=default_agent,
out_directory=str(tmpdir),
max_stories=None,
e2e=False,
)
failed_stories = rasa.shared.utils.io.read_file(stories_path)
assert (
f"story: simple_story_with_unknown_entity ({E2E_STORY_FILE_UNKNOWN_ENTITY})"
in failed_stories
)
async def test_end_to_evaluation_trips_circuit_breaker():
agent = Agent(
domain="data/test_domains/default.yml",
policies=[MemoizationPolicy(max_history=11)],
)
training_data = await agent.load_data(STORY_FILE_TRIPS_CIRCUIT_BREAKER)
agent.train(training_data)
generator = await _create_data_generator(
E2E_STORY_FILE_TRIPS_CIRCUIT_BREAKER, agent, use_e2e=True
)
test_stories = generator.generate_story_trackers()
story_evaluation, num_stories = await _collect_story_predictions(
test_stories, agent, use_e2e=True
)
circuit_trip_predicted = [
"utter_greet",
"utter_greet",
"utter_greet",
"utter_greet",
"utter_greet",
"utter_greet",
"utter_greet",
"utter_greet",
"utter_greet",
"utter_greet",
"circuit breaker tripped",
"circuit breaker tripped",
]
assert (
story_evaluation.evaluation_store.action_predictions == circuit_trip_predicted
)
assert num_stories == 1
@pytest.mark.parametrize(
"text, entity, expected_entity",
[
(
"The first one please.",
{
"extractor": "DucklingEntityExtractor",
"entity": "ordinal",
"confidence": 0.87,
"start": 4,
"end": 9,
"value": 1,
},
{
"text": "The first one please.",
"entity": "ordinal",
"start": 4,
"end": 9,
"value": "1",
},
),
(
"The first one please.",
{
"extractor": "CRFEntityExtractor",
"entity": "ordinal",
"confidence": 0.87,
"start": 4,
"end": 9,
"value": "1",
},
{
"text": "The first one please.",
"entity": "ordinal",
"start": 4,
"end": 9,
"value": "1",
},
),
(
"Italian food",
{
"extractor": "DIETClassifier",
"entity": "cuisine",
"confidence": 0.99,
"start": 0,
"end": 7,
"value": "Italian",
},
{
"text": "Italian food",
"entity": "cuisine",
"start": 0,
"end": 7,
"value": "Italian",
},
),
],
)
def test_event_has_proper_implementation(
text: Text, entity: Dict[Text, Any], expected_entity: Dict[Text, Any]
):
actual_entities = _clean_entity_results(text, [entity])
assert actual_entities[0] == expected_entity
@pytest.mark.parametrize(
"test_file",
[
("data/test_yaml_stories/test_full_retrieval_intent_story.yml"),
("data/test_yaml_stories/test_base_retrieval_intent_story.yml"),
],
)
async def test_retrieval_intent(response_selector_agent: Agent, test_file: Text):
generator = await _create_data_generator(
test_file, response_selector_agent, use_e2e=True,
)
test_stories = generator.generate_story_trackers()
story_evaluation, num_stories = await _collect_story_predictions(
test_stories, response_selector_agent, use_e2e=True
)
# check that test story can either specify base intent or full retrieval intent
assert not story_evaluation.evaluation_store.has_prediction_target_mismatch()
@pytest.mark.parametrize(
"test_file",
[
("data/test_yaml_stories/test_full_retrieval_intent_wrong_prediction.yml"),
("data/test_yaml_stories/test_base_retrieval_intent_wrong_prediction.yml"),
],
)
async def test_retrieval_intent_wrong_prediction(
tmpdir: Path, response_selector_agent: Agent, test_file: Text
):
stories_path = str(tmpdir / FAILED_STORIES_FILE)
await evaluate_stories(
stories=test_file,
agent=response_selector_agent,
out_directory=str(tmpdir),
max_stories=None,
e2e=True,
)
failed_stories = rasa.shared.utils.io.read_file(stories_path)
# check if the predicted entry contains full retrieval intent
assert "# predicted: chitchat/ask_name" in failed_stories
| 29.945017 | 88 | 0.648152 | [
"Apache-2.0"
] | AnaCarolcs/rasa | tests/core/test_evaluation.py | 8,714 | Python |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('ParpC')
Monomer('Xiap', ['Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 144250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
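# Illustrative simulation of this exported model (a sketch; assumes PySB's SciPy
# integrator is available and that this module is imported, exposing `model`):
#   from pysb.simulator import ScipyOdeSimulator
#   import numpy as np
#   tspan = np.linspace(0, 20000, 101)
#   traj = ScipyOdeSimulator(model, tspan).run()
#   traj.observables['ParpC_obs']  # cleaved PARP trajectory over time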
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
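# Hedged usage sketch, not part of the generated model file: the parameters,
# rules and initial conditions above define a PySB reaction network for
# receptor-driven apoptosis. Assuming the module declares Model() near its top
# (so the implicit module-level `model` object exists, as is usual for
# generated PySB files), it can be simulated as follows; the time span is an
# arbitrary placeholder.
def _example_simulate():
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator
    tspan = np.linspace(0, 20000, 100)
    result = ScipyOdeSimulator(model, tspan=tspan).run()
    return result.observables['ParpC_obs']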
| 79.473054 | 614 | 0.80975 | [
"MIT"
] | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | log_mito_act/model_577.py | 13,272 | Python |
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API library for VPC Service Controls Service Perimeters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.accesscontextmanager import util
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.core import log
from googlecloudsdk.core import resources as core_resources
def _CreateServiceRestriction(restriction_message_type, mask_prefix,
enable_restriction, allowed_services):
"""Returns a service restriction message and its update mask."""
if allowed_services is None and enable_restriction is None:
return None, []
message = restriction_message_type()
update_mask = []
if allowed_services is not None:
message.allowedServices = allowed_services
update_mask.append('allowedServices')
if enable_restriction is not None:
message.enableRestriction = enable_restriction
update_mask.append('enableRestriction')
return message, ['{}.{}'.format(mask_prefix, item) for item in update_mask]
def _CreateServicePerimeterConfig(
messages, mask_prefix, include_unrestricted_services, resources,
restricted_services, unrestricted_services, levels,
ingress_allowed_services, vpc_allowed_services, bridge_allowed_services,
enable_ingress_service_restriction, enable_vpc_service_restriction,
enable_bridge_service_restriction):
"""Returns a ServicePerimeterConfig and its update mask."""
config = messages.ServicePerimeterConfig()
mask = []
if resources is not None:
mask.append('resources')
config.resources = resources
if include_unrestricted_services and unrestricted_services is not None:
mask.append('unrestrictedServices')
config.unrestrictedServices = unrestricted_services
if restricted_services is not None:
mask.append('restrictedServices')
config.restrictedServices = restricted_services
if levels is not None:
mask.append('accessLevels')
config.accessLevels = [l.RelativeName() for l in levels]
if (enable_ingress_service_restriction is not None or
ingress_allowed_services is not None):
config.ingressServiceRestriction, mask_updates = _CreateServiceRestriction(
messages.IngressServiceRestriction,
'ingressServiceRestriction',
enable_restriction=enable_ingress_service_restriction,
allowed_services=ingress_allowed_services)
mask += mask_updates
if (enable_vpc_service_restriction is not None or
vpc_allowed_services is not None):
config.vpcServiceRestriction, mask_updates = _CreateServiceRestriction(
messages.VpcServiceRestriction,
'vpcServiceRestriction',
enable_restriction=enable_vpc_service_restriction,
allowed_services=vpc_allowed_services)
mask += mask_updates
if (enable_bridge_service_restriction is not None or
bridge_allowed_services is not None):
config.bridgeServiceRestriction, mask_updates = _CreateServiceRestriction(
messages.BridgeServiceRestriction,
'bridgeServiceRestriction',
enable_restriction=enable_bridge_service_restriction,
allowed_services=bridge_allowed_services)
mask += mask_updates
if not mask:
return None, []
return config, ['{}.{}'.format(mask_prefix, item) for item in mask]
class Client(object):
"""High-level API client for VPC Service Controls Service Perimeters."""
def __init__(self, client=None, messages=None, version='v1'):
self.client = client or util.GetClient(version=version)
self.messages = messages or self.client.MESSAGES_MODULE
self.include_unrestricted_services = {
'v1': False,
'v1alpha': True,
'v1beta': True
}[version]
def Get(self, zone_ref):
return self.client.accessPolicies_servicePerimeters.Get(
self.messages
.AccesscontextmanagerAccessPoliciesServicePerimetersGetRequest(
name=zone_ref.RelativeName()))
def Patch(self,
perimeter_ref,
description=None,
title=None,
perimeter_type=None,
resources=None,
restricted_services=None,
unrestricted_services=None,
levels=None,
ingress_allowed_services=None,
vpc_allowed_services=None,
bridge_allowed_services=None,
enable_ingress_service_restriction=None,
enable_vpc_service_restriction=None,
enable_bridge_service_restriction=None,
apply_to_dry_run_config=False,
clear_dry_run=False):
"""Patch a service perimeter.
Args:
perimeter_ref: resources.Resource, reference to the perimeter to patch
description: str, description of the zone or None if not updating
title: str, title of the zone or None if not updating
      perimeter_type: PerimeterTypeValueValuesEnum, the perimeter type enum value
        for this perimeter, or None if not updating
resources: list of str, the names of resources (for now, just
'projects/...') in the zone or None if not updating.
restricted_services: list of str, the names of services
('example.googleapis.com') that *are* restricted by the access zone or
None if not updating.
unrestricted_services: list of str, the names of services
('example.googleapis.com') that *are not* restricted by the access zone
or None if not updating.
levels: list of Resource, the access levels (in the same policy) that must
be satisfied for calls into this zone or None if not updating.
ingress_allowed_services: list of str, the names of services
('example.googleapis.com') that *are* allowed to use Access Levels to
make a cross access zone boundary call, or None if not updating.
vpc_allowed_services: list of str, the names of services
('example.googleapis.com') that *are* allowed to be made within the
access zone, or None if not updating.
bridge_allowed_services: list of str, the names of services
('example.googleapis.com') that *are* allowed to use the bridge access
zone, or None if not updating.
enable_ingress_service_restriction: bool, whether to restrict the set of
APIs callable outside the access zone via Access Levels, or None if not
updating.
enable_vpc_service_restriction: bool, whether to restrict the set of APIs
callable within the access zone, or None if not updating.
enable_bridge_service_restriction: bool, whether to restrict the set of
APIs callable using the bridge access zone, or None if not updating.
apply_to_dry_run_config: When true, the configuration will be place in the
'spec' field instead of the 'status' field of the Service Perimeter.
clear_dry_run: When true, the ServicePerimeterConfig field for dry-run
(i.e. 'spec') will be cleared and dryRun will be set to False.
Returns:
ServicePerimeter, the updated Service Perimeter.
"""
m = self.messages
perimeter = m.ServicePerimeter()
update_mask = []
if description is not None:
update_mask.append('description')
perimeter.description = description
if title is not None:
update_mask.append('title')
perimeter.title = title
if perimeter_type is not None:
update_mask.append('perimeterType')
perimeter.perimeterType = perimeter_type
if not clear_dry_run:
mask_prefix = 'status' if not apply_to_dry_run_config else 'spec'
config, config_mask_additions = _CreateServicePerimeterConfig(
m, mask_prefix, self.include_unrestricted_services, resources,
restricted_services, unrestricted_services, levels,
ingress_allowed_services, vpc_allowed_services,
bridge_allowed_services, enable_ingress_service_restriction,
enable_vpc_service_restriction, enable_bridge_service_restriction)
if not apply_to_dry_run_config:
perimeter.status = config
else:
perimeter.dryRun = True
perimeter.spec = config
update_mask += config_mask_additions
if apply_to_dry_run_config and config_mask_additions:
update_mask.append('dryRun')
else:
update_mask.append('spec')
update_mask.append('dryRun')
perimeter.spec = None
perimeter.dryRun = False
update_mask.sort() # For ease-of-testing
# No update mask implies no fields were actually edited, so this is a no-op.
if not update_mask:
log.warning(
'The update specified results in an identical resource. Skipping request.'
)
return perimeter
request_type = (
m.AccesscontextmanagerAccessPoliciesServicePerimetersPatchRequest)
request = request_type(
servicePerimeter=perimeter,
name=perimeter_ref.RelativeName(),
updateMask=','.join(update_mask),
)
operation = self.client.accessPolicies_servicePerimeters.Patch(request)
poller = util.OperationPoller(self.client.accessPolicies_servicePerimeters,
self.client.operations, perimeter_ref)
operation_ref = core_resources.REGISTRY.Parse(
operation.name, collection='accesscontextmanager.operations')
return waiter.WaitFor(
poller, operation_ref,
'Waiting for PATCH operation [{}]'.format(operation_ref.Name()))
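# Hedged usage sketch, not part of the original module: a hypothetical helper
# showing one way the Client.Patch method above could be driven. The
# perimeter_ref argument is assumed to be an already-parsed service perimeter
# resource reference, and the restricted service name is a placeholder.
def _ExamplePatchRestrictedServices(perimeter_ref):
  client = Client(version='v1')
  return client.Patch(
      perimeter_ref,
      restricted_services=['storage.googleapis.com'],
      apply_to_dry_run_config=True)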
| 40.534413 | 84 | 0.723332 | [
"MIT"
] | bopopescu/JobSniperRails | gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/accesscontextmanager/zones.py | 10,012 | Python |
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
class huobipro (Exchange):
def describe(self):
return self.deep_extend(super(huobipro, self).describe(), {
'id': 'huobipro',
'name': 'Huobi Pro',
'countries': 'CN',
'rateLimit': 2000,
'userAgent': self.userAgents['chrome39'],
'version': 'v1',
'accounts': None,
'accountsById': None,
'hostname': 'api.huobi.pro',
'hasCORS': False,
# obsolete metainfo structure
'hasFetchOHLCV': True,
'hasFetchOrders': True,
'hasFetchOpenOrders': True,
# new metainfo structure
'has': {
'fetchOHCLV': True,
'fetchOrders': True,
'fetchOpenOrders': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'1d': '1day',
'1w': '1week',
'1M': '1mon',
'1y': '1year',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766569-15aa7b9a-5edd-11e7-9e7f-44791f4ee49c.jpg',
'api': 'https://api.huobi.pro',
'www': 'https://www.huobi.pro',
'doc': 'https://github.com/huobiapi/API_Docs/wiki/REST_api_reference',
'fees': 'https://www.huobi.pro/about/fee/',
},
'api': {
'market': {
'get': [
                        'history/kline',  # get candlestick (K-line) data
                        'detail/merged',  # get the aggregated ticker
                        'depth',  # get market depth data
                        'trade',  # get trade detail data
                        'history/trade',  # fetch recent trade records in bulk
                        'detail',  # get 24-hour market detail (volume) data
],
},
'public': {
'get': [
                        'common/symbols',  # query all trading pairs supported by the system
                        'common/currencys',  # query all currencies supported by the system
                        'common/timestamp',  # query the current system time
],
},
'private': {
'get': [
                        'account/accounts',  # query all accounts of the current user (i.e. the account-id)
                        'account/accounts/{id}/balance',  # query the balance of a specified account
                        'order/orders/{id}',  # query the details of an order
                        'order/orders/{id}/matchresults',  # query the match results of an order
                        'order/orders',  # query current and historical orders
                        'order/matchresults',  # query current and historical match results
                        'dw/withdraw-virtual/addresses',  # query virtual currency withdrawal addresses
],
'post': [
                        'order/orders/place',  # create and execute a new order (one-step order placement, recommended)
                        'order/orders',  # create a new order request (creates the order only, does not execute it)
                        'order/orders/{id}/place',  # execute an order (only executes a previously created order)
                        'order/orders/{id}/submitcancel',  # submit a request to cancel an order
                        'order/orders/batchcancel',  # cancel orders in batch
                        'dw/balance/transfer',  # transfer assets between accounts
                        'dw/withdraw/api/create',  # request a virtual currency withdrawal
                        'dw/withdraw-virtual/create',  # request a virtual currency withdrawal
                        'dw/withdraw-virtual/{id}/place',  # confirm a virtual currency withdrawal request
                        'dw/withdraw-virtual/{id}/cancel',  # request cancellation of a virtual currency withdrawal
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.002,
'taker': 0.002,
},
},
})
async def fetch_markets(self):
response = await self.publicGetCommonSymbols()
markets = response['data']
numMarkets = len(markets)
if numMarkets < 1:
raise ExchangeError(self.id + ' publicGetCommonSymbols returned empty response: ' + self.json(response))
result = []
for i in range(0, len(markets)):
market = markets[i]
baseId = market['base-currency']
quoteId = market['quote-currency']
base = baseId.upper()
quote = quoteId.upper()
id = baseId + quoteId
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': market['amount-precision'],
'price': market['price-precision'],
}
lot = math.pow(10, -precision['amount'])
maker = 0 if (base == 'OMG') else 0.2 / 100
taker = 0 if (base == 'OMG') else 0.2 / 100
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'lot': lot,
'precision': precision,
'taker': taker,
'maker': maker,
'limits': {
'amount': {
'min': lot,
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': 0,
'max': None,
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
symbol = None
if market:
symbol = market['symbol']
last = None
if 'last' in ticker:
last = ticker['last']
timestamp = self.milliseconds()
if 'ts' in ticker:
timestamp = ticker['ts']
bid = None
ask = None
if 'bid' in ticker:
if ticker['bid']:
bid = self.safe_float(ticker['bid'], 0)
if 'ask' in ticker:
if ticker['ask']:
ask = self.safe_float(ticker['ask'], 0)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': ticker['high'],
'low': ticker['low'],
'bid': bid,
'ask': ask,
'vwap': None,
'open': ticker['open'],
'close': ticker['close'],
'first': None,
'last': last,
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['amount']),
'quoteVolume': ticker['vol'],
'info': ticker,
}
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.marketGetDepth(self.extend({
'symbol': market['id'],
'type': 'step0',
}, params))
if 'tick' in response:
if not response['tick']:
raise ExchangeError(self.id + ' fetchOrderBook() returned empty response: ' + self.json(response))
return self.parse_order_book(response['tick'], response['tick']['ts'])
raise ExchangeError(self.id + ' fetchOrderBook() returned unrecognized response: ' + self.json(response))
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.marketGetDetailMerged(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(response['tick'], market)
def parse_trade(self, trade, market):
timestamp = trade['ts']
return {
'info': trade,
'id': str(trade['id']),
'order': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['direction'],
'price': trade['price'],
'amount': trade['amount'],
}
def parse_trades_data(self, data, market, since=None, limit=None):
result = []
for i in range(0, len(data)):
trades = self.parse_trades(data[i]['data'], market, since, limit)
for k in range(0, len(trades)):
result.append(trades[k])
return result
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.marketGetHistoryTrade(self.extend({
'symbol': market['id'],
'size': 2000,
}, params))
return self.parse_trades_data(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv['id'] * 1000,
ohlcv['open'],
ohlcv['high'],
ohlcv['low'],
ohlcv['close'],
ohlcv['amount'],
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.marketGetHistoryKline(self.extend({
'symbol': market['id'],
'period': self.timeframes[timeframe],
'size': 2000, # max = 2000
}, params))
return self.parse_ohlcvs(response['data'], market, timeframe, since, limit)
async def load_accounts(self, reload=False):
if reload:
self.accounts = await self.fetch_accounts()
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts()
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_accounts(self):
await self.load_markets()
response = await self.privateGetAccountAccounts()
return response['data']
async def fetch_balance(self, params={}):
await self.load_markets()
await self.load_accounts()
response = await self.privateGetAccountAccountsIdBalance(self.extend({
'id': self.accounts[0]['id'],
}, params))
balances = response['data']['list']
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
uppercase = balance['currency'].upper()
currency = self.common_currency_code(uppercase)
account = None
if currency in result:
account = result[currency]
else:
account = self.account()
if balance['type'] == 'trade':
account['free'] = float(balance['balance'])
if balance['type'] == 'frozen':
account['used'] = float(balance['balance'])
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if not symbol:
raise ExchangeError(self.id + ' fetchOrders() requires a symbol parameter')
        await self.load_markets()
market = self.market(symbol)
status = None
if 'type' in params:
status = params['type']
elif 'status' in params:
status = params['status']
else:
raise ExchangeError(self.id + ' fetchOrders() requires type param or status param for spot market ' + symbol + '(0 or "open" for unfilled or partial filled orders, 1 or "closed" for filled orders)')
if (status == 0) or (status == 'open'):
status = 'submitted,partial-filled'
elif (status == 1) or (status == 'closed'):
status = 'filled,partial-canceled'
else:
raise ExchangeError(self.id + ' fetchOrders() wrong type param or status param for spot market ' + symbol + '(0 or "open" for unfilled or partial filled orders, 1 or "closed" for filled orders)')
response = await self.privateGetOrderOrders(self.extend({
'symbol': market['id'],
'states': status,
}))
return self.parse_orders(response['data'], market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
open = 0 # 0 for unfilled orders, 1 for filled orders
        return await self.fetch_orders(symbol, None, None, self.extend({
'status': open,
}, params))
def parse_order_status(self, status):
if status == 'partial-filled':
return 'open'
elif status == 'filled':
return 'closed'
elif status == 'canceled':
return 'canceled'
elif status == 'submitted':
return 'open'
return status
def parse_order(self, order, market=None):
side = None
type = None
status = None
if 'type' in order:
orderType = order['type'].split('-')
side = orderType[0]
type = orderType[1]
status = self.parse_order_status(order['state'])
symbol = None
if not market:
if 'symbol' in order:
if order['symbol'] in self.markets_by_id:
marketId = order['symbol']
market = self.markets_by_id[marketId]
if market:
symbol = market['symbol']
timestamp = order['created-at']
amount = float(order['amount'])
filled = float(order['field-amount'])
remaining = amount - filled
price = float(order['price'])
cost = float(order['field-cash-amount'])
average = 0
if filled:
average = float(cost / filled)
result = {
'info': order,
'id': order['id'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
}
return result
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
await self.load_accounts()
market = self.market(symbol)
order = {
'account-id': self.accounts[0]['id'],
'amount': self.amount_to_precision(symbol, amount),
'symbol': market['id'],
'type': side + '-' + type,
}
if type == 'limit':
order['price'] = self.price_to_precision(symbol, price)
response = await self.privatePostOrderOrdersPlace(self.extend(order, params))
return {
'info': response,
'id': response['data'],
}
async def cancel_order(self, id, symbol=None, params={}):
return await self.privatePostOrderOrdersIdSubmitcancel({'id': id})
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/'
if api == 'market':
url += api
else:
url += self.version
url += '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'private':
self.check_required_credentials()
timestamp = self.YmdHMS(self.milliseconds(), 'T')
request = self.keysort(self.extend({
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'AccessKeyId': self.apiKey,
'Timestamp': timestamp,
}, query))
auth = self.urlencode(request)
payload = '\n'.join([method, self.hostname, url, auth])
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
auth += '&' + self.urlencode({'Signature': signature})
url += '?' + auth
if method == 'POST':
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
else:
if params:
url += '?' + self.urlencode(params)
url = self.urls['api'] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'status' in response:
if response['status'] == 'error':
raise ExchangeError(self.id + ' ' + self.json(response))
return response
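# Hedged usage sketch, not part of the original module: a hypothetical coroutine
# exercising the public endpoints defined above. The 'BTC/USDT' symbol is an
# assumption; any symbol returned by fetch_markets() would work.
async def _example_fetch_last_price():
    exchange = huobipro()
    await exchange.load_markets()
    ticker = await exchange.fetch_ticker('BTC/USDT')
    return ticker['last']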
| 38.368653 | 210 | 0.490075 | [
"MIT"
] | destenson/ccxt--ccxt | python/ccxt/async/huobipro.py | 17,885 | Python |
from django.contrib.auth import get_user_model, authenticate
from rest_framework import serializers
from django.utils.translation import ugettext_lazy as _
class UsersSerializer(serializers.ModelSerializer):
"""Serializer for users object"""
class Meta:
model = get_user_model()
        fields = ('email', 'password', 'name', 'cf_handle', 'cf_pass',
                  'uri_handle', 'uri_pass', 'uva_handle', 'uva_pass')
extra_kwargs = {
'password': {
'write_only': True,
'min_length': 5,
'style': {'input_type': 'password'}
},
'uva_pass': {
'write_only': True,
'style': {'input_type': 'password'}
},
'uri_pass': {
'write_only': True,
'style': {'input_type': 'password'}
}
}
def create(self, validated_data):
"""Create a new user with validated password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""update user data with encrypted password"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authentication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with the provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
| 32.552239 | 75 | 0.581843 | [
"MIT"
] | mazharkafi004/XOJ | xojbackend/app/user/serializers.py | 2,181 | Python |
"""
Django settings for queueMgmt project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n(ftor0xe)qon)ar53-($dc^o3-#4ikgml4#jema-&q7phi39_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'visitors.apps.VisitorsConfig',
'rest_framework',
'corsheaders'
]
MIDDLEWARE = [
# CORS
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'queueMgmt.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'queueMgmt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'djongo',
'NAME': 'NEW_PATIENTS_DB',
'HOST': '127.0.0.1',
'PORT': 27017,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Jakarta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# FOR QR CODE
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_root")
# CORS
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = [
'http://localhost:4200',
'http://127.0.0.1:4200'
]
| 25.41844 | 91 | 0.686942 | [
"MIT"
] | maxhamz/prieds_test_hospital_queue_be | queueMgmt/settings.py | 3,584 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AvailabilitySetsOperations(object):
"""AvailabilitySetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update(
self,
resource_group_name, # type: str
availability_set_name, # type: str
parameters, # type: "models.AvailabilitySet"
**kwargs # type: Any
):
# type: (...) -> "models.AvailabilitySet"
"""Create or update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Create Availability Set operation.
:type parameters: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailabilitySet"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AvailabilitySet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
def update(
self,
resource_group_name, # type: str
availability_set_name, # type: str
parameters, # type: "models.AvailabilitySetUpdate"
**kwargs # type: Any
):
# type: (...) -> "models.AvailabilitySet"
"""Update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Update Availability Set operation.
:type parameters: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailabilitySet"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AvailabilitySetUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
availability_set_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
availability_set_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.AvailabilitySet"
"""Retrieves information about an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_10_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailabilitySet"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
def list_by_subscription(
self,
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.AvailabilitySetListResult"]
"""Lists all availability sets in a subscription.
:param expand: The expand expression to apply to the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySetListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailabilitySetListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailabilitySetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.AvailabilitySetListResult"]
"""Lists all availability sets in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySetListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.AvailabilitySetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailabilitySetListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailabilitySetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets'} # type: ignore
def list_available_sizes(
self,
resource_group_name, # type: str
availability_set_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.VirtualMachineSizeListResult"]
"""Lists all available virtual machine sizes that can be used to create a new virtual machine in
an existing availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineSizeListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineSizeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineSizeListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_available_sizes.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualMachineSizeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes'} # type: ignore
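# Hedged usage sketch, not part of the generated client: in real code this
# operations group is reached through ComputeManagementClient rather than
# instantiated directly. A hypothetical helper that drains the pager returned
# by list() could look like this (the resource group name is a placeholder).
def _example_list_availability_set_names(operations, resource_group_name='my-rg'):
    return [availability_set.name for availability_set in operations.list(resource_group_name)]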
| 48.084168 | 204 | 0.661332 | [
"MIT"
] | 00Kai0/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/operations/_availability_sets_operations.py | 23,994 | Python |
import math

def newton(function, function1, startingInt):
    """Approximate a root of ``function`` with Newton's method.

    ``function`` is f(x), ``function1`` is its derivative f'(x) and
    ``startingInt`` is the initial guess.
    """
    x_n = startingInt
    while True:
        # Newton step: x_{n+1} = x_n - f(x_n) / f'(x_n)
        x_n1 = x_n - function(x_n) / function1(x_n)
        if abs(x_n - x_n1) < 0.00001:  # converged once successive estimates agree
            return x_n1
        x_n = x_n1

def f(x):
    return math.pow(x, 3) - 2 * x - 5

def f1(x):
    return 3 * math.pow(x, 2) - 2

print(newton(f, f1, 3))
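# Hedged extra example, not in the original script: the same routine applied to
# f(x) = x**2 - 2 with derivative 2*x converges to sqrt(2) from a guess of 1.0.
print(newton(lambda x: x * x - 2, lambda x: 2 * x, 1.0))  # roughly 1.4142136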
"MIT"
] | FaizAlam/Python | NeutonMethod.py | 362 | Python |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import functools
import operator
from oneflow.compatible import single_client as flow
import oneflow._oneflow_internal
from oneflow.compatible.single_client.python.framework import id_util as id_util
from oneflow.compatible.single_client.python.oneflow_export import (
oneflow_export,
stable_api,
)
from typing import Optional, Sequence, List
@oneflow_export("tensor_buffer_to_tensor")
@stable_api
def tensor_buffer_to_tensor(
x: oneflow._oneflow_internal.BlobDesc,
dtype: flow.dtype,
instance_shape: Sequence[int],
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""This operator converts the Blob's type from TensorBuffer to Tensor.
Some operator's output data type is `TensorBuffer`, you can use this operator to convert back
to `Tensor`.
Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_
for more about TensorBuffer.
Args:
x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
dtype (flow.dtype): The data dtype.
instance_shape (Sequence[int]): The shape of each TensorBuffer instance.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob`.
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32),
) -> tp.Numpy:
x = flow.tensor_to_tensor_buffer(x,
instance_dims=2)
return flow.tensor_buffer_to_tensor(x,
instance_shape=(64, 64),
dtype=flow.float)
x = np.random.randn(4, 16, 64, 64).astype(np.float32)
out = tensor_buffer_to_tensor_Job(x)
# out.shape (4, 16, 64, 64)
"""
if name is None:
name = id_util.UniqueStr("TensorBufferToTensor_")
return (
flow.user_op_builder(name)
.Op("tensor_buffer_to_tensor")
.Input("in", [x])
.Output("out")
.Attr("dtype", dtype)
.Attr("instance_shape", instance_shape)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("tensor_to_tensor_buffer")
@stable_api
def tensor_to_tensor_buffer(
x: oneflow._oneflow_internal.BlobDesc,
instance_dims: int,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""This operator converts the Blob's type from Tensor to TensorBuffer.
Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_
for more about TensorBuffer.
Args:
x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
instance_dims (int): The dimensions of dynamic tensor instance.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The result Blob.
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32),
) -> tp.Numpy:
x = flow.tensor_to_tensor_buffer(x,
instance_dims=2)
return flow.tensor_buffer_to_tensor(x,
instance_shape=(64, 64),
dtype=flow.float)
x = np.random.randn(4, 16, 64, 64).astype(np.float32)
out = tensor_buffer_to_tensor_Job(x)
# out.shape (4, 16, 64, 64)
"""
if name is None:
name = id_util.UniqueStr("TensorToTensorBuffer_")
return (
flow.user_op_builder(name)
.Op("tensor_to_tensor_buffer")
.Input("in", [x])
.Output("out")
.Attr("instance_dims", instance_dims)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("gen_tensor_buffer")
@stable_api
def gen_tensor_buffer(
shape: Sequence[int],
shape_list: Sequence[Sequence[int]],
value_list: Sequence[float],
data_type: Optional[flow.dtype] = flow.float32,
dynamic_out: Optional[bool] = False,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""This operator generates a tensor buffer blob.
Args:
shape (Sequence[int]): shape of output blob
shape_list ( Sequence[Sequence[int]]): shapes for tensor buffer in output blob
value_list (Sequence[float]): values for tensor buffer in output blob
data_type (Optional[flow.dtype]): data type for tensor buffer in output blob
dynamic_out (Optional[bool]): if output is a dynamic blob
name (Optional[str]): The name for the operation. Defaults to None.
Returns:
BlobDesc: The result Blob.
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
@flow.global_function(function_config=func_config)
def GenTensorBufferJob():
with flow.scope.placement("cpu", "0:0"):
x = flow.gen_tensor_buffer([(2,)], [(2, 1), (1, 2)], [0.0, 1.0])
y = flow.tensor_buffer_to_list_of_tensors(x, (100, 100), flow.float, True)
return y
        # y_0.shape (2, 1), y_1.shape (1, 2)
"""
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("GenTensorBuffer_")
)
.Op("gen_tensor_buffer")
.Output("out")
.Attr("shape", shape)
.Attr("shape_list", shape_list)
.Attr("value_list", value_list)
.Attr("data_type", data_type)
.Attr("dynamic_out", dynamic_out)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("tensor_buffer_to_list_of_tensors")
@stable_api
def tensor_buffer_to_list_of_tensors(
x: oneflow._oneflow_internal.BlobDesc,
out_shape: Sequence[int],
out_dtype: flow.dtype,
dynamic_out: Optional[bool] = False,
name: Optional[str] = None,
) -> List[oneflow._oneflow_internal.BlobDesc]:
r"""This operator converts the Blob of TensorBuffer to list of Tensors. Every element in x will be converted
to a Tensor and output will be flatten to a list.
Args:
x (BlobDesc): Input `Blob`, data type must be tensor buffer.
out_shape (Sequence[int]): max shape for a tensor buffer in x
out_dtype (flow.dtype,): output data type
dynamic_out (Optional[bool]): if output is dynamic blob. Default to False.
name (Optional[str]): The name for the operation. Default to None.
Returns:
List[BlobDesc]: result blobs
For example:
.. code-block:: python
# the same with `gen_tensor_buffer` op
"""
return (
flow.user_op_builder(
name
if name is not None
else id_util.UniqueStr("TensorBufferToListOfTensors_")
)
.Op("tensor_buffer_to_list_of_tensors")
.Input("in", [x])
.Output("out", functools.reduce(operator.mul, x.shape, 1))
.Attr("out_dtype", out_dtype)
.Attr("out_shape", out_shape)
.Attr("dynamic_out", dynamic_out)
.Build()
.InferAndTryRun()
.RemoteBlobList()
)
| 32.972549 | 126 | 0.644029 | [
"Apache-2.0"
] | xcnick/oneflow | oneflow/compatible_single_client_python/ops/tensor_buffer_ops.py | 8,408 | Python |
import multiprocessing as mp
import time
def f(name, timeout, queue):
time.sleep(timeout)
print('hello', name)
queue.put(name + ' done!')
queue = mp.SimpleQueue() # queue for communicating with the processes we will spawn
bob = mp.Process(target=f, args=('bob', 0.3, queue))
bob.start() # start the process
alice = mp.Process(target=f, args=('alice', 0.1, queue))
alice.start() # start the process
# wait for processes to complete
bob.join()
alice.join()
# print results from intercommunication object
for _ in range(2):  # one result per spawned process; iter(queue.get, None) would block forever because no sentinel is ever put
    print(queue.get()) | 25.086957 | 84 | 0.701906 | [
"MIT"
] | andgineer/masterandrey.com | _includes/src/multiprocessing.py | 577 | Python |
'''OpenGL extension EXT.histogram
This module customises the behaviour of the
OpenGL.raw.GL.EXT.histogram to provide a more
Python-friendly API
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.histogram import *
### END AUTOGENERATED SECTION
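# The wrappers below mark the 'params' pointer argument of each glGetHistogramParameter*vEXT
# entry point as an output of size 1, so the queried value is returned to the caller instead
# of being written into a caller-supplied array.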
glGetHistogramParameterfvEXT = wrapper.wrapper(glGetHistogramParameterfvEXT).setOutput(
"params",(1,),
)
glGetHistogramParameterivEXT = wrapper.wrapper(glGetHistogramParameterivEXT).setOutput(
"params",(1,),
)
| 28.8 | 87 | 0.800347 | [
"MIT"
] | MontyThibault/centre-of-mass-awareness | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/GL/EXT/histogram.py | 576 | Python |
# coding: utf-8
import unittest
from datetime import datetime
from whiskyton import app
from whiskyton.helpers import sitemap
from whiskyton.helpers.charts import Chart
from whiskyton.tests.config import WhiskytonTest
class TestHelpers(unittest.TestCase):
def setUp(self):
self.test_suite = WhiskytonTest()
self.app = self.test_suite.set_app(app)
def tearDown(self):
self.test_suite.unset_app()
# test methods from Whisky (whiskyton/models.py)
def test_slug(self):
whisky = self.test_suite.get_whisky(2)
self.assertEqual(whisky.get_slug(), 'glendeveronmacduff')
def test_get_tastes(self):
whisky = self.test_suite.get_whisky(2)
tastes = ['1', '1', '1', '1', '1', '3', '2', '1', '0', '2', '0', '2']
self.assertEqual(whisky.get_tastes(), tastes)
# test methods from Chart (whiskyton/helpers/charts.py)
def test_cache_path(self):
base_dir = app.config['BASEDIR']
cache_path = base_dir.child('whiskyton', 'static', 'charts')
chart = Chart()
self.assertEqual(str(cache_path), chart.cache_path())
def test_cache_name(self):
whisky_1, whisky_2 = self.test_suite.get_whiskies()
chart = Chart(reference=whisky_1, comparison=whisky_2)
cache_dir_path = chart.cache_path()
cache_file_path = chart.cache_name(True)
cache_name = chart.cache_name()
self.assertEqual(cache_name, '110113221101x111113210202.svg')
self.assertEqual(cache_file_path,
cache_dir_path.child(cache_name).absolute())
def test_create_and_cache(self):
base_dir = app.config['BASEDIR']
whisky_1, whisky_2 = self.test_suite.get_whiskies()
chart = Chart(reference=whisky_1, comparison=whisky_2)
contents = chart.create()
cached = chart.cache()
sample = base_dir.child('whiskyton', 'tests', 'chart_sample.svg')
self.assertEqual(contents, cached.read_file())
self.assertEqual(contents, sample.read_file())
# test methods from whiskyton/helpers/sitemap.py
def test_recursive_listdir(self):
sample_dir = app.config['BASEDIR'].child('whiskyton')
files = sitemap.recursive_listdir(sample_dir)
self.assertIsInstance(files, list)
for file_path in files:
self.assertTrue(file_path.exists())
self.assertTrue(file_path.isfile())
def test_most_recent_update(self):
output = sitemap.most_recent_update()
dt = datetime.strptime(output, '%Y-%m-%d')
self.assertIsInstance(dt, datetime)
| 35.712329 | 77 | 0.66705 | [
"MIT"
] | g4brielvs/whiskyton | whiskyton/tests/test_helpers.py | 2,607 | Python |
import os
import logging
from .paths import get_path
_FORMAT = '%(asctime)s:%(levelname)s:%(lineno)s:%(module)s.%(funcName)s:%(message)s'
_formatter = logging.Formatter(_FORMAT, '%H:%M:%S')
_handler = logging.StreamHandler()
_handler.setFormatter(_formatter)
logging.basicConfig(filename=os.path.join(get_path(), 'spfeas.log'),
filemode='w',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.addHandler(_handler)
logger.setLevel(logging.INFO)
class CorruptedBandsError(OverflowError):
"""Raised when bands are corrupted"""
| 25.608696 | 84 | 0.706282 | [
"MIT"
] | alexchunet/Spa_feature_processing | spfeas/errors.py | 589 | Python |
import numpy as np
from scipy import integrate  # used by BallWindow.volume
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
[int: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
L = np.ones((n, self.__len__())) # liste des points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
        # * same remarks as in BoxWindow.__contains__
        if len(args) != self.dim:
            return False
        # the point is inside iff its distance to the stored center is at most the radius
        return np.linalg.norm(np.asarray(args) - self.cent) <= self.rad
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
    def volume(self):
        r"""[summary]
        This method calculates the volume of the Ball using the recursion :math:`V_{n+1}(r) = \int_{-r}^{r} V_{n}(\sqrt{r^2 - x^2})dx`
        """
        # * interesting recursive try
        # todo test the method
        def vol(n, r):
            # V_0 of a point is 1 by convention; each step integrates the (n-1)-dimensional
            # volume of the slice of radius sqrt(r^2 - x^2)
            if n == 0:
                return 1.0
            return integrate.quad(lambda x: vol(n - 1, np.sqrt(r ** 2 - x ** 2)), -r, r)[0]
        return vol(self.dimension(), self.rad)
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
        # the center is stored at construction time (a ball has no bounds to average)
        return np.asarray(self.cent)
| 30.433735 | 127 | 0.533386 | [
"MIT"
] | AmineAitLemqeddem/sdia-python | src/lab2/box_window.py | 7,578 | Python |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='ultmul',
ext_modules=[
CUDAExtension(
'ultMul_cuda',
[
'ultMul_cuda.cpp',
'ultMul_cuda_kernel.cu', #.cpp and .cu file must have different name
])],
cmdclass = {
'build_ext': BuildExtension
}
)
| 18.941176 | 71 | 0.723602 | [
"Apache-2.0"
] | kingsley1989/Parallel-Ultrametric | setup.py | 322 | Python |
#!/usr/bin/env python
# Copyright Singapore-MIT Alliance for Research and Technology
import random
from town import Town_layout
from company import Registrar_of_companies
class Person:
def __init__(self, city, attrs=None):
self.name = "Person_%04d" % len(city.residents)
city.residents.append(self)
self.children = list()
self.father = None
self.mother = None
self.spouse = None
self.company = None
if not attrs:
# slightly more male than female in this city
self.is_male = True if random.random() < 0.50005 else False
# 60 % of population are married
self.is_married = True if random.random() < 0.6 else False
self.get_job()
self.address = random.choice(Town_layout.residential_locations)
# 80 % of population own a car
self.mode = "car" if random.random() < 0.8 else "walk"
self.passengers = list()
if self.is_married:
attrs = dict()
attrs["sex"] = not self.is_male
attrs["is_married"] = True
attrs["spouse"] = self
attrs["address"] = self.address
self.spouse = Person(city, attrs)
child_count = random.randint(0, 3)
if child_count:
attrs = dict()
if self.is_male:
attrs["father"] = self
attrs["mother"] = self.spouse
else:
attrs["mother"] = self
attrs["father"] = self.spouse
attrs["address"] = self.address
child = Person(city, attrs)
self.add_child(child)
self.spouse.add_child(child)
elif "spouse" in attrs:
self.is_male = attrs["sex"]
self.is_married = attrs["is_married"]
self.spouse = attrs["spouse"]
self.address = attrs["address"]
# 90 % of spouses are also working.
if random.random() < 0.9:
self.get_job()
# 80 % of population own a car
self.mode = "car" if random.random() < 0.8 else "walk"
self.passengers = list()
else:
self.father = attrs["father"]
self.mother = attrs["mother"]
self.address = attrs["address"]
# child's age is between 1 to 18. If child is 7 years old or younger,
# then child stays at home.
self.is_student = True if random.randint(1, 18) > 7 else False
if self.is_student:
self.school = random.choice(Registrar_of_companies.schools)
self.school.add_student(self)
# if "car" == self.father.mode or "car" == self.mother.mode:
# # 80 % chance of getting parent to drive child to school if both have cars;
# # 40 % chance if only one parent has a car
# if "car" == self.father.mode and "car" == self.mother.mode:
# self.mode = "car" if random.random() < 0.8 else "walk"
# self.driver = self.father if random.random() < 0.5 else self.mother
# else:
# self.mode = "car" if random.random() < 0.4 else "walk"
# self.driver = self.father if "car" == self.father.mode else self.mother
# self.driver.passengers.append(self)
# else:
# self.mode = "walk"
self.mode = "walk"
def add_child(self, child):
self.children.append(child)
def get_job(self):
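        # 90 % of men and 80 % of women are employed; the employed get a workplace from the registrar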
prob = 0.9 if self.is_male else 0.8
if random.random() < prob:
self.company = Registrar_of_companies.find_work(self)
def __repr__(self):
if self.father:
bio_data = "Child name='%s' father='%s' mother='%s'" \
% (self.name, self.father.name, self.mother.name)
if self.is_student:
bio_data += " school='%s' mode='%s'" % (self.school.name, self.mode)
else:
bio_data += " is-toddler"
return bio_data
elif not self.is_married:
bio_data = "Person name='%s' %s single mode='%s'" \
% (self.name, "male" if self.is_male else "female", self.mode)
if self.company:
bio_data += " company='%s'" % self.company.name
else:
bio_data += " stay-at-home"
return bio_data
else:
bio_data = "Person name='%s' %s married spouse='%s'" \
% (self.name, "male" if self.is_male else "female", self.spouse.name)
if self.company:
bio_data += " company='%s'" % self.company.name
else:
bio_data += " stay-at-home"
for child in self.children:
bio_data += " child='%s'" % child.name
bio_data += " mode='%s'" % self.mode
return bio_data
| 40.4375 | 96 | 0.505216 | [
"MIT"
] | ZhaoBozhi/simmobility-prod | dev/tools/snake-city/person.py | 5,176 | Python |
# MIT License
#
# Copyright (C) IBM Corporation 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Differential Privacy Library for Python
=======================================
The IBM Differential Privacy Library is a library for writing, executing and experimenting with differential privacy.
The Library includes a basic differential privacy mechanisms, the building blocks of differential privacy; tools for
basic data analysis with differential privacy; and machine learning models that satisfy differential privacy.
"""
from diffprivlib import mechanisms
from diffprivlib import models
from diffprivlib import tools
__version__ = '0.2.0'
| 51.75 | 120 | 0.778986 | [
"MIT"
] | Jakondak/differential-privacy-library | diffprivlib/__init__.py | 1,656 | Python |
#!/usr/bin/env python
import telnetlib
import time
import socket
import sys
TELNET_PORT = 23
TELNET_TIMEOUT = 6
## function
def send_command(remote_conn, cmd):
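    # send a single command, give the device a moment to respond, then return the buffered output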
cmd = cmd.rstrip()
remote_conn.write(cmd + '\n')
time.sleep(1)
return remote_conn.read_very_eager()
def login(remote_conn, username, password):
output = remote_conn.read_until("sername:", TELNET_TIMEOUT)
remote_conn.write(username + '\n')
output = remote_conn.read_until("ssword:", TELNET_TIMEOUT)
remote_conn.write(password + '\n')
return output
def telnet_connection(ip_addr):
try:
return telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
except socket.timeout:
sys.exit("Connection timed-out, IP isn't reachable")
def main():
ip_addr = '184.105.247.70'
username = 'pyclass'
password = '88newclass'
remote_conn = telnet_connection(ip_addr)
output = login(remote_conn, username, password)
time.sleep(1)
output = remote_conn.read_very_eager()
output = send_command(remote_conn, 'terminal length 0')
output = send_command(remote_conn, 'show ip int brief')
print '\n', output, '\n'
remote_conn.close()
if __name__ == "__main__":
main()
| 21.584906 | 63 | 0.738636 | [
"Apache-2.0"
] | blaforest/pynet | Lesson2Number2a_telnetlib.py | 1,144 | Python |
import glob
import numpy as np
import os.path as osp
from PIL import Image
import random
import struct
from torch.utils.data import Dataset
import scipy.ndimage as ndimage
import cv2
from skimage.measure import block_reduce
import json
import scipy.ndimage as ndimage
class ConcatDataset(Dataset ):
def __init__(self, *datasets):
self.datasets = datasets
def __getitem__(self, i):
return tuple(d[i] for d in self.datasets )
def __len__(self ):
return max(len(d) for d in self.datasets )
class NYULoader(Dataset ):
def __init__(self, imRoot, normalRoot, depthRoot, segRoot,
imHeight = 480, imWidth = 640,
imWidthMax = 600, imWidthMin = 560,
phase='TRAIN', rseed = None ):
self.imRoot = imRoot
self.imHeight = imHeight
self.imWidth = imWidth
self.phase = phase.upper()
self.imWidthMax = imWidthMax
self.imWidthMin = imWidthMin
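        # during training a crop width is sampled in [imWidthMin, imWidthMax] and the crop is later
        # resized back to (imWidth, imHeight); at test time the full 640x480 frame is used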
if phase == 'TRAIN':
with open('NYUTrain.txt', 'r') as fIn:
imList = fIn.readlines()
self.imList = [osp.join(self.imRoot, x.strip() ) for x in imList ]
elif phase == 'TEST':
with open('NYUTest.txt', 'r') as fIn:
imList = fIn.readlines()
self.imList = [osp.join(self.imRoot, x.strip() ) for x in imList ]
self.normalList = [x.replace(imRoot, normalRoot) for x in self.imList ]
self.segList = [x.replace(imRoot, segRoot) for x in self.imList ]
self.depthList = [x.replace(imRoot, depthRoot).replace('.png', '.tiff') for x in self.imList]
print('Image Num: %d' % len(self.imList) )
# Permute the image list
self.count = len(self.imList )
self.perm = list(range(self.count ) )
if rseed is not None:
random.seed(0)
random.shuffle(self.perm )
def __len__(self):
return len(self.perm )
def __getitem__(self, ind):
ind = (ind % len(self.perm) )
if ind == 0:
random.shuffle(self.perm )
if self.phase == 'TRAIN':
            scale = np.random.random()
imCropWidth = int( np.round( (self.imWidthMax - self.imWidthMin ) * scale + self.imWidthMin ) )
imCropHeight = int( float(self.imHeight) / float(self.imWidth ) * imCropWidth )
rs = int(np.round( (480 - imCropHeight) * np.random.random() ) )
re = rs + imCropHeight
cs = int(np.round( (640 - imCropWidth) * np.random.random() ) )
ce = cs + imCropWidth
elif self.phase == 'TEST':
imCropWidth = self.imWidth
imCropHeight = self.imHeight
rs, re, cs, ce = 0, 480, 0, 640
segNormal = 0.5 * ( self.loadImage(self.segList[self.perm[ind] ], rs, re, cs, ce) + 1)[0:1, :, :]
# Read Image
im = 0.5 * (self.loadImage(self.imList[self.perm[ind] ], rs, re, cs, ce, isGama = True ) + 1)
# normalize the normal vector so that it will be unit length
normal = self.loadImage( self.normalList[self.perm[ind] ], rs, re, cs, ce )
normal = normal / np.sqrt(np.maximum(np.sum(normal * normal, axis=0), 1e-5) )[np.newaxis, :]
# Read depth
depth = self.loadDepth(self.depthList[self.perm[ind] ], rs, re, cs, ce )
if imCropHeight != self.imHeight or imCropWidth != self.imWidth:
depth = np.squeeze(depth, axis=0)
depth = cv2.resize(depth, (self.imWidth, self.imHeight), interpolation = cv2.INTER_LINEAR)
depth = depth[np.newaxis, :, :]
segDepth = np.logical_and(depth > 1, depth < 10).astype(np.float32 )
if imCropHeight != self.imHeight or imCropWidth != self.imWidth:
normal = normal.transpose([1, 2, 0] )
normal = cv2.resize(normal, (self.imWidth, self.imHeight), interpolation = cv2.INTER_LINEAR)
normal = normal.transpose([2, 0, 1] )
normal = normal / np.maximum(np.sqrt(np.sum(normal * normal, axis=0 )[np.newaxis, :, :] ), 1e-5)
if imCropHeight != self.imHeight or imCropWidth != self.imWidth:
segNormal = np.squeeze(segNormal, axis=0)
segNormal = cv2.resize(segNormal, (self.imWidth, self.imHeight), interpolation = cv2.INTER_LINEAR)
segNormal = segNormal[np.newaxis, :, :]
im = im.transpose([1, 2, 0] )
im = cv2.resize(im, (self.imWidth, self.imHeight), interpolation = cv2.INTER_LINEAR )
im = im.transpose([2, 0, 1] )
if self.phase == 'TRAIN':
if np.random.random() > 0.5:
normal = np.ascontiguousarray(normal[:, :, ::-1] )
normal[0, :, :] = -normal[0, :, :]
depth = np.ascontiguousarray(depth[:, :, ::-1] )
segNormal = np.ascontiguousarray(segNormal[:, :, ::-1] )
segDepth = np.ascontiguousarray(segDepth[:, :, ::-1] )
im = np.ascontiguousarray(im[:, :, ::-1] )
scale = 1 + ( np.random.random(3) * 0.4 - 0.2 )
scale = scale.reshape([3, 1, 1] )
im = im * scale
batchDict = {'normal': normal,
'depth': depth,
'segNormal': segNormal,
'segDepth': segDepth,
'im': im.astype(np.float32 ),
'name': self.imList[self.perm[ind] ]
}
return batchDict
def loadImage(self, imName, rs, re, cs, ce, isGama = False):
if not(osp.isfile(imName ) ):
print(imName )
assert(False )
im = cv2.imread(imName)
if len(im.shape ) == 3:
im = im[:, :, ::-1]
im = im[rs:re, cs:ce, :]
im = np.ascontiguousarray(im.astype(np.float32 ) )
if isGama:
im = (im / 255.0) ** 2.2
im = 2 * im - 1
else:
im = (im - 127.5) / 127.5
if len(im.shape) == 2:
im = im[:, np.newaxis]
im = np.transpose(im, [2, 0, 1] )
return im
def loadDepth(self, imName, rs, re, cs, ce ):
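        # read the depth map unchanged (cv2 flag -1 keeps the original bit depth), crop it and add a channel axis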
if not osp.isfile(imName):
print(imName )
assert(False )
im = cv2.imread(imName, -1)
im = im[rs:re, cs:ce]
im = im[np.newaxis, :, :]
return im
| 36.074713 | 110 | 0.547873 | [
"MIT"
] | Z7Gao/InverseRenderingOfIndoorScene | nyuDataLoader.py | 6,277 | Python |
# --------------------------------------------------------
# Adapted from Faster R-CNN (https://github.com/rbgirshick/py-faster-rcnn)
# Written by Danfei Xu
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
#from datasets.viz import viz_scene_graph
import data_utils
from IPython import embed
from utils.timer import Timer
def get_minibatch(roidb, num_classes):
"""Given a mini batch of roidb, construct a data blob from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
im_timer = Timer()
im_timer.tic()
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
im_timer.toc()
blobs = {'ims': im_blob}
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
rels_blob = np.zeros((0, 3), dtype=np.int32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
all_overlaps = []
box_idx_offset = 0
d_timer = Timer()
d_timer.tic()
for im_i in xrange(num_images):
# sample graph
roi_inds, rels = _sample_graph(roidb[im_i],
fg_rois_per_image,
rois_per_image,
num_neg_rels=cfg.TRAIN.NUM_NEG_RELS)
if rels.size == 0:
print('batch skipped')
return None
# gather all samples based on the sampled graph
rels, labels, overlaps, im_rois, bbox_targets, bbox_inside_weights =\
_gather_samples(roidb[im_i], roi_inds, rels, num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1)) #im id for roi_pooling
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
all_overlaps = np.hstack((all_overlaps, overlaps))
# offset the relationship reference idx the number of previously
# added box
rels_offset = rels.copy()
rels_offset[:, :2] += box_idx_offset
rels_blob = np.vstack([rels_blob, rels_offset])
box_idx_offset += rois.shape[0]
#viz_inds = np.where(overlaps == 1)[0] # ground truth
#viz_inds = npr.choice(np.arange(rois.shape[0]), size=50, replace=False) # random sample
#viz_inds = np.where(overlaps > cfg.TRAIN.FG_THRESH)[0] # foreground
#viz_scene_graph(im_blob[im_i], rois, labels, viz_inds, rels)
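    # assemble the output blobs: image RoIs, class labels, relation (subject, object) index pairs
    # with their predicate labels, and bbox regression targets/weights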
blobs['rois'] = rois_blob.copy()
blobs['labels'] = labels_blob.copy().astype(np.int32)
blobs['relations'] = rels_blob[:,:2].copy().astype(np.int32)
blobs['predicates'] = rels_blob[:,2].copy().astype(np.int32)
blobs['bbox_targets'] = bbox_targets_blob.copy()
blobs['bbox_inside_weights'] = bbox_inside_blob.copy()
blobs['bbox_outside_weights'] = \
np.array(bbox_inside_blob > 0).astype(np.float32).copy()
num_roi = rois_blob.shape[0]
num_rel = rels_blob.shape[0]
blobs['rel_rois'] = data_utils.compute_rel_rois(num_rel,
rois_blob,
rels_blob)
d_timer.toc()
graph_dict = data_utils.create_graph_data(num_roi, num_rel, rels_blob[:, :2])
for k in graph_dict:
blobs[k] = graph_dict[k]
return blobs
def _gather_samples(roidb, roi_inds, rels, num_classes):
"""
join all samples and produce sampled items
"""
rois = roidb['boxes']
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
# decide bg rois
bg_inds = np.where(overlaps < cfg.TRAIN.FG_THRESH)[0]
labels = labels.copy()
labels[bg_inds] = 0
labels = labels[roi_inds]
# print('num bg = %i' % np.where(labels==0)[0].shape[0])
# rois and bbox targets
overlaps = overlaps[roi_inds]
rois = rois[roi_inds]
# convert rel index
roi_ind_map = {}
for i, roi_i in enumerate(roi_inds):
roi_ind_map[roi_i] = i
for i, rel in enumerate(rels):
rels[i] = [roi_ind_map[rel[0]], roi_ind_map[rel[1]], rel[2]]
bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
roidb['bbox_targets'][roi_inds, :], num_classes)
return rels, labels, overlaps, rois, bbox_targets, bbox_inside_weights
def _sample_graph(roidb, num_fg_rois, num_rois, num_neg_rels=128):
"""
Sample a graph from the foreground rois of an image
roidb: roidb of an image
rois_per_image: maximum number of rois per image
"""
gt_rels = roidb['gt_relations']
# index of assigned gt box for foreground boxes
fg_gt_ind_assignments = roidb['fg_gt_ind_assignments']
# find all fg proposals that are mapped to a gt
gt_to_fg_roi_inds = {}
all_fg_roi_inds = []
for ind, gt_ind in fg_gt_ind_assignments.items():
if gt_ind not in gt_to_fg_roi_inds:
gt_to_fg_roi_inds[gt_ind] = []
gt_to_fg_roi_inds[gt_ind].append(ind)
all_fg_roi_inds.append(ind)
# print('gt rois = %i' % np.where(roidb['max_overlaps']==1)[0].shape[0])
# print('assigned gt = %i' % len(gt_to_fg_roi_inds.keys()))
# dedup the roi inds
all_fg_roi_inds = np.array(list(set(all_fg_roi_inds)))
# find all valid relations in fg objects
pos_rels = []
for rel in gt_rels:
for sub_i in gt_to_fg_roi_inds[rel[0]]:
for obj_i in gt_to_fg_roi_inds[rel[1]]:
pos_rels.append([sub_i, obj_i, rel[2]])
# print('num fg rois = %i' % all_fg_roi_inds.shape[0])
rels = []
rels_inds = []
roi_inds = []
if len(pos_rels) > 0:
# de-duplicate the relations
_, indices = np.unique(["{} {}".format(i, j) for i,j,k in pos_rels], return_index=True)
pos_rels = np.array(pos_rels)[indices, :]
# print('num pos rels = %i' % pos_rels.shape[0])
# construct graph based on valid relations
for rel in pos_rels:
roi_inds += rel[:2].tolist()
roi_inds = list(set(roi_inds)) # keep roi inds unique
rels.append(rel)
rels_inds.append(rel[:2].tolist())
if len(roi_inds) >= num_fg_rois:
break
# print('sampled rels = %i' % len(rels))
roi_candidates = np.setdiff1d(all_fg_roi_inds, roi_inds)
num_rois_to_sample = min(num_fg_rois - len(roi_inds), len(roi_candidates))
# if not enough rois, sample fg rois
if num_rois_to_sample > 0:
roi_sample = npr.choice(roi_candidates, size=num_rois_to_sample,
replace=False)
roi_inds = np.hstack([roi_inds, roi_sample])
# sample background relations
sample_rels = []
sample_rels_inds = []
for i in roi_inds:
for j in roi_inds:
if i != j and [i, j] not in rels_inds:
sample_rels.append([i,j,0])
sample_rels_inds.append([i,j])
if len(sample_rels) > 0:
# randomly sample negative edges to prevent no edges
num_neg_rels = np.minimum(len(sample_rels), num_neg_rels)
inds = npr.choice(np.arange(len(sample_rels)), size=num_neg_rels, replace=False)
rels += [sample_rels[i] for i in inds]
rels_inds += [sample_rels_inds[i] for i in inds]
# if still not enough rois, sample bg rois
num_rois_to_sample = num_rois - len(roi_inds)
if num_rois_to_sample > 0:
bg_roi_inds = _sample_bg_rois(roidb, num_rois_to_sample)
roi_inds = np.hstack([roi_inds, bg_roi_inds])
roi_inds = np.array(roi_inds).astype(np.int64)
# print('sampled rois = %i' % roi_inds.shape[0])
return roi_inds.astype(np.int64), np.array(rels).astype(np.int64)
def _sample_bg_rois(roidb, num_bg_rois):
"""
Sample rois from background
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
bg_inds = np.where(((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO)) |
(labels == 0))[0]
bg_rois_per_this_image = np.minimum(num_bg_rois, bg_inds.size)
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
return bg_inds
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = roidb[i]['image']() # use image getter
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind].astype(np.int64)
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
| 37.043046 | 96 | 0.633146 | [
"MIT"
] | Alex-Sol/scene-graph-TF-release | lib/roi_data_layer/minibatch.py | 11,187 | Python |
from __future__ import division, print_function
from openmdao.utils.assert_utils import assert_rel_error
import unittest
import numpy as np
from openaerostruct.geometry.utils import generate_mesh
from openaerostruct.geometry.geometry_group import Geometry
from openaerostruct.aerodynamics.aero_groups import AeroPoint
from openmdao.api import IndepVarComp, Problem, Group, NewtonSolver, ScipyIterativeSolver, LinearBlockGS, NonlinearBlockGS, DirectSolver, LinearBlockGS, PetscKSP, ScipyOptimizeDriver
class Test(unittest.TestCase):
def test(self):
# Create a dictionary to store options about the surface
mesh_dict = {'num_y' : 7,
'num_x' : 2,
'wing_type' : 'CRM',
'symmetry' : True,
'num_twist_cp' : 5}
mesh, twist_cp = generate_mesh(mesh_dict)
surf_dict = {
# Wing definition
'name' : 'wing', # name of the surface
'symmetry' : True, # if true, model one half of wing
# reflected across the plane y = 0
'S_ref_type' : 'wetted', # how we compute the wing area,
# can be 'wetted' or 'projected'
'fem_model_type' : 'tube',
'twist_cp' : twist_cp,
'mesh' : mesh,
# Aerodynamic performance of the lifting surface at
# an angle of attack of 0 (alpha=0).
# These CL0 and CD0 values are added to the CL and CD
# obtained from aerodynamic analysis of the surface to get
# the total CL and CD.
# These CL0 and CD0 values do not vary wrt alpha.
'CL0' : 0.0, # CL of the surface at alpha=0
'CD0' : 0.015, # CD of the surface at alpha=0
# Airfoil properties for viscous drag calculation
'k_lam' : 0.05, # percentage of chord with laminar
# flow, used for viscous drag
't_over_c_cp' : np.array([0.15]), # thickness over chord ratio (NACA0015)
'c_max_t' : .303, # chordwise location of maximum (NACA0015)
# thickness
'with_viscous' : True, # if true, compute viscous drag
'with_wave' : False, # if true, compute wave drag
}
# Create a dictionary to store options about the surface
mesh_dict = {'num_y' : 7,
'num_x' : 2,
'wing_type' : 'rect',
'symmetry' : True,
'offset' : np.array([50, 0., 0.])}
mesh = generate_mesh(mesh_dict)
surf_dict2 = {
# Wing definition
'name' : 'tail', # name of the surface
'symmetry' : True, # if true, model one half of wing
# reflected across the plane y = 0
'S_ref_type' : 'wetted', # how we compute the wing area,
# can be 'wetted' or 'projected'
'twist_cp' : twist_cp,
'mesh' : mesh,
# Aerodynamic performance of the lifting surface at
# an angle of attack of 0 (alpha=0).
# These CL0 and CD0 values are added to the CL and CD
# obtained from aerodynamic analysis of the surface to get
# the total CL and CD.
# These CL0 and CD0 values do not vary wrt alpha.
'CL0' : 0.0, # CL of the surface at alpha=0
'CD0' : 0.0, # CD of the surface at alpha=0
'fem_origin' : 0.35,
# Airfoil properties for viscous drag calculation
'k_lam' : 0.05, # percentage of chord with laminar
# flow, used for viscous drag
't_over_c_cp' : np.array([0.15]), # thickness over chord ratio (NACA0015)
'c_max_t' : .303, # chordwise location of maximum (NACA0015)
# thickness
'with_viscous' : True, # if true, compute viscous drag
'with_wave' : False, # if true, compute wave drag
}
surfaces = [surf_dict, surf_dict2]
# Create the problem and the model group
prob = Problem()
indep_var_comp = IndepVarComp()
indep_var_comp.add_output('v', val=248.136, units='m/s')
indep_var_comp.add_output('alpha', val=5., units='deg')
indep_var_comp.add_output('Mach_number', val=0.84)
indep_var_comp.add_output('re', val=1.e6, units='1/m')
indep_var_comp.add_output('rho', val=0.38, units='kg/m**3')
indep_var_comp.add_output('cg', val=np.zeros((3)), units='m')
prob.model.add_subsystem('prob_vars',
indep_var_comp,
promotes=['*'])
# Loop over each surface in the surfaces list
for surface in surfaces:
geom_group = Geometry(surface=surface)
# Add tmp_group to the problem as the name of the surface.
# Note that is a group and performance group for each
# individual surface.
prob.model.add_subsystem(surface['name'], geom_group)
# Loop through and add a certain number of aero points
for i in range(1):
# Create the aero point group and add it to the model
aero_group = AeroPoint(surfaces=surfaces)
point_name = 'aero_point_{}'.format(i)
prob.model.add_subsystem(point_name, aero_group)
# Connect flow properties to the analysis point
prob.model.connect('v', point_name + '.v')
prob.model.connect('alpha', point_name + '.alpha')
prob.model.connect('Mach_number', point_name + '.Mach_number')
prob.model.connect('re', point_name + '.re')
prob.model.connect('rho', point_name + '.rho')
prob.model.connect('cg', point_name + '.cg')
# Connect the parameters within the model for each aero point
for surface in surfaces:
name = surface['name']
# Connect the mesh from the geometry component to the analysis point
prob.model.connect(name + '.mesh', point_name + '.' + name + '.def_mesh')
# Perform the connections with the modified names within the
# 'aero_states' group.
prob.model.connect(name + '.mesh', point_name + '.aero_states.' + name + '_def_mesh')
prob.model.connect(name + '.t_over_c', point_name + '.' + name + '_perf.' + 't_over_c')
# Set up the problem
prob.setup()
prob.run_model()
assert_rel_error(self, prob['aero_point_0.wing_perf.CD'][0], 0.037210478659832125, 1e-6)
assert_rel_error(self, prob['aero_point_0.wing_perf.CL'][0], 0.5124736932248048, 1e-6)
assert_rel_error(self, prob['aero_point_0.CM'][1], -1.7028233361964462, 1e-6)
if __name__ == '__main__':
unittest.main()
| 44.714286 | 182 | 0.526624 | [
"Apache-2.0"
] | William-Metz/OASFORNEURALNETWORKS | openaerostruct/tests/test_multiple_aero_analysis.py | 7,512 | Python |
# Generated by Django 2.2.10 on 2020-04-12 20:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('staff', '0036_auto_20200407_1947'),
]
operations = [
migrations.CreateModel(
name='Messanger',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('whatsapp', models.BooleanField(default=True)),
('telegram', models.BooleanField(default=False)),
('viber', models.BooleanField(default=False)),
],
),
]
| 28.26087 | 114 | 0.581538 | [
"MIT"
] | SlavaSkvortsov/micro-shop | micro_shop/staff/migrations/0037_messanger.py | 650 | Python |
context = "https://www.w3.org/ns/activitystreams"
class Actor:
def __init__(self, user):
self.user = user
def render(self, base_url):
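        # merge the ActivityPub actor fields on top of the stored profile dict and expose the
        # user's collection and endpoint URLs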
actor = self.user.profile
actor.update({
"@context": context,
"preferredUsername": self.user.name,
"id": self.user.uri,
"following": self.user.following,
"followers": self.user.followers,
"inbox": self.user.inbox,
"outbox": self.user.outbox,
"liked": self.user.liked,
"url": self.user.uri,
"manuallyApprovesFollowers": False,
"publicKey": self.user.key.to_dict(),
"endpoints": {
# "sharedInbox": f"{base_url}/inbox"
"oauthTokenEndpoint": f"{base_url}/token"
}
})
return actor
def webfinger(self, resource):
return {
"subject": resource,
"aliases": [
self.user.alias,
self.user.uri
],
"links": [
# {
# "rel": "http://webfinger.net/rel/profile-page",
# "type": "text/html",
# "href": "{method}://mastodon.social/@user"
# },
{
"rel": "self",
"type": "application/activity+json",
"href": self.user.uri
}
# {
# "rel": "magic-public-key",
# "href": self.user.key.to_magic_key()
# },
]
}
def ordered_collection(coll_id, total, page, data):
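    # build an ActivityStreams OrderedCollection: when `total` is given, return the collection
    # wrapper (embedding the first page if there is data); otherwise return just the requested page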
collection = {
"@context": context
}
collection_page = {
"id": f"{coll_id}?page={page}",
"partOf": coll_id,
"totalItems": len(data),
"type": "OrderedCollectionPage",
"orderedItems": data,
}
if data:
collection["next"] = f"{coll_id}?page={page + 1}"
if total:
collection.update({
"id": coll_id,
"totalItems": total,
"type": "OrderedCollection",
})
if data:
collection["first"] = collection_page
else:
collection.update(collection_page)
return collection
| 25.932584 | 69 | 0.454939 | [
"BSD-3-Clause"
] | autogestion/pubgate | pubgate/renders.py | 2,308 | Python |
import pytest
from eth_utils import (
is_same_address,
)
from web3.utils.events import (
get_event_data,
)
# Ignore warning in pyethereum 1.6 - will go away with the upgrade
pytestmark = pytest.mark.filterwarnings("ignore:implicit cast from 'char *'")
@pytest.fixture()
def Emitter(web3, EMITTER):
return web3.eth.contract(**EMITTER)
@pytest.fixture()
def emitter(web3, Emitter, wait_for_transaction, wait_for_block, address_conversion_func):
wait_for_block(web3)
deploy_txn_hash = Emitter.constructor().transact({'from': web3.eth.coinbase, 'gas': 1000000})
deploy_receipt = web3.eth.waitForTransactionReceipt(deploy_txn_hash)
contract_address = address_conversion_func(deploy_receipt['contractAddress'])
bytecode = web3.eth.getCode(contract_address)
assert bytecode == Emitter.bytecode_runtime
_emitter = Emitter(address=contract_address)
assert _emitter.address == contract_address
return _emitter
@pytest.mark.parametrize(
'contract_fn,event_name,call_args,expected_args',
(
('logNoArgs', 'LogAnonymous', [], {}),
('logNoArgs', 'LogNoArguments', [], {}),
('logSingle', 'LogSingleArg', [12345], {'arg0': 12345}),
('logSingle', 'LogSingleWithIndex', [12345], {'arg0': 12345}),
('logSingle', 'LogSingleAnonymous', [12345], {'arg0': 12345}),
('logDouble', 'LogDoubleArg', [12345, 54321], {'arg0': 12345, 'arg1': 54321}),
('logDouble', 'LogDoubleAnonymous', [12345, 54321], {'arg0': 12345, 'arg1': 54321}),
('logDouble', 'LogDoubleWithIndex', [12345, 54321], {'arg0': 12345, 'arg1': 54321}),
(
'logTriple',
'LogTripleArg',
[12345, 54321, 98765],
{'arg0': 12345, 'arg1': 54321, 'arg2': 98765},
),
(
'logTriple',
'LogTripleWithIndex',
[12345, 54321, 98765],
{'arg0': 12345, 'arg1': 54321, 'arg2': 98765},
),
(
'logQuadruple',
'LogQuadrupleArg',
[12345, 54321, 98765, 56789],
{'arg0': 12345, 'arg1': 54321, 'arg2': 98765, 'arg3': 56789},
),
(
'logQuadruple',
'LogQuadrupleWithIndex',
[12345, 54321, 98765, 56789],
{'arg0': 12345, 'arg1': 54321, 'arg2': 98765, 'arg3': 56789},
),
)
)
def test_event_data_extraction(web3,
emitter,
wait_for_transaction,
emitter_log_topics,
emitter_event_ids,
contract_fn,
event_name,
call_args,
expected_args):
function = getattr(emitter.functions, contract_fn)
event_id = getattr(emitter_event_ids, event_name)
txn_hash = function(event_id, *call_args).transact()
txn_receipt = wait_for_transaction(web3, txn_hash)
assert len(txn_receipt['logs']) == 1
log_entry = txn_receipt['logs'][0]
event_abi = emitter._find_matching_event_abi(event_name)
event_topic = getattr(emitter_log_topics, event_name)
is_anonymous = event_abi['anonymous']
if is_anonymous:
assert event_topic not in log_entry['topics']
else:
assert event_topic in log_entry['topics']
event_data = get_event_data(event_abi, log_entry)
assert event_data['args'] == expected_args
assert event_data['blockHash'] == txn_receipt['blockHash']
assert event_data['blockNumber'] == txn_receipt['blockNumber']
assert event_data['transactionIndex'] == txn_receipt['transactionIndex']
assert is_same_address(event_data['address'], emitter.address)
assert event_data['event'] == event_name
def test_dynamic_length_argument_extraction(web3,
emitter,
wait_for_transaction,
emitter_log_topics,
emitter_event_ids):
string_0 = "this-is-the-first-string-which-exceeds-32-bytes-in-length"
string_1 = "this-is-the-second-string-which-exceeds-32-bytes-in-length"
txn_hash = emitter.functions.logDynamicArgs(string_0, string_1).transact()
txn_receipt = wait_for_transaction(web3, txn_hash)
assert len(txn_receipt['logs']) == 1
log_entry = txn_receipt['logs'][0]
event_abi = emitter._find_matching_event_abi('LogDynamicArgs')
event_topic = emitter_log_topics.LogDynamicArgs
assert event_topic in log_entry['topics']
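    # indexed dynamic-length arguments are stored in the log topics as their keccak hash,
    # so arg0 is recovered as the hash rather than the original string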
string_0_topic = web3.sha3(text=string_0)
assert string_0_topic in log_entry['topics']
event_data = get_event_data(event_abi, log_entry)
expected_args = {
"arg0": string_0_topic,
"arg1": string_1,
}
assert event_data['args'] == expected_args
assert event_data['blockHash'] == txn_receipt['blockHash']
assert event_data['blockNumber'] == txn_receipt['blockNumber']
assert event_data['transactionIndex'] == txn_receipt['transactionIndex']
assert is_same_address(event_data['address'], emitter.address)
assert event_data['event'] == 'LogDynamicArgs'
| 36.908451 | 97 | 0.617058 | [
"MIT"
] | JulianLiu/web3.py | tests/core/contracts/test_extracting_event_data_old.py | 5,241 | Python |
# Generated by Django 3.1.4 on 2020-12-31 11:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AsnDetailModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('asn_code', models.CharField(max_length=255, verbose_name='ASN Code')),
('asn_status', models.IntegerField(default=1, verbose_name='ASN Status')),
('supplier', models.CharField(max_length=255, verbose_name='ASN Supplier')),
('goods_code', models.CharField(max_length=255, verbose_name='Goods Code')),
('goods_qty', models.IntegerField(default=0, verbose_name='Goods QTY')),
('goods_actual_qty', models.IntegerField(default=0, verbose_name='Goods Actual QTY')),
('sorted_qty', models.IntegerField(default=0, verbose_name='Sorted QTY')),
('goods_shortage_qty', models.IntegerField(default=0, verbose_name='Goods Shortage QTY')),
('goods_more_qty', models.IntegerField(default=0, verbose_name='Goods More QTY')),
('goods_damage_qty', models.IntegerField(default=0, verbose_name='Goods damage QTY')),
('goods_weight', models.FloatField(default=0, verbose_name='Goods Weight')),
('goods_volume', models.FloatField(default=0, verbose_name='Goods Volume')),
('creater', models.CharField(max_length=11, verbose_name='Who Created')),
('openid', models.CharField(max_length=255, verbose_name='Openid')),
('is_delete', models.BooleanField(default=False, verbose_name='Delete Label')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Create Time')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='Update Time')),
],
options={
'verbose_name': 'data id',
'verbose_name_plural': 'data id',
'db_table': 'asndetail',
'ordering': ['-id'],
},
),
migrations.CreateModel(
name='AsnListModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('asn_code', models.CharField(max_length=255, verbose_name='ASN Code')),
('asn_status', models.IntegerField(default=1, verbose_name='ASN Status')),
('total_weight', models.FloatField(default=0, verbose_name='Total Weight')),
('total_volume', models.FloatField(default=0, verbose_name='Total Volume')),
('supplier', models.CharField(max_length=255, verbose_name='ASN Supplier')),
('creater', models.CharField(max_length=255, verbose_name='Who Created')),
('openid', models.CharField(max_length=255, verbose_name='Openid')),
('transportation_fee', models.JSONField(default=dict, verbose_name='Transportation Fee')),
('is_delete', models.BooleanField(default=False, verbose_name='Delete Label')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Create Time')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='Update Time')),
],
options={
'verbose_name': 'data id',
'verbose_name_plural': 'data id',
'db_table': 'asnlist',
'ordering': ['-id'],
},
),
]
| 55.970149 | 114 | 0.5976 | [
"Apache-2.0"
] | chinxianjun2016/GreaterWMS | asn/migrations/0001_initial.py | 3,750 | Python |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
"""
2021-02-06 kl 74.00% 82.28% 77.92% 0.8
2021-02-06 kl 75.25% 80.61% 77.84% 0.75
2021-02-06 kl 71.98% 83.89% 77.48% 0.85
"""
# ------------------------------------------------
VERSION = 'RetinaNet_ICDAR2015_KL_2x_20210205'
NET_NAME = 'resnet50_v1d' # 'MobilenetV2'
# ---------------------------------------- System
ROOT_PATH = os.path.abspath('../../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "3"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 10000 * 2
SUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')
TEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')
# ------------------------------------------ Train and test
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
ADD_BOX_IN_TENSORBOARD = True
MUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
ANGLE_WEIGHT = 0.5
REG_LOSS_MODE = 3
ALPHA = 1.0
BETA = 1.0
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 1e-3
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)
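# LR schedule: linear warm-up over a quarter of one save interval, then step decay at
# 12x, 16x and 20x SAVE_WEIGHTS_INTE; training stops at 20x SAVE_WEIGHTS_INTE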
# -------------------------------------------- Dataset
DATASET_NAME = 'ICDAR2015' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 1000
CLASS_NUM = 1
IMG_ROTATE = True
RGB2GRAY = False
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = False
# --------------------------------------------- Network
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
FPN_CHANNEL = 256
NUM_SUBNET_CONV = 4
FPN_MODE = 'fpn'
# --------------------------------------------- Anchor
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 90 # or 180
# -------------------------------------------- Head
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.8
# -------------------------------------------- KLD
KL_TAU = 2.0
KL_FUNC = 0
| 30.025862 | 105 | 0.64944 | [
"Apache-2.0"
] | Artcs1/PROBIOU | libs/configs_old/ICDAR2015/kl/cfgs_res50_icdar2015_kl_v2.py | 3,483 | Python |
import os
import re
from pathlib import Path
from logging import debug
from cli_ui import debug as verbose
from cli_ui import warning, fatal
from jinja2 import Environment, FileSystemLoader
from gitlabform import EXIT_INVALID_INPUT
from gitlabform.configuration import Configuration
from gitlabform.gitlab import GitLab
from gitlabform.gitlab.core import NotFoundException, UnexpectedResponseException
from gitlabform.processors.abstract_processor import AbstractProcessor
from gitlabform.processors.util.branch_protector import BranchProtector
class FilesProcessor(AbstractProcessor):
def __init__(self, gitlab: GitLab, config: Configuration, strict: bool):
super().__init__("files", gitlab)
self.config = config
self.strict = strict
self.branch_protector = BranchProtector(gitlab, strict)
def _process_configuration(self, project_and_group: str, configuration: dict):
for file in sorted(configuration["files"]):
debug("Processing file '%s'...", file)
if configuration.get("files|" + file + "|skip"):
debug("Skipping file '%s'", file)
continue
if configuration["files"][file]["branches"] == "all":
all_branches = self.gitlab.get_branches(project_and_group)
branches = sorted(all_branches)
elif configuration["files"][file]["branches"] == "protected":
protected_branches = self.gitlab.get_protected_branches(
project_and_group
)
branches = sorted(protected_branches)
else:
all_branches = self.gitlab.get_branches(project_and_group)
branches = []
for branch in configuration["files"][file]["branches"]:
if branch in all_branches:
branches.append(branch)
else:
message = f"! Branch '{branch}' not found, not processing file '{file}' in it"
if self.strict:
fatal(
message,
exit_code=EXIT_INVALID_INPUT,
)
else:
warning(message)
for branch in branches:
verbose(f"Processing file '{file}' in branch '{branch}'")
if configuration.get(
"files|" + file + "|content"
) and configuration.get("files|" + file + "|file"):
fatal(
f"File '{file}' in '{project_and_group}' has both `content` and `file` set - "
"use only one of these keys.",
exit_code=EXIT_INVALID_INPUT,
)
if configuration.get("files|" + file + "|delete"):
try:
self.gitlab.get_file(project_and_group, branch, file)
debug("Deleting file '%s' in branch '%s'", file, branch)
self.modify_file_dealing_with_branch_protection(
project_and_group,
branch,
file,
"delete",
configuration,
)
except NotFoundException:
debug(
"Not deleting file '%s' in branch '%s' (already doesn't exist)",
file,
branch,
)
else:
# change or create file
if configuration.get("files|" + file + "|content"):
new_content = configuration.get("files|" + file + "|content")
else:
path_in_config = Path(
configuration.get("files|" + file + "|file")
)
                        if path_in_config.is_absolute():
                            # absolute paths can be used directly
                            path = path_in_config
else:
# relative paths are relative to config file location
path = Path(
os.path.join(
self.config.config_dir, str(path_in_config)
)
)
new_content = path.read_text()
if configuration.get("files|" + file + "|template", True):
new_content = self.get_file_content_as_template(
new_content,
project_and_group,
**configuration.get("files|" + file + "|jinja_env", dict()),
)
try:
current_content = self.gitlab.get_file(
project_and_group, branch, file
)
if current_content != new_content:
if configuration.get("files|" + file + "|overwrite"):
debug("Changing file '%s' in branch '%s'", file, branch)
self.modify_file_dealing_with_branch_protection(
project_and_group,
branch,
file,
"modify",
configuration,
new_content,
)
else:
debug(
"Not changing file '%s' in branch '%s' - overwrite flag not set.",
file,
branch,
)
else:
debug(
"Not changing file '%s' in branch '%s' - it's content is already"
" as provided)",
file,
branch,
)
except NotFoundException:
debug("Creating file '%s' in branch '%s'", file, branch)
self.modify_file_dealing_with_branch_protection(
project_and_group,
branch,
file,
"add",
configuration,
new_content,
)
if configuration.get("files|" + file + "|only_first_branch"):
verbose("Skipping other branches for this file, as configured.")
break
def modify_file_dealing_with_branch_protection(
self,
project_and_group,
branch,
file,
operation,
configuration,
new_content=None,
):
# perhaps your user permissions are ok to just perform this operation regardless
# of the branch protection...
try:
self.just_modify_file(
project_and_group, branch, file, operation, configuration, new_content
)
except UnexpectedResponseException as e:
if (
e.response_status_code == 400
and "You are not allowed to push into this branch" in e.response_text
):
# ...but if not, then we can unprotect the branch, but only if we know how to
# protect it again...
if configuration.get("branches|" + branch + "|protected"):
debug(
f"> Temporarily unprotecting the branch to {operation} a file in it..."
)
self.branch_protector.unprotect_branch(project_and_group, branch)
else:
fatal(
f"Operation {operation} on file {file} in branch {branch} not permitted,"
f" but we don't have a branch protection configuration provided for this"
f" branch. Breaking as we cannot unprotect the branch as we would not know"
f" how to protect it again.",
EXIT_INVALID_INPUT,
)
try:
self.just_modify_file(
project_and_group,
branch,
file,
operation,
configuration,
new_content,
)
finally:
# ...and protect the branch again after the operation
if configuration.get("branches|" + branch + "|protected"):
debug("> Protecting the branch again.")
self.branch_protector.protect_branch(
project_and_group, configuration, branch
)
else:
raise e
def just_modify_file(
self,
project_and_group,
branch,
file,
operation,
configuration,
new_content=None,
):
if operation == "modify":
self.gitlab.set_file(
project_and_group,
branch,
file,
new_content,
self.get_commit_message_for_file_change("change", file, configuration),
)
elif operation == "add":
self.gitlab.add_file(
project_and_group,
branch,
file,
new_content,
self.get_commit_message_for_file_change("add", file, configuration),
)
elif operation == "delete":
self.gitlab.delete_file(
project_and_group,
branch,
file,
self.get_commit_message_for_file_change("delete", file, configuration),
)
def get_file_content_as_template(self, template, project_and_group, **kwargs):
# Use jinja with variables project and group
rtemplate = Environment(
loader=FileSystemLoader("."), autoescape=True
).from_string(template)
return rtemplate.render(
project=self.get_project(project_and_group),
group=self.get_group(project_and_group),
**kwargs,
)
@staticmethod
def get_commit_message_for_file_change(operation, file, configuration: dict):
commit_message = configuration.get(
"files|" + file + "|commit_message",
"Automated %s made by gitlabform" % operation,
)
# add '[skip ci]' to commit message to skip CI job, as documented at
# https://docs.gitlab.com/ee/ci/yaml/README.html#skipping-jobs
skip_build = configuration.get("files|" + file + "|skip_ci")
skip_build_str = " [skip ci]" if skip_build else ""
return "%s%s" % (commit_message, skip_build_str)
@staticmethod
def get_group(project_and_group):
return re.match("(.*)/.*", project_and_group).group(1)
@staticmethod
def get_project(project_and_group):
return re.match(".*/(.*)", project_and_group).group(1)
| 40.905594 | 102 | 0.471921 | ["MIT"] | egnyte/gitlabform | gitlabform/processors/project/files_processor.py | 11,699 | Python
from typing import Any, Dict, List, Union
import numpy as np
import logging
import os
# Create a custom logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class RandomSearch:
@staticmethod
def random_choice(args: List[Any], n: int = 1):
"""
pick a random element from a set.
Example:
        >> sampler = RandomSearch.random_choice([1, 2, 3])
>> sampler()
2
"""
choices = []
for arg in args:
choices.append(arg)
if n == 1:
return lambda: np.random.choice(choices, replace=False)
else:
return lambda: np.random.choice(choices, n, replace=False)
@staticmethod
def random_integer(low: Union[int, float], high: Union[int, float]):
"""
pick a random integer between two bounds
Example:
>> sampler = RandomSearch.random_integer(1, 10)
>> sampler()
9
"""
return lambda: int(np.random.randint(low, high))
@staticmethod
def random_loguniform(low: Union[float, int], high: Union[float, int]):
"""
pick a random float between two bounds, using loguniform distribution
Example:
>> sampler = RandomSearch.random_loguniform(1e-5, 1e-2)
>> sampler()
0.0004
"""
return lambda: np.exp(np.random.uniform(np.log(low), np.log(high)))
@staticmethod
def random_uniform(low: Union[float, int], high: Union[float, int]):
"""
pick a random float between two bounds, using uniform distribution
Example:
>> sampler = RandomSearch.random_uniform(0, 1)
>> sampler()
0.01
"""
return lambda: np.random.uniform(low, high)
class HyperparameterSearch:
def __init__(self, **kwargs):
self.search_space = {}
self.lambda_ = lambda: 0
for key, val in kwargs.items():
self.search_space[key] = val
def parse(self, val: Any):
        if isinstance(val, (int, np.integer)):
            return int(val)
        elif isinstance(val, (float, np.floating)):
return val
elif isinstance(val, (np.ndarray, list)):
return " ".join(val)
elif val is None:
return None
if isinstance(val, str):
return val
else:
val = val()
            if isinstance(val, (int, np.integer)):
return int(val)
elif isinstance(val, (np.ndarray, list)):
return " ".join(val)
else:
return val
def sample(self) -> Dict:
res = {}
for key, val in self.search_space.items():
try:
res[key] = self.parse(val)
except (TypeError, ValueError) as error:
logger.error(f"Could not parse key {key} with value {val}. {error}")
return res
def update_environment(self, sample) -> None:
for key, val in sample.items():
os.environ[key] = str(val)
SEARCH_SPACE = {
"penalty": RandomSearch.random_choice(["l1", "l2"]),
"C": RandomSearch.random_uniform(0, 1),
"solver": "liblinear",
"multi_class": "auto",
"tol": RandomSearch.random_loguniform(10e-5, 10e-3),
"stopwords": RandomSearch.random_choice([0, 1]),
"weight": RandomSearch.random_choice(["hash"]),
"ngram_range": RandomSearch.random_choice(["1 2", "2 3", "1 3"]),
"random_state": RandomSearch.random_integer(0, 100000)
}
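if __name__ == "__main__":
    # Illustrative usage only (not part of the original module): draw one configuration
    # from SEARCH_SPACE above and export it to the environment, as a downstream training
    # script might do. Sampled values differ from run to run.
    searcher = HyperparameterSearch(**SEARCH_SPACE)
    sampled = searcher.sample()            # e.g. {"penalty": "l2", "C": 0.42, ...}
    searcher.update_environment(sampled)   # each value is exported, e.g. os.environ["C"]
    print(sampled)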
BEST_HPS = {
"penalty": "l1",
"C": 0.977778,
"multi_class": "auto",
"solver": "liblinear",
"tol": 0.000816,
"ngram_range": "1 2",
"random_state": 44555,
"weight": "hash",
"stopwords": None
}
| 28.97037 | 84 | 0.538226 | ["Apache-2.0"] | kernelmachine/quality-filter | lr/hyperparameters.py | 3,911 | Python
import torch
from tqdm import tqdm
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import flask
from flask import Flask, request
from ...utils.deploy import get_free_tcp_port
from ...utils.learning import adjust_learning_rate
from ...utils.log import logger
from ...base.module import Module
from .config import DEVICE, DEFAULT_CONFIG
from .model import LMConfig, RNNLM
from .tool import lm_tool, light_tokenize, TEXT
class LM(Module):
def __init__(self):
self._model = None
self._word_vocab = None
def train(self, train_path, save_path=DEFAULT_CONFIG['save_path'], dev_path=None, vectors_path=None, log_dir=None,
**kwargs):
writer = SummaryWriter(log_dir=log_dir)
train_dataset = lm_tool.get_dataset(train_path)
if dev_path:
dev_dataset = lm_tool.get_dataset(dev_path)
word_vocab = lm_tool.get_vocab(train_dataset, dev_dataset)
else:
word_vocab = lm_tool.get_vocab(train_dataset)
self._word_vocab = word_vocab
config = LMConfig(word_vocab, save_path=save_path, vector_path=vectors_path, **kwargs)
train_iter = lm_tool.get_iterator(train_dataset, batch_size=config.batch_size,
bptt_len=config.bptt_len)
rnnlm = RNNLM(config)
self._model = rnnlm
optim = torch.optim.Adam(rnnlm.parameters(), lr=config.lr)
for epoch in range(config.epoch):
rnnlm.train()
acc_loss = 0
for item in tqdm(train_iter):
optim.zero_grad()
logits = rnnlm(item.text)
item_loss = F.cross_entropy(logits, item.target.view(-1))
acc_loss += item_loss.item()
item_loss.backward()
optim.step()
logger.info('epoch: {}, acc_loss: {}'.format(epoch, acc_loss))
writer.add_scalar('lm_train/acc_loss', acc_loss, epoch)
if dev_path:
dev_score = self._validate(dev_dataset)
logger.info('dev score:{}'.format(dev_score))
writer.add_scalar('lm_train/dev_score', dev_score, epoch)
writer.flush()
adjust_learning_rate(optim, config.lr / (1 + (epoch + 1) * config.lr_decay))
writer.close()
config.save()
rnnlm.save()
def load(self, save_path=DEFAULT_CONFIG['save_path']):
config = LMConfig.load(save_path)
rnnlm = RNNLM(config)
        rnnlm.load()
self._model = rnnlm
self._word_vocab = config.word_vocab
def test(self, test_path):
test_dataset = lm_tool.get_dataset(test_path)
if not hasattr(TEXT, 'vocab'):
TEXT.vocab = self._word_vocab
test_score = self._validate(test_dataset)
logger.info('test score:{}'.format(test_score))
def _validate(self, dev_dataset):
self._model.eval()
dev_score_list = []
dev_iter = lm_tool.get_iterator(dev_dataset, batch_size=DEFAULT_CONFIG['batch_size'],
bptt_len=DEFAULT_CONFIG['bptt_len'])
for dev_item in tqdm(dev_iter):
item_score = lm_tool.get_score(self._model, dev_item.text, dev_item.target)
dev_score_list.append(item_score)
# print(dev_score_list)
return sum(dev_score_list) / len(dev_score_list)
def _predict_next_word_max(self, sentence_list: list):
test_item = torch.tensor([[self._word_vocab.stoi[x]] for x in sentence_list], device=DEVICE)
pred_prob, pred_index = torch.max(torch.softmax(self._model(test_item)[-1], dim=0).cpu().data, dim=0)
pred_word = TEXT.vocab.itos[pred_index]
pred_prob = pred_prob.item()
return pred_word, pred_prob
def _predict_next_word_sample(self, sentence_list: list):
        # sample from the predicted distribution to get a stochastic result
test_item = torch.tensor([[self._word_vocab.stoi[x]] for x in sentence_list], device=DEVICE)
pred_index = torch.multinomial(torch.softmax(self._model(test_item)[-1], dim=0).cpu().data, 1)
pred_word = self._word_vocab.itos[pred_index]
return pred_word
def _predict_next_word_topk(self, sentence_list: list, topK=5):
        # get the topK candidate next words and their probabilities
test_item = torch.tensor([[self._word_vocab.stoi[x]] for x in sentence_list], device=DEVICE)
predict_softmax = torch.softmax(self._model(test_item)[-1], dim=0).cpu().data
topK_prob, topK_index = torch.topk(predict_softmax, topK)
topK_prob = topK_prob.tolist()
topK_vocab = [self._word_vocab.itos[x] for x in topK_index]
return list(zip(topK_vocab, topK_prob))
def _predict_next_word_prob(self, sentence_list: list, next_word: str):
test_item = torch.tensor([[self._word_vocab.stoi[x]] for x in sentence_list], device=DEVICE)
predict_prob = torch.softmax(self._model(test_item)[-1], dim=0).cpu().data
next_word_index = self._word_vocab.stoi[next_word]
return predict_prob[next_word_index]
def next_word(self, sentence: str, next_word: str):
self._model.eval()
temp_str = [x for x in light_tokenize(sentence)]
predict_prob = self._predict_next_word_prob(temp_str, next_word)
return predict_prob.item()
def _next_word_score(self, sentence: str, next_word: str):
self._model.eval()
temp_str = [x for x in light_tokenize(sentence)]
predict_prob = self._predict_next_word_prob(temp_str, next_word)
return torch.log10(predict_prob).item()
def next_word_topk(self, sentence: str, topK=5):
self._model.eval()
return self._predict_next_word_topk(sentence, topK)
def sentence_score(self, sentence: str):
self._model.eval()
total_score = 0
assert len(sentence) > 1
for i in range(1, len(sentence)):
temp_score = self._next_word_score(sentence[:i], sentence[i])
total_score += temp_score
return total_score
def _predict_sentence(self, sentence: str, gen_len=30):
results = []
temp_str = [x for x in light_tokenize(sentence)]
for i in range(gen_len):
temp_result = self._predict_next_word_sample(temp_str)
results.append(temp_result)
temp_str.append(temp_result)
return results
def generate_sentence(self, sentence: str, gen_len=30):
self._model.eval()
results = self._predict_sentence(sentence, gen_len)
predict_sen = ''.join([x for x in results])
return sentence + predict_sen
def deploy(self, route_path="/lm", host="localhost", port=None, debug=False):
app = Flask(__name__)
@app.route(route_path + "/next_word", methods=['POST', 'GET'])
def next_word():
sentence = request.args.get('sentence', '')
word = request.args.get('word', '')
result = self.next_word(sentence, word)
return flask.jsonify({
'state': 'OK',
'result': {
'prob': result
}
})
@app.route(route_path + "/generate_sentence", methods=['POST', 'GET'])
def generate_sentence():
sentence = request.args.get('sentence', '')
gen_len = int(request.args.get('gen_len', 30))
result = self.generate_sentence(sentence, gen_len)
return flask.jsonify({
'state': 'OK',
'result': {
'sentence': result
}
})
@app.route(route_path + "/next_word_topk", methods=['POST', 'GET'])
def next_word_topk():
sentence = request.args.get('sentence', '')
topk = int(request.args.get('topk', 5))
result = self.next_word_topk(sentence, topK=topk)
return flask.jsonify({
'state': 'OK',
'result': {
'words': result
}
})
@app.route(route_path + "/sentence_score", methods=['POST', 'GET'])
def sentence_score():
sentence = request.args.get('sentence', '')
result = self.sentence_score(sentence)
return flask.jsonify({
'state': 'OK',
'result': {
'score': result
}
})
if not port:
port = get_free_tcp_port()
app.run(host=host, port=port, debug=debug)
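if __name__ == "__main__":
    # Illustrative usage only; the corpus paths and save directory below are placeholders,
    # not files shipped with lightNLP.
    lm = LM()
    lm.train('/path/to/train.txt', save_path='./lm_saves', dev_path='/path/to/dev.txt')
    lm.load('./lm_saves')
    print(lm.next_word_topk('the quick brown ', topK=5))  # top-5 next-token candidates with probabilities
    print(lm.sentence_score('the quick brown fox'))       # summed log10 probability score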
| 40.69378 | 118 | 0.6107 | ["Apache-2.0"] | smilelight/lightNLP | lightnlp/tg/lm/module.py | 8,565 | Python
import concurrent.futures
import threading
from asyncio import coroutines
from asyncio.events import AbstractEventLoop
from asyncio.futures import Future
import attr
import uuid
import asyncio
from asyncio import ensure_future
from typing import Any, Union, Coroutine, Callable, Generator, TypeVar, \
Awaitable
from merceedge.settings import (
logger_access,
logger_code,
logger_console
)
# pylint: disable=invalid-name
T = TypeVar('T')
CALLABLE_T = TypeVar('CALLABLE_T', bound=Callable)
CALLBACK_TYPE = Callable[[], None]
# pylint: enable=invalid-name
_LOGGER = logger_code
try:
# pylint: disable=invalid-name
asyncio_run = asyncio.run # type: ignore
except AttributeError:
_LOGGER.info("env <python 3.7")
_T = TypeVar('_T')
def asyncio_run(main: Awaitable[_T], *, debug: bool = False) -> _T:
"""Minimal re-implementation of asyncio.run (since 3.7)."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.set_debug(debug)
try:
return loop.run_until_complete(main)
finally:
print('loop final')
asyncio.set_event_loop(None)
loop.close()
def callback(func: CALLABLE_T) -> CALLABLE_T:
"""Annotation to mark method as safe to call from within the event loop."""
setattr(func, '_edge_callback', True)
return func
def is_callback(func: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop."""
return getattr(func, '_edge_callback', False) is True
@attr.s(slots=True, frozen=True)
class Context:
"""The context that triggered something."""
user_id = attr.ib(
type=str,
default=None,
)
id = attr.ib(
type=str,
default=attr.Factory(lambda: uuid.uuid4().hex),
)
def as_dict(self) -> dict:
"""Return a dictionary representation of the context."""
return {
'id': self.id,
'user_id': self.user_id,
}
def _set_result_unless_cancelled(fut: Future, result: Any) -> None:
"""Set the result only if the Future was not cancelled."""
if fut.cancelled():
return
fut.set_result(result)
def _set_concurrent_future_state(
concurr: concurrent.futures.Future,
source: Union[concurrent.futures.Future, Future]) -> None:
"""Copy state from a future to a concurrent.futures.Future."""
assert source.done()
if source.cancelled():
concurr.cancel()
if not concurr.set_running_or_notify_cancel():
return
exception = source.exception()
if exception is not None:
concurr.set_exception(exception)
else:
result = source.result()
concurr.set_result(result)
def _copy_future_state(source: Union[concurrent.futures.Future, Future],
dest: Union[concurrent.futures.Future, Future]) -> None:
"""Copy state from another Future.
The other Future may be a concurrent.futures.Future.
"""
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(exception)
else:
result = source.result()
dest.set_result(result)
def _chain_future(
source: Union[concurrent.futures.Future, Future],
destination: Union[concurrent.futures.Future, Future]) -> None:
"""Chain two futures so that when one completes, so does the other.
The result (or exception) of source will be copied to destination.
If destination is cancelled, source gets cancelled too.
Compatible with both asyncio.Future and concurrent.futures.Future.
"""
if not isinstance(source, (Future, concurrent.futures.Future)):
raise TypeError('A future is required for source argument')
if not isinstance(destination, (Future, concurrent.futures.Future)):
raise TypeError('A future is required for destination argument')
# pylint: disable=protected-access
if isinstance(source, Future):
source_loop = source._loop # type: ignore
else:
source_loop = None
if isinstance(destination, Future):
dest_loop = destination._loop # type: ignore
else:
dest_loop = None
def _set_state(future: Union[concurrent.futures.Future, Future],
other: Union[concurrent.futures.Future, Future]) -> None:
if isinstance(future, Future):
_copy_future_state(other, future)
else:
_set_concurrent_future_state(future, other)
def _call_check_cancel(
destination: Union[concurrent.futures.Future, Future]) -> None:
if destination.cancelled():
if source_loop is None or source_loop is dest_loop:
source.cancel()
else:
source_loop.call_soon_threadsafe(source.cancel)
def _call_set_state(
source: Union[concurrent.futures.Future, Future]) -> None:
if dest_loop is None or dest_loop is source_loop:
_set_state(destination, source)
else:
dest_loop.call_soon_threadsafe(_set_state, destination, source)
destination.add_done_callback(_call_check_cancel)
source.add_done_callback(_call_set_state)
def run_coroutine_threadsafe(
coro: Union[Coroutine, Generator],
loop: AbstractEventLoop) -> concurrent.futures.Future:
"""Submit a coroutine object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
if not coroutines.iscoroutine(coro):
raise TypeError('A coroutine object is required')
future = concurrent.futures.Future() # type: concurrent.futures.Future
def callback() -> None:
"""Handle the call to the coroutine."""
try:
_chain_future(ensure_future(coro, loop=loop), future)
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(callback)
return future
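# A minimal usage sketch (not part of the original module): the loop runs in a worker
# thread, and another thread submits a coroutine and waits on the returned
# concurrent.futures.Future.
#
#   loop = asyncio.new_event_loop()
#   threading.Thread(target=loop.run_forever, daemon=True).start()
#
#   async def add(a, b):
#       return a + b
#
#   fut = run_coroutine_threadsafe(add(1, 2), loop)
#   assert fut.result(timeout=5) == 3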
def fire_coroutine_threadsafe(coro: Coroutine,
loop: AbstractEventLoop) -> None:
"""Submit a coroutine object to a given event loop.
This method does not provide a way to retrieve the result and
is intended for fire-and-forget use. This reduces the
work involved to fire the function on the loop.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
if not coroutines.iscoroutine(coro):
raise TypeError('A coroutine object is required: %s' % coro)
def callback() -> None:
"""Handle the firing of a coroutine."""
ensure_future(coro, loop=loop)
loop.call_soon_threadsafe(callback)
def run_callback_threadsafe(loop: AbstractEventLoop, callback: Callable,
*args: Any) -> concurrent.futures.Future:
"""Submit a callback object to a given event loop.
Return a concurrent.futures.Future to access the result.
NOTE: This code references home-assistant.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
future = concurrent.futures.Future() # type: concurrent.futures.Future
def run_callback() -> None:
"""Run callback and store result."""
try:
future.set_result(callback(*args))
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(run_callback)
    return future
| 33.296 | 79 | 0.658097 | ["Apache-2.0"] | hobo0cn/MerceEdge | merceedge/util/async_util.py | 8,324 | Python
import requests
import json
import os
import copy
import smtplib
import jwt
from datetime import datetime, timedelta
# SMTP library
from string import Template  # string template module
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from django.core.validators import validate_email, ValidationError
# email templete
from .templates import EMAIL_TEMPLATE
from django.http import HttpResponseRedirect
# my settings
from my_settings import (
EMAIL_ADDRESS,
EMAIL_PASSWORD,
SECRET,
ALGORITHM,
)
from django.shortcuts import redirect
from django.conf import settings
from django.contrib.auth import get_user_model
from django.http import JsonResponse, HttpResponse
from django.db.models import Q
from .models import User, Corrector, Applicant
from .serializer import (
ApplicantSerializer,
UserSerializer,
CorrectorSerializer,
)
# from rest_framework.views import APIView
from rest_framework import viewsets
from allauth.socialaccount.providers.kakao import views as kakao_views
from allauth.socialaccount.providers.kakao.views import KakaoOAuth2Adapter
from allauth.socialaccount.providers.oauth2.client import OAuth2Client
from rest_auth.registration.views import SocialLoginView
from rest_framework.decorators import (
api_view,
permission_classes,
authentication_classes,
)
from rest_framework.permissions import IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
ip = "192.168.0.137"
class ApplicantView(viewsets.ModelViewSet):
queryset = Applicant.objects.all()
serializer_class = ApplicantSerializer
class UserView(viewsets.ViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
User = get_user_model()
def kakaoSignup(request):
# def post(self, request):
access_token = request.headers["Authorization"]
profile_request = requests.post(
"https://kapi.kakao.com/v2/user/me",
headers={"Authorization": f"Bearer {access_token}"},
)
profile_json = profile_request.json()
kakao_id = profile_request.json().get("id")
print("profile_json : ", profile_json)
kakao_account = profile_json.get("kakao_account")
print("kakao_account : ", kakao_account)
email = kakao_account.get("email", None)
profile = kakao_account.get("profile")
print("profile : ", profile)
nickname = profile.get("nickname")
profile_image = profile.get("profile_image_url")
data = {"access_token": access_token}
accept = requests.post("http://localhost:8000/accounts/rest-auth/kakao/", data=data)
accept_json = accept.json()
print(accept_json)
accept_jwt = accept_json.get("token")
try:
user = User.objects.get(kakao_id=kakao_id)
return JsonResponse(
{
"access_token": accept_jwt,
"name": nickname,
"image": profile_image,
"is_corrector": user.is_corrector,
"is_phone_auth": user.is_phone_auth,
},
status=200,
)
# redirect('http://localhost:3000/')
except User.DoesNotExist:
User.objects.filter(email=email).update(
kakao_id=kakao_id, name=nickname, thumbnail_image=profile_image,
)
user = User.objects.get(kakao_id=kakao_id)
return JsonResponse(
{
"access_token": accept_jwt,
"name": nickname,
"image": profile_image,
"is_corrector": user.is_corrector,
"is_phone_auth": user.is_phone_auth,
},
status=200,
)
class KakaoLoginView(SocialLoginView):
adapter_class = kakao_views.KakaoOAuth2Adapter
client_class = OAuth2Client
class EmailHTMLContent:
"""e메일에 담길 컨텐츠"""
def __init__(self, str_subject, template, template_params):
"""string template과 딕셔너리형 template_params받아 MIME 메시지를 만든다"""
assert isinstance(template, Template)
assert isinstance(template_params, dict)
self.msg = MIMEMultipart()
        # set the e-mail subject
        self.msg["Subject"] = str_subject
        # build the e-mail body, substituting ${variable} placeholders
        str_msg = template.safe_substitute(**template_params)
        # wrap the string as a MIME HTML part
mime_msg = MIMEText(str_msg, "html")
self.msg.attach(mime_msg)
def get_message(self, str_from_email_addr, str_to_email_addr):
"""발신자, 수신자리스트를 이용하여 보낼메시지를 만든다 """
send_msg = copy.deepcopy(self.msg)
send_msg["From"] = str_from_email_addr # 발신자
# ",".join(str_to_email_addrs) : 수신자리스트 2개 이상인 경우
send_msg["To"] = str_to_email_addr
return send_msg
class EmailSender:
"""e메일 발송자"""
def __init__(self, str_host, num_port):
"""호스트와 포트번호로 SMTP로 연결한다 """
self.str_host = str_host
self.num_port = num_port
self.smtp_connect = smtplib.SMTP(host=str_host, port=num_port)
        # comment out the two lines below if SMTP authentication is not required
        self.smtp_connect.starttls()  # start TLS (Transport Layer Security)
        self.smtp_connect.login(EMAIL_ADDRESS, EMAIL_PASSWORD)  # account and password for the mail server
def send_message(self, emailContent, str_from_email_addr, str_to_email_addr):
"""e메일을 발송한다 """
contents = emailContent.get_message(str_from_email_addr, str_to_email_addr)
self.smtp_connect.send_message(
contents, from_addr=str_from_email_addr, to_addrs=str_to_email_addr
)
del contents
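# Illustrative only (addresses below are placeholders): how the two helpers above combine
# to send a templated HTML mail through Gmail's SMTP relay.
#
#   content = EmailHTMLContent(
#       "Greetings", Template("<p>Hello ${name}!</p>"), {"name": "Jane"})
#   sender = EmailSender("smtp.gmail.com", 587)
#   sender.send_message(content, EMAIL_ADDRESS, "someone@example.com")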
class EmailAuthenticationView(viewsets.ModelViewSet):
_COMMON_EMAIL_ADDRESS = [
"hanmail.net",
"hotmail.com",
"yahoo.co.kr",
"yahoo.com",
"hanmir.com",
"nate.com",
"dreamwiz.com",
"freechal.com",
"teramail.com",
"metq.com",
"lycos.co.kr",
"chol.com",
"korea.com",
".edu",
".ac.kr",
# "naver.com"
]
serializer_class = CorrectorSerializer
@permission_classes((IsAuthenticated,))
@authentication_classes((JSONWebTokenAuthentication,))
@classmethod
def create(cls, request, *args, **kwargs):
try:
company_email = request.data["email"] # company email
user_email = request.user.email
if User.objects.filter(email=user_email, is_corrector=True).exists():
return JsonResponse({"message": "ALREADY AUTHENTICATED"}, status=406)
user = User.objects.get(email=user_email)
try:
# email validator
validate_email(company_email)
Corrector(user=user, company_email=company_email).save()
except ValidationError:
return JsonResponse({"message": "INVALID EMAIL"}, status=400)
except PermissionError:
return JsonResponse({"message": "PERMISSION ERROR"}, status=401)
if company_email.split("@")[1] in cls._COMMON_EMAIL_ADDRESS:
return JsonResponse(
{"message": "NOT COMPANY EMAIL ADDRESS"}, status=400
)
str_host = "smtp.gmail.com"
num_port = 587 # SMTP Port
emailSender = EmailSender(str_host, num_port)
str_subject = "[픽소서] EMAIL 인증을 완료해주세요!" # e메일 제목
auth_token = jwt.encode(
{"email": company_email, "name": user.name},
SECRET["secret"],
algorithm=ALGORITHM,
).decode("utf-8")
template = Template(EMAIL_TEMPLATE)
template_params = {
"From": EMAIL_ADDRESS,
"Token": auth_token,
"BackendIp": ip,
}
emailHTMLContent = EmailHTMLContent(str_subject, template, template_params)
            str_from_email_addr = EMAIL_ADDRESS  # sender
            str_to_email_addr = company_email  # recipient; use a list for two or more
emailSender.send_message(
emailHTMLContent, str_from_email_addr, str_to_email_addr
)
User.objects.filter(email=user.email).update(email_auth_token=auth_token)
return JsonResponse(
{"message": "EMAIL SENT", "EMAIL_AUTH_TOKEN": auth_token}, status=200
)
except KeyError as e:
return JsonResponse({"message": f"KEY ERROR {e}"}, status=400)
class EmailAuthSuccView(viewsets.ModelViewSet):
serializer_class = UserSerializer
queryset = User.objects.all()
def create(self, request, *args, **kwargs):
auth_token = self.kwargs["auth_token"]
user_queryset = self.queryset.filter(email_auth_token=auth_token)
try:
if user_queryset.exists():
user = User.objects.get(
email_auth_token=user_queryset.first().email_auth_token
)
user.is_corrector = True
user.save()
return HttpResponseRedirect(f"http://localhost:3000/")
return JsonResponse({"message": "USER DOES NOT EXIST"}, status=400)
except jwt.exceptions.DecodeError:
return JsonResponse({"message": "INVALID TOKEN"}, status=400)
except jwt.exceptions.ExpiredSignatureError:
return JsonResponse({"message": "EXPIRED TOKEN"}, status=400)
| 39.587234 | 88 | 0.646136 | ["MIT"] | Einsicht1/recipe-app-api | app/user/example.py | 9,725 | Python
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pickle
from typing import List
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace
from rl_coach.utilities.shared_running_stats import NumpySharedRunningStats, NumpySharedRunningStats
class ObservationNormalizationFilter(ObservationFilter):
"""
Normalizes the observation values with a running mean and standard deviation of
all the observations seen so far. The normalization is performed element-wise. Additionally, when working with
multiple workers, the statistics used for the normalization operation are accumulated over all the workers.
"""
def __init__(self, clip_min: float=-5.0, clip_max: float=5.0, name='observation_stats'):
"""
:param clip_min: The minimum value to allow after normalizing the observation
:param clip_max: The maximum value to allow after normalizing the observation
"""
super().__init__()
self.clip_min = clip_min
self.clip_max = clip_max
self.running_observation_stats = None
self.name = name
self.supports_batching = True
self.observation_space = None
def set_device(self, device, memory_backend_params=None, mode='numpy') -> None:
"""
An optional function that allows the filter to get the device if it is required to use tensorflow ops
:param device: the device to use
:memory_backend_params: if not None, holds params for a memory backend for sharing data (e.g. Redis)
:param mode: the arithmetic module to use {'tf' | 'numpy'}
:return: None
"""
if mode == 'tf':
from rl_coach.architectures.tensorflow_components.shared_variables import TFSharedRunningStats
self.running_observation_stats = TFSharedRunningStats(device, name=self.name, create_ops=False,
pubsub_params=memory_backend_params)
elif mode == 'numpy':
self.running_observation_stats = NumpySharedRunningStats(name=self.name,
pubsub_params=memory_backend_params)
def set_session(self, sess) -> None:
"""
An optional function that allows the filter to get the session if it is required to use tensorflow ops
:param sess: the session
:return: None
"""
self.running_observation_stats.set_session(sess)
def filter(self, observations: List[ObservationType], update_internal_state: bool=True) -> ObservationType:
observations = np.array(observations)
if update_internal_state:
self.running_observation_stats.push(observations)
self.last_mean = self.running_observation_stats.mean
self.last_stdev = self.running_observation_stats.std
return self.running_observation_stats.normalize(observations)
def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
self.running_observation_stats.set_params(shape=input_observation_space.shape,
clip_values=(self.clip_min, self.clip_max))
return input_observation_space
def save_state_to_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
self.running_observation_stats.save_state_to_checkpoint(checkpoint_dir, checkpoint_prefix)
def restore_state_from_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
self.running_observation_stats.restore_state_from_checkpoint(checkpoint_dir, checkpoint_prefix)
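if __name__ == "__main__":
    # Standalone numpy sketch (not rl_coach code) of the element-wise normalization this
    # filter performs: a mean/std accumulated over all observations seen so far, applied
    # to a new observation and clipped to [clip_min, clip_max].
    history = np.random.randn(1000, 4) * 3.0 + 7.0           # pretend past observations
    mean, std = history.mean(axis=0), history.std(axis=0)
    new_obs = np.random.randn(4) * 3.0 + 7.0
    print(np.clip((new_obs - mean) / (std + 1e-8), -5.0, 5.0))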
| 47.977778 | 114 | 0.712367 | ["Apache-2.0"] | AustinDeric/coach | rl_coach/filters/observation/observation_normalization_filter.py | 4,318 | Python
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class SaharaException(Exception):
"""Base Exception for the project
    To use this class correctly, inherit from it and define
    'message' and 'code' properties.
"""
message = "An unknown exception occurred"
code = "UNKNOWN_EXCEPTION"
def __str__(self):
return self.message
def __init__(self):
super(SaharaException, self).__init__(
'%s: %s' % (self.code, self.message))
class NotFoundException(SaharaException):
message = "Object '%s' is not found"
# It could be a various property of object which was not found
value = None
def __init__(self, value, message=None):
self.code = "NOT_FOUND"
self.value = value
if message:
self.message = message % value
class NameAlreadyExistsException(SaharaException):
message = "Name already exists"
def __init__(self, message=None):
self.code = "NAME_ALREADY_EXISTS"
if message:
self.message = message
class InvalidCredentials(SaharaException):
message = "Invalid credentials"
def __init__(self, message=None):
self.code = "INVALID_CREDENTIALS"
if message:
self.message = message
class InvalidException(SaharaException):
message = "Invalid object reference"
def __init__(self, message=None):
self.code = "INVALID_REFERENCE"
if message:
self.message = message
class RemoteCommandException(SaharaException):
message = "Error during command execution: \"%s\""
def __init__(self, cmd, ret_code=None, stdout=None,
stderr=None):
self.code = "REMOTE_COMMAND_FAILED"
self.cmd = cmd
self.ret_code = ret_code
self.stdout = stdout
self.stderr = stderr
self.message = self.message % cmd
if ret_code:
self.message += '\nReturn code: ' + str(ret_code)
if stderr:
self.message += '\nSTDERR:\n' + stderr
if stdout:
self.message += '\nSTDOUT:\n' + stdout
self.message = self.message.decode('ascii', 'ignore')
class InvalidDataException(SaharaException):
"""General exception to use for invalid data
A more useful message should be passed to __init__ which
tells the user more about why the data is invalid.
"""
message = "Data is invalid"
code = "INVALID_DATA"
def __init__(self, message=None):
if message:
self.message = message
class BadJobBinaryInternalException(SaharaException):
message = ("Job binary internal data must be a string of length "
"greater than zero")
def __init__(self, message=None):
if message:
self.message = message
self.code = "BAD_JOB_BINARY"
class BadJobBinaryException(SaharaException):
message = ("To work with JobBinary located in internal swift add 'user'"
" and 'password' to extra")
def __init__(self, message=None):
if message:
self.message = message
self.code = "BAD_JOB_BINARY"
class DBDuplicateEntry(SaharaException):
message = "Database object already exists"
code = "DB_DUPLICATE_ENTRY"
def __init__(self, message=None):
if message:
self.message = message
class DeletionFailed(SaharaException):
message = "Object was not deleted"
code = "DELETION_FAILED"
def __init__(self, message=None):
if message:
self.message = message
class MissingFloatingNetworkException(SaharaException):
def __init__(self, ng_name):
self.message = ("Node Group %s is missing 'floating_ip_pool' "
"field" % ng_name)
self.code = "MISSING_FLOATING_NETWORK"
class SwiftClientException(SaharaException):
'''General wrapper object for swift client exceptions
This exception is intended for wrapping the message from a
swiftclient.ClientException in a SaharaException. The ClientException
should be caught and an instance of SwiftClientException raised instead.
'''
def __init__(self, message):
self.message = message
self.code = "SWIFT_CLIENT_EXCEPTION"
class DataTooBigException(SaharaException):
message = "Size of data (%s) is greater than maximum (%s)"
def __init__(self, size, maximum, message=None):
if message:
self.message = message
self.message = self.message % (size, maximum)
self.code = "DATA_TOO_BIG"
class ThreadException(SaharaException):
def __init__(self, thread_description, e):
self.message = "An error occurred in thread '%s': %s" % (
thread_description, str(e))
self.code = "THREAD_EXCEPTION"
class NotImplementedException(SaharaException):
code = "NOT_IMPLEMENTED"
def __init__(self, feature):
self.message = "Feature '%s' is not implemented" % feature
class HeatStackException(SaharaException):
def __init__(self, heat_stack_status):
self.code = "HEAT_STACK_EXCEPTION"
self.message = "Heat stack failed with status %s" % heat_stack_status
class ConfigurationError(SaharaException):
code = "CONFIGURATION_ERROR"
def __init__(self, message):
self.message = message
class IncorrectStateError(SaharaException):
code = "INCORRECT_STATE_ERROR"
def __init__(self, message):
self.message = message
class SystemError(SaharaException):
code = "SYSTEM_ERROR"
def __init__(self, message):
self.message = message
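if __name__ == "__main__":
    # Minimal illustration (not part of Sahara) of the code/message pattern used above.
    try:
        raise NotFoundException("cluster-42", message="Cluster '%s' is not found")
    except SaharaException as error:
        print(error.code, '-', error.message)  # NOT_FOUND - Cluster 'cluster-42' is not found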
| 27.926941 | 77 | 0.665468 | ["Apache-2.0"] | hortonworksqe/sahara | sahara/exceptions.py | 6,116 | Python
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-07-29 06:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0495_auto_20200729_1345'),
]
operations = [
migrations.AlterField(
model_name='globalsettings',
name='key',
field=models.CharField(choices=[('document_object_disposal_period', 'Document Object Disposal Period'), ('licence_renew_days', 'Licence Renewal Period Days'), ('physical_object_disposal_period', 'Physical Object Disposal Period')], max_length=255, unique=True),
),
]
| 32.857143 | 273 | 0.684058 | ["Apache-2.0"] | Djandwich/wildlifecompliance | wildlifecompliance/migrations/0496_auto_20200729_1408.py | 690 | Python
from django.db import models
from django.db.models.query import QuerySet, Q
from django.db.models.base import ModelBase
from django.db.models.fields.related import RelatedField
from django.conf import settings
from utils import NestedSet
from signals import pre_publish, post_publish
# this takes some inspiration from the publisher stuff in
# django-cms 2.0
# e.g. http://github.com/digi604/django-cms-2.0/blob/master/publisher/models.py
#
# but we want this to be a reusable/standalone app and have a few different needs
#
class PublishException(Exception):
pass
class UnpublishException(Exception):
pass
class PublishableQuerySet(QuerySet):
def changed(self):
'''all draft objects that have not been published yet'''
return self.filter(Publishable.Q_CHANGED)
def deleted(self):
'''public objects that need deleting'''
return self.filter(Publishable.Q_DELETED)
def draft(self):
'''all draft objects'''
return self.filter(Publishable.Q_DRAFT)
def draft_and_deleted(self):
return self.filter(Publishable.Q_DRAFT | Publishable.Q_DELETED)
def published(self):
'''all public/published objects'''
return self.filter(Publishable.Q_PUBLISHED)
def publish(self, all_published=None):
'''publish all models in this queryset'''
if all_published is None:
all_published = NestedSet()
for p in self:
p.publish(all_published=all_published)
def delete(self, mark_for_deletion=True):
'''
override delete so that we call delete on each object separately, as delete needs
to set some flags etc
'''
for p in self:
p.delete(mark_for_deletion=mark_for_deletion)
class PublishableManager(models.Manager):
def get_query_set(self):
return PublishableQuerySet(self.model)
def changed(self):
'''all draft objects that have not been published yet'''
return self.get_query_set().changed()
def deleted(self):
'''public objects that need deleting'''
return self.get_query_set().deleted()
def draft(self):
'''all draft objects'''
return self.get_query_set().draft()
def draft_and_deleted(self):
return self.get_query_set().draft_and_deleted()
def published(self):
'''all public/published objects'''
return self.get_query_set().published()
class PublishableBase(ModelBase):
def __new__(cls, name, bases, attrs):
new_class = super(PublishableBase, cls).__new__(cls, name, bases, attrs)
# insert an extra permission in for "Can publish"
# as well as a "method" to find name of publish_permission for this object
opts = new_class._meta
name = u'Can publish %s' % opts.verbose_name
code = u'publish_%s' % opts.object_name.lower()
opts.permissions = tuple(opts.permissions) + ((code, name), )
opts.get_publish_permission = lambda: code
return new_class
class Publishable(models.Model):
__metaclass__ = PublishableBase
PUBLISH_DEFAULT = 0
PUBLISH_CHANGED = 1
PUBLISH_DELETE = 2
PUBLISH_CHOICES = ((PUBLISH_DEFAULT, 'Published'), (PUBLISH_CHANGED, 'Changed'), (PUBLISH_DELETE, 'To be deleted'))
# make these available here so can easily re-use them in other code
Q_PUBLISHED = Q(is_public=True)
Q_DRAFT = Q(is_public=False) & ~Q(publish_state=PUBLISH_DELETE)
Q_CHANGED = Q(is_public=False, publish_state=PUBLISH_CHANGED)
Q_DELETED = Q(is_public=False, publish_state=PUBLISH_DELETE)
is_public = models.BooleanField(default=False, editable=False, db_index=True)
publish_state = models.IntegerField('Publication status', editable=False, db_index=True, choices=PUBLISH_CHOICES, default=PUBLISH_DEFAULT)
public = models.OneToOneField('self', related_name='draft', null=True,
editable=False, on_delete=models.SET_NULL)
class Meta:
abstract = True
class PublishMeta(object):
publish_exclude_fields = ['id', 'is_public', 'publish_state', 'public', 'draft']
publish_reverse_fields = []
publish_functions = {}
@classmethod
def _combined_fields(cls, field_name):
fields = []
for clazz in cls.__mro__:
fields.extend(getattr(clazz, field_name, []))
return fields
@classmethod
def excluded_fields(cls):
return cls._combined_fields('publish_exclude_fields')
@classmethod
def reverse_fields_to_publish(cls):
return cls._combined_fields('publish_reverse_fields')
@classmethod
def find_publish_function(cls, field_name, default_function):
'''
Search to see if there is a function to copy the given field over.
Function should take same params as setattr()
'''
for clazz in cls.__mro__:
publish_functions = getattr(clazz, 'publish_functions', {})
fn = publish_functions.get(field_name, None)
if fn:
return fn
return default_function
objects = PublishableManager()
def is_marked_for_deletion(self):
return self.publish_state == Publishable.PUBLISH_DELETE
def get_public_absolute_url(self):
if self.public:
get_absolute_url = getattr(self.public, 'get_absolute_url', None)
if get_absolute_url:
return get_absolute_url()
return None
def save(self, mark_changed=True, *arg, **kw):
if not self.is_public and mark_changed:
if self.publish_state == Publishable.PUBLISH_DELETE:
raise PublishException("Attempting to save model marked for deletion")
self.publish_state = Publishable.PUBLISH_CHANGED
super(Publishable, self).save(*arg, **kw)
def delete(self, mark_for_deletion=True):
if self.public and mark_for_deletion:
self.publish_state = Publishable.PUBLISH_DELETE
self.save(mark_changed=False)
else:
super(Publishable, self).delete()
def undelete(self):
self.publish_state = Publishable.PUBLISH_CHANGED
self.save(mark_changed=False)
def _pre_publish(self, dry_run, all_published, deleted=False):
if not dry_run:
sender = self.__class__
pre_publish.send(sender=sender, instance=self, deleted=deleted)
def _post_publish(self, dry_run, all_published, deleted=False):
if not dry_run:
# we need to make sure we get the instance that actually
# got published (in case it was indirectly published elsewhere)
sender = self.__class__
instance = all_published.original(self)
post_publish.send(sender=sender, instance=instance, deleted=deleted)
def publish(self, dry_run=False, all_published=None, parent=None):
'''
either publish changes or deletions, depending on
whether this model is public or draft.
public models will be examined to see if they need deleting
and deleted if so.
'''
if self.is_public:
raise PublishException("Cannot publish public model - publish should be called from draft model")
if self.pk is None:
raise PublishException("Please save model before publishing")
if self.publish_state == Publishable.PUBLISH_DELETE:
self.publish_deletions(dry_run=dry_run, all_published=all_published, parent=parent)
return None
else:
return self.publish_changes(dry_run=dry_run, all_published=all_published, parent=parent)
def unpublish(self, dry_run=False):
'''
unpublish models by deleting public model
'''
if self.is_public:
raise UnpublishException("Cannot unpublish a public model - unpublish should be called from draft model")
if self.pk is None:
raise UnpublishException("Please save the model before unpublishing")
public_model = self.public
if public_model and not dry_run:
self.public = None
self.save()
public_model.delete(mark_for_deletion=False)
return public_model
def _get_public_or_publish(self, *arg, **kw):
# only publish if we don't yet have an id for the
# public model
if self.public:
return self.public
return self.publish(*arg, **kw)
def _get_through_model(self, field_object):
'''
Get the "through" model associated with this field.
Need to handle things differently for Django1.1 vs Django1.2
In 1.1 through is a string and through_model has class
In 1.2 through is the class
'''
through = field_object.rel.through
if through:
if isinstance(through, basestring):
return field_object.rel.through_model
return through
return None
def _changes_need_publishing(self):
return self.publish_state == Publishable.PUBLISH_CHANGED or not self.public
def publish_changes(self, dry_run=False, all_published=None, parent=None):
'''
        publish changes to the model - basically copy all of its content to another copy in the
database.
if you set dry_run=True nothing will be written to the database. combined with
the all_published value one can therefore get information about what other models
would be affected by this function
'''
assert not self.is_public, "Cannot publish public model - publish should be called from draft model"
assert self.pk is not None, "Please save model before publishing"
# avoid mutual recursion
if all_published is None:
all_published = NestedSet()
if self in all_published:
return all_published.original(self).public
all_published.add(self, parent=parent)
self._pre_publish(dry_run, all_published)
public_version = self.public
if not public_version:
public_version = self.__class__(is_public=True)
excluded_fields = self.PublishMeta.excluded_fields()
reverse_fields_to_publish = self.PublishMeta.reverse_fields_to_publish()
if self._changes_need_publishing():
# copy over regular fields
for field in self._meta.fields:
if field.name in excluded_fields:
continue
value = getattr(self, field.name)
if isinstance(field, RelatedField):
related = field.rel.to
if issubclass(related, Publishable):
if value is not None:
value = value._get_public_or_publish(dry_run=dry_run, all_published=all_published, parent=self)
if not dry_run:
publish_function = self.PublishMeta.find_publish_function(field.name, setattr)
publish_function(public_version, field.name, value)
# save the public version and update
# state so we know everything is up-to-date
if not dry_run:
public_version.save()
self.public = public_version
self.publish_state = Publishable.PUBLISH_DEFAULT
self.save(mark_changed=False)
# copy over many-to-many fields
for field in self._meta.many_to_many:
name = field.name
if name in excluded_fields:
continue
m2m_manager = getattr(self, name)
public_objs = list(m2m_manager.all())
field_object, model, direct, m2m = self._meta.get_field_by_name(name)
through_model = self._get_through_model(field_object)
if through_model:
# see if we can work out which reverse relationship this is
# see if we are using our own "through" table or not
if issubclass(through_model, Publishable):
# this will be db name (e.g. with _id on end)
m2m_reverse_name = field_object.m2m_reverse_name()
for reverse_field in through_model._meta.fields:
if reverse_field.column == m2m_reverse_name:
related_name = reverse_field.name
related_field = getattr(through_model, related_name).field
reverse_name = related_field.related.get_accessor_name()
reverse_fields_to_publish.append(reverse_name)
break
continue # m2m via through table won't be dealt with here
related = field_object.rel.to
if issubclass(related, Publishable):
public_objs = [p._get_public_or_publish(dry_run=dry_run, all_published=all_published, parent=self) for p in public_objs]
if not dry_run:
public_m2m_manager = getattr(public_version, name)
old_objs = public_m2m_manager.exclude(pk__in=[p.pk for p in public_objs])
public_m2m_manager.remove(*old_objs)
public_m2m_manager.add(*public_objs)
# one-to-many and one-to-one reverse relations
for obj in self._meta.get_all_related_objects():
if issubclass(obj.model, Publishable):
name = obj.get_accessor_name()
if name in excluded_fields:
continue
if name not in reverse_fields_to_publish:
continue
if obj.field.rel.multiple:
related_items = getattr(self, name).all()
else:
try:
related_items = [getattr(self, name)]
except obj.model.DoesNotExist:
related_items = []
for related_item in related_items:
related_item.publish(dry_run=dry_run, all_published=all_published, parent=self)
# make sure we tidy up anything that needs deleting
if self.public and not dry_run:
if obj.field.rel.multiple:
public_ids = [r.public_id for r in related_items]
deleted_items = getattr(self.public, name).exclude(pk__in=public_ids)
deleted_items.delete(mark_for_deletion=False)
self._post_publish(dry_run, all_published)
return public_version
def publish_deletions(self, all_published=None, parent=None, dry_run=False):
'''
actually delete models that have been marked for deletion
'''
if self.publish_state != Publishable.PUBLISH_DELETE:
return
if all_published is None:
all_published = NestedSet()
if self in all_published:
return
all_published.add(self, parent=parent)
self._pre_publish(dry_run, all_published, deleted=True)
for related in self._meta.get_all_related_objects():
if not issubclass(related.model, Publishable):
continue
name = related.get_accessor_name()
if name in self.PublishMeta.excluded_fields():
continue
try:
instances = getattr(self, name).all()
except AttributeError:
instances = [getattr(self, name)]
for instance in instances:
instance.publish_deletions(all_published=all_published, parent=self, dry_run=dry_run)
if not dry_run:
public = self.public
self.delete(mark_for_deletion=False)
if public:
public.delete(mark_for_deletion=False)
self._post_publish(dry_run, all_published, deleted=True)
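# Illustrative sketch only (the Article model below is hypothetical, not part of
# django-publish): the typical draft/public flow implemented by publish_changes() above.
#
#   class Article(Publishable):
#       title = models.CharField(max_length=100)
#
#   draft = Article.objects.create(title='Hello')   # saved as a draft, state = CHANGED
#   public = draft.publish()                        # copies fields onto the public row
#   assert draft.public == public and public.is_public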
if getattr(settings, 'TESTING_PUBLISH', False):
# classes to test that publishing etc work ok
from datetime import datetime
class Site(models.Model):
title = models.CharField(max_length=100)
domain = models.CharField(max_length=100)
class FlatPage(Publishable):
url = models.CharField(max_length=100, db_index=True)
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
enable_comments = models.BooleanField()
template_name = models.CharField(max_length=70, blank=True)
registration_required = models.BooleanField()
sites = models.ManyToManyField(Site)
class Meta:
ordering = ['url']
def get_absolute_url(self):
if self.is_public:
return self.url
return '%s*' % self.url
class Author(Publishable):
name = models.CharField(max_length=100)
profile = models.TextField(blank=True)
class PublishMeta(Publishable.PublishMeta):
publish_reverse_fields = ['authorprofile']
class AuthorProfile(Publishable):
author = models.OneToOneField(Author)
extra_profile = models.TextField(blank=True)
class ChangeLog(models.Model):
changed = models.DateTimeField(db_index=True, auto_now_add=True)
message = models.CharField(max_length=200)
class Tag(models.Model):
title = models.CharField(max_length=100, unique=True)
slug = models.CharField(max_length=100)
# publishable model with a reverse relation to
# page (as a child)
class PageBlock(Publishable):
page=models.ForeignKey('Page')
content = models.TextField(blank=True)
# non-publishable reverse relation to page (as a child)
class Comment(models.Model):
page=models.ForeignKey('Page')
comment = models.TextField()
def update_pub_date(page, field_name, value):
# ignore value entirely and replace with now
setattr(page, field_name, update_pub_date.pub_date)
update_pub_date.pub_date = datetime.now()
class Page(Publishable):
slug = models.CharField(max_length=100, db_index=True)
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
pub_date = models.DateTimeField(default=datetime.now)
parent = models.ForeignKey('self', blank=True, null=True)
authors = models.ManyToManyField(Author, blank=True)
log = models.ManyToManyField(ChangeLog, blank=True)
tags = models.ManyToManyField(Tag, through='PageTagOrder', blank=True)
class Meta:
ordering = ['slug']
class PublishMeta(Publishable.PublishMeta):
publish_exclude_fields = ['log']
publish_reverse_fields = ['pageblock_set']
publish_functions = { 'pub_date': update_pub_date }
def get_absolute_url(self):
if not self.parent:
return u'/%s/' % self.slug
return '%s%s/' % (self.parent.get_absolute_url(), self.slug)
class PageTagOrder(Publishable):
# note these are named in non-standard way to
# ensure we are getting correct names
tagged_page=models.ForeignKey(Page)
page_tag=models.ForeignKey(Tag)
tag_order=models.IntegerField()
| 38.249027 | 142 | 0.625992 | ["BSD-3-Clause"] | team-35/django-publish | publish/models.py | 19,660 | Python
import datetime
import numpy as np
import os
import pandas as pd
import psycopg2
from dotenv import load_dotenv, find_dotenv
from flask import current_app as app
from flask import json, jsonify, request
load_dotenv()
############################################################################################################
'''Verify the credentials before running deployment. '''
############################################################################################################
@app.route("/")
def home_view():
return "<h1>Welcome to Sauti DS</h1>"
@app.route('/verifyconn', methods=['GET'])
def verify_db_conn():
'''
Verifies the connection to the db.
'''
try:
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
return 'Connection verified.'
except:
return 'Connection failed.'
finally:
if (labs_conn):
labs_conn.close()
@app.errorhandler(404)
def page_not_found(e):
    return '<h1>Error 404</h1><p> Sorry, I cannot show anything around here.</p><img src="/static/404.png">', 404
###############################################################
############# Pulling all the data from tables. #############
###############################################################
@app.route("/wholesale/data-quality/")
def get_table_dqws():
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
Q_select_all = """SELECT * FROM qc_wholesale_observed_price;"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rows = labs_curs.fetchall()
df = pd.DataFrame(rows, columns=[
"id", "market", "product", "source",
"start", "end", "timeliness", "data_length",
"completeness", "duplicates", "mode_D", "data_points",
"DQI", "DQI_cat"
])
Q_select_all = """SELECT * FROM markets;"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rowsM = labs_curs.fetchall()
dfM = pd.DataFrame(rowsM, columns=["id", "market_id", "market_name", "country_code"])
Q_select_all = """SELECT id, source_name FROM sources;"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rowsM = labs_curs.fetchall()
dfS = pd.DataFrame(rowsM, columns=["id", "source_name"])
labs_curs.close()
labs_conn.close()
# print("Cursor and Connection Closed.")
merged = df.merge(dfM, left_on='market', right_on='market_id')
merged["id"] = merged["id_x"]
merged = merged.drop(["id_x", "id_y", "market_id"], axis=1)
merged = merged.merge(dfS, left_on='source', right_on='id')
merged["id"] = merged["id_x"]
merged = merged.drop(["id_x", "id_y", "source"], axis=1)
cols = ['id', 'market_name','country_code', 'product', 'source_name', 'start', 'end', 'timeliness',
'data_length', 'completeness', 'duplicates', 'mode_D', 'data_points',
'DQI', 'DQI_cat']
merged = merged[cols]
    merged['start'] = merged['start'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
merged['end'] = merged['end'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
merged['price_category'] = "wholesale"
merged['DQI'] = merged['DQI'].apply(lambda x: round(x,4) if type(x) == float else None)
merged['completeness'] = (merged['completeness'].apply(lambda x: round(x*100,2) if type(x) == float else None)).astype(str) + ' %'
result = []
for _, row in merged.iterrows():
result.append(dict(row))
return jsonify(result)
@app.route("/retail/data-quality/")
def get_table_dqrt():
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
Q_select_all = """SELECT * FROM qc_retail_observed_price;"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rows = labs_curs.fetchall()
df = pd.DataFrame(rows, columns=[
"id", "market", "product", "source",
"start", "end", "timeliness", "data_length",
"completeness", "duplicates", "mode_D", "data_points",
"DQI", "DQI_cat"
])
Q_select_all = """SELECT * FROM markets;"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rowsM = labs_curs.fetchall()
dfM = pd.DataFrame(rowsM, columns=["id", "market_id", "market_name", "country_code"])
Q_select_all = """SELECT id, source_name FROM sources;"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rowsM = labs_curs.fetchall()
dfS = pd.DataFrame(rowsM, columns=["id", "source_name"])
labs_curs.close()
labs_conn.close()
# print("Cursor and Connection Closed.")
merged = df.merge(dfM, left_on='market', right_on='market_id')
merged["id"] = merged["id_x"]
merged = merged.drop(["id_x", "id_y", "market_id"], axis=1)
merged = merged.merge(dfS, left_on='source', right_on='id')
merged["id"] = merged["id_x"]
merged = merged.drop(["id_x", "id_y", "source"], axis=1)
cols = ['id', 'market_name','country_code', 'product', 'source_name', 'start', 'end', 'timeliness',
'data_length', 'completeness', 'duplicates', 'mode_D', 'data_points',
'DQI', 'DQI_cat']
merged = merged[cols]
    merged['start'] = merged['start'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
merged['end'] = merged['end'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
merged['price_category'] = "retail"
merged['DQI'] = merged['DQI'].apply(lambda x: round(x,4) if type(x) == float else None)
merged['completeness'] = (merged['completeness'].apply(lambda x: round(x*100,2) if type(x) == float else None)).astype(str) + ' %'
result = []
for _, row in merged.iterrows():
result.append(dict(row))
return jsonify(result)
@app.route("/wholesale/price-status/")
def get_table_psws():
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
Q_select_all = """SELECT product_name, market_name, country_code,
source_name, currency_code, date_price,
observed_price, observed_alps_class, alps_type_method,
alps_stressness, observed_arima_alps_class, arima_alps_stressness
FROM wholesale_prices;"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rows = labs_curs.fetchall()
df = pd.DataFrame(rows, columns= [
"product_name", "market_name", "country_code", "source_name",
"currency_code", "date_price", "observed_price",
"observed_alps_class", "alps_type_method", "alps_stressness",
"observed_arima_alps_class", "arima_alps_stressness"
])
labs_curs.close()
labs_conn.close()
# print("Cursor and Connection Closed.")
df['date_price'] = df['date_price'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
df['alps_stressness'] = df['alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['alps_stressness'] = df['alps_stressness'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'] + ' ('+ df['alps_stressness'] + ' %)'
df['alps_type_method'] = df['alps_type_method'].astype(str)
df['arima_alps_stressness'] = df['arima_alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['arima_alps_stressness'] = df['arima_alps_stressness'].astype(str)
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].astype(str) + ' ('+ df['arima_alps_stressness'] + ' %)'
df['observed_alps_class'] = df['observed_alps_class'].replace('None (nan %)', 'Not available')
df['alps_stressness'] = df['alps_stressness'].replace('nan', 'Not available')
df['alps_type_method'] = df['alps_type_method'].replace('None', 'Not available')
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].replace('None (nan %)', 'Not available')
df['arima_alps_stressness'] = df['arima_alps_stressness'].replace('nan', 'Not available')
df['price_category'] = "wholesale"
result = []
for _, row in df.iterrows():
result.append(dict(row))
return json.dumps(result, indent=4)
@app.route("/retail/price-status/")
def get_table_psrt():
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
Q_select_all = """SELECT product_name, market_name, country_code,
source_name, currency_code, date_price,
observed_price, observed_alps_class, alps_type_method,
alps_stressness, observed_arima_alps_class, arima_alps_stressness
FROM retail_prices;"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rows = labs_curs.fetchall()
df = pd.DataFrame(rows, columns= [
"product_name", "market_name", "country_code", "source_name",
"currency_code", "date_price", "observed_price",
"observed_alps_class", "alps_type_method", "alps_stressness",
"observed_arima_alps_class", "arima_alps_stressness"
])
labs_curs.close()
labs_conn.close()
# print("Cursor and Connection Closed.")
df['date_price'] = df['date_price'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
df['alps_stressness'] = df['alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['alps_stressness'] = df['alps_stressness'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'] + ' ('+ df['alps_stressness'] + ' %)'
df['alps_type_method'] = df['alps_type_method'].astype(str)
df['arima_alps_stressness'] = df['arima_alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['arima_alps_stressness'] = df['arima_alps_stressness'].astype(str)
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].astype(str) + ' ('+ df['arima_alps_stressness'] + ' %)'
df['observed_alps_class'] = df['observed_alps_class'].replace('None (nan %)', 'Not available')
df['alps_stressness'] = df['alps_stressness'].replace('nan', 'Not available')
df['alps_type_method'] = df['alps_type_method'].replace('None', 'Not available')
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].replace('None (nan %)', 'Not available')
df['arima_alps_stressness'] = df['arima_alps_stressness'].replace('nan', 'Not available')
df['price_category'] = "retail"
result = []
for _, row in df.iterrows():
result.append(dict(row))
return json.dumps(result, indent=4)
@app.route("/wholesale/labeled/")
def get_table_psws_labeled():
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
Q_select_all = """SELECT product_name, market_name, country_code,
source_name, currency_code, date_price,
observed_price, observed_alps_class, alps_type_method,
alps_stressness, observed_arima_alps_class, arima_alps_stressness
FROM wholesale_prices
WHERE observed_alps_class IS NOT NULL
OR observed_arima_alps_class IS NOT NULL;
"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rows = labs_curs.fetchall()
df = pd.DataFrame(rows, columns= [
"product_name", "market_name", "country_code", "source_name",
"currency_code", "date_price", "observed_price",
"observed_alps_class", "alps_type_method", "alps_stressness",
"observed_arima_alps_class", "arima_alps_stressness"
])
labs_curs.close()
labs_conn.close()
# print("Cursor and Connection Closed.")
df['date_price'] = df['date_price'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
df['alps_stressness'] = df['alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['alps_stressness'] = df['alps_stressness'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'] + ' ('+ df['alps_stressness'] + ' %)'
df['alps_type_method'] = df['alps_type_method'].astype(str)
df['arima_alps_stressness'] = df['arima_alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['arima_alps_stressness'] = df['arima_alps_stressness'].astype(str)
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].astype(str) + ' ('+ df['arima_alps_stressness'] + ' %)'
df['observed_alps_class'] = df['observed_alps_class'].replace('None (nan %)', 'Not available')
df['alps_stressness'] = df['alps_stressness'].replace('nan', 'Not available')
df['alps_type_method'] = df['alps_type_method'].replace('None', 'Not available')
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].replace('None (nan %)', 'Not available')
df['arima_alps_stressness'] = df['arima_alps_stressness'].replace('nan', 'Not available')
df['price_category'] = "wholesale"
result = []
for _, row in df.iterrows():
result.append(dict(row))
return jsonify(result)
@app.route("/retail/labeled/")
def get_table_psrt_labeled():
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
Q_select_all = """SELECT product_name, market_name, country_code,
source_name, currency_code, date_price,
observed_price, observed_alps_class, alps_type_method,
alps_stressness, observed_arima_alps_class, arima_alps_stressness
                        FROM retail_prices
WHERE observed_alps_class IS NOT NULL
OR observed_arima_alps_class IS NOT NULL;
"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rows = labs_curs.fetchall()
df = pd.DataFrame(rows, columns= [
"product_name", "market_name", "country_code", "source_name",
"currency_code", "date_price", "observed_price",
"observed_alps_class", "alps_type_method", "alps_stressness",
"observed_arima_alps_class", "arima_alps_stressness"
])
labs_curs.close()
labs_conn.close()
# print("Cursor and Connection Closed.")
df['date_price'] = df['date_price'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
df['alps_stressness'] = df['alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['alps_stressness'] = df['alps_stressness'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'] + ' ('+ df['alps_stressness'] + ' %)'
df['alps_type_method'] = df['alps_type_method'].astype(str)
df['arima_alps_stressness'] = df['arima_alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['arima_alps_stressness'] = df['arima_alps_stressness'].astype(str)
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].astype(str) + ' ('+ df['arima_alps_stressness'] + ' %)'
df['observed_alps_class'] = df['observed_alps_class'].replace('None (nan %)', 'Not available')
df['alps_stressness'] = df['alps_stressness'].replace('nan', 'Not available')
df['alps_type_method'] = df['alps_type_method'].replace('None', 'Not available')
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].replace('None (nan %)', 'Not available')
df['arima_alps_stressness'] = df['arima_alps_stressness'].replace('nan', 'Not available')
df['price_category'] = "retail"
result = []
for _, row in df.iterrows():
result.append(dict(row))
return jsonify(result)
@app.route("/wholesale/labeled/latest/")
def get_table_psws_labeled_latest():
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
Q_select_all = """SELECT product_name, market_name, country_code,
source_name, currency_code, date_price,
observed_price, observed_alps_class, alps_type_method,
alps_stressness, observed_arima_alps_class, arima_alps_stressness
FROM wholesale_prices
WHERE observed_alps_class IS NOT NULL
OR observed_arima_alps_class IS NOT NULL;
"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rows = labs_curs.fetchall()
df = pd.DataFrame(rows, columns= [
"product_name", "market_name", "country_code", "source_name",
"currency_code", "date_price", "observed_price",
"observed_alps_class", "alps_type_method", "alps_stressness",
"observed_arima_alps_class", "arima_alps_stressness"
])
labs_curs.close()
labs_conn.close()
# print("Cursor and Connection Closed.")
list_to_drop = df[df.sort_values(by=['date_price'], ascending=False).duplicated(['product_name', 'market_name', 'source_name','currency_code'], keep='first')].index
df = df.drop(labels = list_to_drop, axis=0)
df['date_price'] = df['date_price'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
df['alps_stressness'] = df['alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['alps_stressness'] = df['alps_stressness'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'] + ' ('+ df['alps_stressness'] + ' %)'
df['alps_type_method'] = df['alps_type_method'].astype(str)
df['arima_alps_stressness'] = df['arima_alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['arima_alps_stressness'] = df['arima_alps_stressness'].astype(str)
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].astype(str) + ' ('+ df['arima_alps_stressness'] + ' %)'
df['observed_alps_class'] = df['observed_alps_class'].replace('None (nan %)', 'Not available')
df['alps_stressness'] = df['alps_stressness'].replace('nan', 'Not available')
df['alps_type_method'] = df['alps_type_method'].replace('None', 'Not available')
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].replace('None (nan %)', 'Not available')
df['arima_alps_stressness'] = df['arima_alps_stressness'].replace('nan', 'Not available')
df['price_category'] = "wholesale"
result = []
for _, row in df.iterrows():
result.append(dict(row))
return jsonify(result)
@app.route("/retail/labeled/latest/")
def get_table_psrt_labeled_latest():
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
Q_select_all = """SELECT product_name, market_name, country_code,
source_name, currency_code, date_price,
observed_price, observed_alps_class, alps_type_method,
alps_stressness, observed_arima_alps_class, arima_alps_stressness
                        FROM retail_prices
WHERE observed_alps_class IS NOT NULL
OR observed_arima_alps_class IS NOT NULL;
"""
labs_curs.execute(Q_select_all)
# print("\nSELECT * Query Excecuted.")
rows = labs_curs.fetchall()
df = pd.DataFrame(rows, columns= [
"product_name", "market_name", "country_code", "source_name",
"currency_code", "date_price", "observed_price",
"observed_alps_class", "alps_type_method", "alps_stressness",
"observed_arima_alps_class", "arima_alps_stressness"
])
labs_curs.close()
labs_conn.close()
# print("Cursor and Connection Closed.")
list_to_drop = df[df.sort_values(by=['date_price'], ascending=False).duplicated(['product_name', 'market_name', 'source_name','currency_code'], keep='first')].index
df = df.drop(labels = list_to_drop, axis=0)
df['date_price'] = df['date_price'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
df['alps_stressness'] = df['alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['alps_stressness'] = df['alps_stressness'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'] + ' ('+ df['alps_stressness'] + ' %)'
df['alps_type_method'] = df['alps_type_method'].astype(str)
df['arima_alps_stressness'] = df['arima_alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['arima_alps_stressness'] = df['arima_alps_stressness'].astype(str)
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].astype(str) + ' ('+ df['arima_alps_stressness'] + ' %)'
df['observed_alps_class'] = df['observed_alps_class'].replace('None (nan %)', 'Not available')
df['alps_stressness'] = df['alps_stressness'].replace('nan', 'Not available')
df['alps_type_method'] = df['alps_type_method'].replace('None', 'Not available')
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].replace('None (nan %)', 'Not available')
df['arima_alps_stressness'] = df['arima_alps_stressness'].replace('nan', 'Not available')
df['price_category'] = "retail"
result = []
for _, row in df.iterrows():
result.append(dict(row))
return jsonify(result)
########################################################################
############# Pulling specific product market pair data. #############
########################################################################
@app.route('/raw/')
def query_raw_data():
query_parameters = request.args
product_name = query_parameters.get('product_name')
market_name = query_parameters.get('market_name')
country_code = query_parameters.get('country_code')
source_name = query_parameters.get('source_name')
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
if source_name:
labs_curs.execute('''
SELECT id
FROM sources
WHERE source_name = %s
''', (source_name,))
source_id = labs_curs.fetchall()
if not source_id:
            labs_curs.close()
            labs_conn.close()
            return 'That source name is not in the db.'
else:
source_id = source_id[0][0]
query = '''
SELECT *
FROM raw_table
WHERE
'''
to_filter = []
if product_name:
query += ' product_name=%s AND'
to_filter.append(product_name)
if market_name and country_code:
market_id = market_name + ' : ' + country_code
query += ' market_id=%s AND'
to_filter.append(market_id)
if source_name:
labs_curs.execute('''
SELECT id
FROM sources
WHERE source_name = %s
''', (source_name,))
source_id = labs_curs.fetchall()
if source_id:
source_id = source_id[0][0]
query += ' source_id = %s AND'
to_filter.append(source_id)
if not (product_name and market_name and country_code):
return page_not_found(404)
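    # Drop the trailing ' AND' left over from the filter building above and terminate the statement.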
query = query[:-4] + ';'
labs_curs.execute(query, to_filter)
result = labs_curs.fetchall()
    labs_curs.close()
    labs_conn.close()
    if result:
        return jsonify(result)
    else:
        return page_not_found(404)
@app.route('/retail/')
def query_retail_data():
query_parameters = request.args
product_name = query_parameters.get('product_name')
market_name = query_parameters.get('market_name')
country_code = query_parameters.get('country_code')
source_name = query_parameters.get('source_name')
currency_code = query_parameters.get('currency_code')
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
if source_name:
labs_curs.execute('''
SELECT id
FROM sources
WHERE source_name = %s
''', (source_name,))
source_id = labs_curs.fetchall()
if not source_id:
            labs_curs.close()
            labs_conn.close()
            return 'That source name is not in the db.'
else:
source_id = source_id[0][0]
query_0 = '''
SELECT *
FROM retail_prices
WHERE
'''
query_1 = '''
SELECT *
FROM retail_stats
WHERE
'''
to_filter = []
if product_name:
query_0 += ' product_name=%s AND'
query_1 += ' product_name=%s AND'
to_filter.append(product_name)
if market_name and country_code:
market_id = market_name + ' : ' + country_code
query_0 += ' market_id=%s AND'
query_1 += ' market_id=%s AND'
to_filter.append(market_id)
if source_name:
labs_curs.execute('''
SELECT id
FROM sources
WHERE source_name = %s
''', (source_name,))
source_id = labs_curs.fetchall()
if source_id:
source_id = source_id[0][0]
query_0 += ' source_id = %s AND'
query_1 += ' source_id = %s AND'
to_filter.append(source_id)
else:
labs_curs.execute('''
SELECT source_id
FROM retail_prices
WHERE product_name = %s
AND market_id = %s
GROUP BY source_id
ORDER BY count(source_id) DESC;
''', (product_name,market_id))
source_id = labs_curs.fetchall()
if source_id:
source_id = source_id[0][0]
query_0 += ' source_id = %s AND'
query_1 += ' source_id = %s AND'
to_filter.append(source_id)
if currency_code:
query_0 += ' currency_code = %s AND'
query_1 += ' currency_code = %s AND'
to_filter.append(currency_code)
else:
labs_curs.execute('''
SELECT currency_code
from retail_prices
WHERE product_name = %s
AND market_id = %s
GROUP BY currency_code
ORDER BY count(currency_code) DESC;
''', (product_name,market_id))
currency_code = labs_curs.fetchall()
if currency_code:
currency_code = currency_code[0][0]
query_0 += ' currency_code = %s AND'
query_1 += ' currency_code = %s AND'
to_filter.append(currency_code)
if not (product_name and market_name and country_code):
return page_not_found(404)
query_0 = query_0[:-4] + ';'
query_1 = query_1[:-4] + ';'
labs_curs.execute(query_0, to_filter)
result = labs_curs.fetchall()
labs_curs.execute('''
SELECT category_id
FROM products
WHERE product_name = %s
''', (product_name,))
category_id = labs_curs.fetchall()[0][0]
labs_curs.execute('''
SELECT category_name
FROM categories
WHERE id = %s
''', (category_id,))
product_category = labs_curs.fetchall()
if product_category:
product_category = product_category[0][0]
else:
product_category = 'Unknown'
if result:
df = pd.DataFrame(result, columns=['id', 'product_name','market_id','market_name', 'country_code','source_id',
'source_name', 'currency_code', 'unit_scale', 'date_price', 'observed_price',
'observed_alps_class', 'alps_type_method', 'forecasted_price', 'forecasted_class',
'forecasting_model', 'trending', 'normal_band_limit', 'stress_band_limit', 'alert_band_limit',
'alps_stressness', 'date_run_model', 'observed_arima_alps_class', 'arima_alps_stressness'])
df['date_price'] = df['date_price'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
df['alps_stressness'] = df['alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['alps_stressness'] = df['alps_stressness'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'] + ' ('+ df['alps_stressness'] + ' %)'
df['alps_type_method'] = df['alps_type_method'].astype(str)
df['arima_alps_stressness'] = df['arima_alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['arima_alps_stressness'] = df['arima_alps_stressness'].astype(str)
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].astype(str) + ' ('+ df['arima_alps_stressness'] + ' %)'
df['observed_alps_class'] = df['observed_alps_class'].replace('None (nan %)', 'Not available')
df['alps_stressness'] = df['alps_stressness'].replace('nan', 'Not available')
df['alps_type_method'] = df['alps_type_method'].replace('None', 'Not available')
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].replace('None (None %)', 'Not available')
df['arima_alps_stressness'] = df['arima_alps_stressness'].replace('nan', 'Not available')
df = df.drop(labels=['id'],axis=1)
prices_stats = df[['date_price','observed_price']].sort_values(by=['observed_price'])
min_price_date = prices_stats.iloc[0,0]
min_price_value = round(prices_stats.iloc[0,1],2)
max_price_date = prices_stats.iloc[-1,0]
max_price_value = round(prices_stats.iloc[-1,1],2)
mean_price_value= round(df['observed_price'].mean(),2)
labs_curs.execute(query_1,to_filter)
stats = labs_curs.fetchall()
if stats:
            stats_dict = {
                'product_category': product_category,
                'price_category': 'Retail',
                'start_date': datetime.date.strftime(stats[0][5], "%Y-%m-%d"),
                'end_date': datetime.date.strftime(stats[0][6], "%Y-%m-%d"),
                'Mode_D': stats[0][12],
                'number_of_observations': stats[0][13],
                'mean': mean_price_value,
                'min_price_date': min_price_date,
                'min_price': min_price_value,
                'max_price_date': max_price_date,
                'max_price': max_price_value,
                'days_between_start_end': stats[0][21],
                'completeness': str(round(stats[0][22] * 100 / .7123, 2)) + ' %',
                'DQI': 'not available',
                'DQI_cat': 'not available',
            }
labs_curs.execute('''
SELECT *
FROM qc_retail_observed_price
WHERE product_name = %s
AND market_id = %s
''', (product_name,market_id))
DQI_info = labs_curs.fetchall()
if DQI_info:
stats_dict['DQI'] = round(DQI_info[0][-2],2)
stats_dict['DQI_cat'] = DQI_info[0][-1].capitalize()
else:
stats_dict = {'product_data':'missing'}
        labs_curs.close()
        labs_conn.close()
        return jsonify(quality=stats_dict, history=df.to_dict('records'))
    else:
        labs_curs.close()
        labs_conn.close()
        return page_not_found(404)
@app.route('/wholesale/')
def query_wholesale_data():
query_parameters = request.args
product_name = query_parameters.get('product_name')
market_name = query_parameters.get('market_name')
country_code = query_parameters.get('country_code')
source_name = query_parameters.get('source_name')
currency_code = query_parameters.get('currency_code')
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
if source_name:
labs_curs.execute('''
SELECT id
FROM sources
WHERE source_name = %s
''', (source_name,))
source_id = labs_curs.fetchall()
if not source_id:
            labs_curs.close()
            labs_conn.close()
            return 'That source name is not in the db.'
else:
source_id = source_id[0][0]
query_0 = '''
SELECT *
FROM wholesale_prices
WHERE
'''
query_1 = '''
SELECT *
FROM wholesale_stats
WHERE
'''
to_filter = []
if product_name:
query_0 += ' product_name=%s AND'
query_1 += ' product_name=%s AND'
to_filter.append(product_name)
if market_name and country_code:
market_id = market_name + ' : ' + country_code
query_0 += ' market_id=%s AND'
query_1 += ' market_id=%s AND'
to_filter.append(market_id)
if source_name:
labs_curs.execute('''
SELECT id
FROM sources
WHERE source_name = %s
''', (source_name,))
source_id = labs_curs.fetchall()
if source_id:
source_id = source_id[0][0]
query_0 += ' source_id = %s AND'
query_1 += ' source_id = %s AND'
to_filter.append(source_id)
else:
labs_curs.execute('''
SELECT source_id
FROM wholesale_prices
WHERE product_name = %s
AND market_id = %s
GROUP BY source_id
ORDER BY count(source_id) DESC;
''', (product_name,market_id))
source_id = labs_curs.fetchall()
if source_id:
source_id = source_id[0][0]
query_0 += ' source_id = %s AND'
query_1 += ' source_id = %s AND'
to_filter.append(source_id)
if currency_code:
query_0 += ' currency_code = %s AND'
query_1 += ' currency_code = %s AND'
to_filter.append(currency_code)
else:
labs_curs.execute('''
SELECT currency_code
from wholesale_prices
WHERE product_name = %s
AND market_id = %s
GROUP BY currency_code
ORDER BY count(currency_code) DESC;
''', (product_name,market_id))
currency_code = labs_curs.fetchall()
if currency_code:
currency_code = currency_code[0][0]
query_0 += ' currency_code = %s AND'
query_1 += ' currency_code = %s AND'
to_filter.append(currency_code)
if not (product_name and market_name and country_code):
return page_not_found(404)
query_0 = query_0[:-4] + ';'
query_1 = query_1[:-4] + ';'
labs_curs.execute(query_0, to_filter)
result = labs_curs.fetchall()
labs_curs.execute('''
SELECT category_id
FROM products
WHERE product_name = %s
''', (product_name,))
category_id = labs_curs.fetchall()[0][0]
labs_curs.execute('''
SELECT category_name
FROM categories
WHERE id = %s
''', (category_id,))
product_category = labs_curs.fetchall()
if product_category:
product_category = product_category[0][0]
else:
product_category = 'Unknown'
if result:
df = pd.DataFrame(result, columns=['id', 'product_name','market_id','market_name', 'country_code','source_id',
'source_name', 'currency_code', 'unit_scale', 'date_price', 'observed_price',
'observed_alps_class', 'alps_type_method', 'forecasted_price', 'forecasted_class',
'forecasting_model', 'trending', 'normal_band_limit', 'stress_band_limit', 'alert_band_limit',
'alps_stressness', 'date_run_model', 'observed_arima_alps_class', 'arima_alps_stressness'])
df['date_price'] = df['date_price'].apply(lambda x: datetime.date.strftime(x,"%Y-%m-%d"))
df['alps_stressness'] = df['alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['alps_stressness'] = df['alps_stressness'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'].astype(str)
df['observed_alps_class'] = df['observed_alps_class'] + ' ('+ df['alps_stressness'] + ' %)'
df['alps_type_method'] = df['alps_type_method'].astype(str)
df['arima_alps_stressness'] = df['arima_alps_stressness'].apply(lambda x: round(x*100,2) if type(x) == float else None)
df['arima_alps_stressness'] = df['arima_alps_stressness'].astype(str)
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].astype(str) + ' ('+ df['arima_alps_stressness'] + ' %)'
df['observed_alps_class'] = df['observed_alps_class'].replace('None (nan %)', 'Not available')
df['alps_stressness'] = df['alps_stressness'].replace('nan', 'Not available')
df['alps_type_method'] = df['alps_type_method'].replace('None', 'Not available')
df['observed_arima_alps_class'] = df['observed_arima_alps_class'].replace('None (None %)', 'Not available')
df['arima_alps_stressness'] = df['arima_alps_stressness'].replace('nan', 'Not available')
df = df.drop(labels=['id'],axis=1)
prices_stats = df[['date_price','observed_price']].sort_values(by=['observed_price'])
min_price_date = prices_stats.iloc[0,0]
min_price_value = round(prices_stats.iloc[0,1],2)
max_price_date = prices_stats.iloc[-1,0]
max_price_value = round(prices_stats.iloc[-1,1],2)
mean_price_value= round(df['observed_price'].mean(),2)
labs_curs.execute(query_1,to_filter)
stats = labs_curs.fetchall()
if stats:
            stats_dict = {
                'product_category': product_category,
                'price_category': 'Wholesale',
                'start_date': datetime.date.strftime(stats[0][5], "%Y-%m-%d"),
                'end_date': datetime.date.strftime(stats[0][6], "%Y-%m-%d"),
                'Mode_D': stats[0][12],
                'number_of_observations': stats[0][13],
                'mean': mean_price_value,
                'min_price_date': min_price_date,
                'min_price': min_price_value,
                'max_price_date': max_price_date,
                'max_price': max_price_value,
                'days_between_start_end': stats[0][21],
                'completeness': str(round(stats[0][22] * 100 / .7123, 2)) + ' %',
                'DQI': 'not available',
                'DQI_cat': 'not available',
            }
labs_curs.execute('''
SELECT *
FROM qc_wholesale_observed_price
WHERE product_name = %s
AND market_id = %s
''', (product_name,market_id))
DQI_info = labs_curs.fetchall()
if DQI_info:
stats_dict['DQI'] = round(DQI_info[0][-2],2)
stats_dict['DQI_cat'] = DQI_info[0][-1].capitalize()
else:
stats_dict = {'product_data':'missing'}
        labs_curs.close()
        labs_conn.close()
        return jsonify(quality=stats_dict, history=df.to_dict('records'))
    else:
        labs_curs.close()
        labs_conn.close()
        return page_not_found(404)
@app.route("/availablepairsobjects/")
def get_available_pairs_objects():
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
all_pairs = {'retail':None, 'wholesale':None}
labs_curs.execute('''
SELECT country_code
FROM countries
''')
countries = labs_curs.fetchall()
if countries:
countries = [x[0] for x in countries]
country_market_product_pairs = {country: None for country in countries}
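        # Build a nested {country: {market: [product, ...]}} mapping for each price type.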
for country in countries:
labs_curs.execute('''
SELECT market_name
FROM markets
WHERE country_code = %s
''', (country,))
markets = labs_curs.fetchall()
if markets:
markets = [x[0] for x in markets]
country_market_product_pairs[country]= {market : None for market in markets}
retail_pairs = country_market_product_pairs.copy()
wholesale_pairs = country_market_product_pairs.copy()
for market in markets:
# retail query
labs_curs.execute('''
SELECT DISTINCT(product_name)
FROM retail_prices
WHERE country_code = %s
AND market_name = %s
''', (country,market))
products = labs_curs.fetchall()
if products:
products = [x[0] for x in products]
retail_pairs[country][market] = products
all_pairs['retail'] = retail_pairs
# wholesale query
labs_curs.execute('''
SELECT DISTINCT(product_name)
FROM wholesale_prices
WHERE country_code = %s
AND market_name = %s
''', (country,market))
products = labs_curs.fetchall()
if products:
products = [x[0] for x in products]
wholesale_pairs[country][market] = products
                        all_pairs['wholesale'] = wholesale_pairs
else:
del wholesale_pairs[country][market]
keys_to_drop = []
for sale_type in ['retail', 'wholesale']:
for key in all_pairs[sale_type].keys():
if not all_pairs[sale_type][key]:
keys_to_drop.append(key)
for key in keys_to_drop:
del all_pairs[sale_type][key]
keys_to_drop = []
labs_curs.close()
labs_conn.close()
return jsonify(all_pairs)
@app.route("/availablepairs/")
def get_available_pairs():
labs_conn = psycopg2.connect(user=os.environ.get('aws_db_user'),
password=os.environ.get('aws_db_password'),
host=os.environ.get('aws_db_host'),
port=os.environ.get('aws_db_port'),
database=os.environ.get('aws_db_name'))
labs_curs = labs_conn.cursor()
all_pairs = [{'retail':None, 'wholesale':None}]
labs_curs.execute('''
SELECT country_code
FROM countries
''')
countries = labs_curs.fetchall()
if countries:
countries = [x[0] for x in countries]
country_market_product_pairs = {country: None for country in countries}
for country in countries:
labs_curs.execute('''
SELECT market_name
FROM markets
WHERE country_code = %s
''', (country,))
markets = labs_curs.fetchall()
if markets:
markets = [x[0] for x in markets]
country_market_product_pairs[country]= [{market : None for market in markets}]
retail_pairs = country_market_product_pairs.copy()
wholesale_pairs = country_market_product_pairs.copy()
for market in markets:
# retail query
labs_curs.execute('''
SELECT DISTINCT(product_name)
FROM retail_prices
WHERE country_code = %s
AND market_name = %s
''', (country,market))
products = labs_curs.fetchall()
if products:
products = [x[0] for x in products]
retail_pairs[country][0][market] = products
all_pairs[0]['retail'] = [retail_pairs]
# wholesale query
labs_curs.execute('''
SELECT DISTINCT(product_name)
FROM wholesale_prices
WHERE country_code = %s
AND market_name = %s
''', (country,market))
products = labs_curs.fetchall()
if products:
products = [x[0] for x in products]
wholesale_pairs[country][0][market] = products
all_pairs[0]['wholesale'] = [wholesale_pairs]
else:
del wholesale_pairs[country][0][market]
labs_curs.close()
labs_conn.close()
keys_to_drop = []
for sale_type in ['retail', 'wholesale']:
for key in all_pairs[0][sale_type][0].keys():
if not all_pairs[0][sale_type][0][key]:
keys_to_drop.append(key)
for key in keys_to_drop:
del all_pairs[0][sale_type][0][key]
keys_to_drop = []
return jsonify(all_pairs)
| 37.487337 | 588 | 0.583937 | [
"MIT"
] | Minaramzey/Sauti-Africa-Market-Monitoring-DS | routes.py | 48,846 | Python |
from dagster import check
from dagster.core.types.marshal import PickleSerializationStrategy
from .errors import DagstermillError, DagsterUserCodeExecutionError
from .manager import Manager, MANAGER_FOR_NOTEBOOK_INSTANCE
from .serialize import SerializableRuntimeType, read_value
from .solids import define_dagstermill_solid
# magic incantation for syncing up notebooks to enclosing virtual environment.
# I don't claim to understand it.
# ipython kernel install --name "dagster" --user
# python3 -m ipykernel install --user
def register_repository(repo_def):
return MANAGER_FOR_NOTEBOOK_INSTANCE.register_repository(repo_def)
def deregister_repository():
return MANAGER_FOR_NOTEBOOK_INSTANCE.deregister_repository()
def yield_result(value, output_name='result'):
    '''Explicitly yield an Output.
Args:
value (Any): The value of the Output to yield.
output_name (Optional[str]): The name of the Output to yield. Default: 'result'.
'''
return MANAGER_FOR_NOTEBOOK_INSTANCE.yield_result(value, output_name)
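# A minimal usage sketch (assumptions: this module is imported as ``dagstermill`` inside a
# notebook backing a dagstermill solid, and the solid declares an output named 'result';
# ``my_value`` is only a placeholder):
#
#     import dagstermill
#     dagstermill.yield_result(my_value, output_name='result')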
def yield_event(dagster_event):
'''Explicitly yield a dagster event such as a Materialization or ExpectationResult
'''
return MANAGER_FOR_NOTEBOOK_INSTANCE.yield_event(dagster_event)
def populate_context(dm_context_data):
check.dict_param(dm_context_data, 'dm_context_data')
context = MANAGER_FOR_NOTEBOOK_INSTANCE.populate_context(**dm_context_data)
return context
def load_parameter(input_name, input_value):
check.invariant(MANAGER_FOR_NOTEBOOK_INSTANCE.populated_by_papermill, 'populated_by_papermill')
if MANAGER_FOR_NOTEBOOK_INSTANCE.solid_def is None:
check.invariant(
MANAGER_FOR_NOTEBOOK_INSTANCE.input_name_type_dict is not None,
'input_name_type_dict must not be None if solid_def is not defined!',
)
input_name_type_dict = MANAGER_FOR_NOTEBOOK_INSTANCE.input_name_type_dict
runtime_type_enum = input_name_type_dict[input_name]
if (
runtime_type_enum == SerializableRuntimeType.SCALAR
or runtime_type_enum == SerializableRuntimeType.JSON_SERIALIZABLE
):
return input_value
elif runtime_type_enum == SerializableRuntimeType.PICKLE_SERIALIZABLE:
return PickleSerializationStrategy().deserialize_from_file(input_value)
else:
raise DagstermillError(
"loading parameter {input_name} resulted in an error".format(input_name=input_name)
)
else:
solid_def = MANAGER_FOR_NOTEBOOK_INSTANCE.solid_def
input_def = solid_def.input_def_named(input_name)
return read_value(input_def.runtime_type, input_value)
def get_context(config=None):
if not MANAGER_FOR_NOTEBOOK_INSTANCE.populated_by_papermill:
MANAGER_FOR_NOTEBOOK_INSTANCE.define_out_of_pipeline_context(config)
return MANAGER_FOR_NOTEBOOK_INSTANCE.context
def teardown():
MANAGER_FOR_NOTEBOOK_INSTANCE.teardown_resources()
| 37 | 99 | 0.765766 | [
"Apache-2.0"
] | atsuhiro/dagster | python_modules/dagstermill/dagstermill/__init__.py | 2,997 | Python |
from __future__ import annotations
def recursive_binary_search(l: list, target: int, low: int, high: int) -> int | None:
    # Base case: an empty search window means the target is not in the list.
    if low > high:
        return None
    mid = low + (high - low) // 2
    if mid < len(l):
        if l[mid] == target:
            return mid
        elif target > l[mid]:
            return recursive_binary_search(l, target, mid + 1, high)
        else:
            return recursive_binary_search(l, target, low, mid - 1)
    return None
if __name__ == '__main__':
l = [3, 4, 8, 9, 14, 34, 41, 49, 58, 65, 69, 77, 81, 85, 88]
print(recursive_binary_search(l, 3, 0, len(l)))
print(recursive_binary_search(l, 41, 0, len(l)))
print(recursive_binary_search(l, 88, 0, len(l)))
print(recursive_binary_search(l, 89, 0, len(l))) | 36.5 | 85 | 0.6 | [
"MIT"
] | Pyt45/algorithms-dataStructure-python | Search/RecursiveBinarySearch.py | 730 | Python |
from __future__ import unicode_literals
from django.contrib import admin
from django.conf import settings
from django.conf.urls import url
from django import forms
from django.core.urlresolvers import reverse
from django.contrib.admin.utils import quote
from django.utils.translation import ugettext_lazy as _, ugettext, ungettext
from django.utils.html import format_html
from django.utils import timezone
from django.contrib import messages
from django.db.models.signals import post_save
from django.db.models import Min, Q
from django.contrib.sites.models import Site
from mezzanine.utils.sites import current_site_id
from mezzanine.pages.admin import PageAdmin
try:
from mezzanine.pages.admin import PageAdminForm
except ImportError:
PageAdminForm = forms.ModelForm
from mezzanine.core.models import (CONTENT_STATUS_PUBLISHED,
CONTENT_STATUS_DRAFT)
from widgy.forms import WidgyFormMixin, VersionedWidgyWidget
from widgy.contrib.widgy_mezzanine import get_widgypage_model
from widgy.contrib.widgy_mezzanine.views import ClonePageView, UnpublishView
from widgy.contrib.page_builder.admin import CalloutAdmin
from widgy.contrib.page_builder.models import Callout
from widgy.contrib.form_builder.admin import FormAdmin
from widgy.contrib.form_builder.models import Form
from widgy.db.fields import get_site
from widgy.models import Node
from widgy.admin import WidgyAdmin
WidgyPage = get_widgypage_model()
if 'widgy.contrib.review_queue' in settings.INSTALLED_APPS:
REVIEW_QUEUE_INSTALLED = True
from widgy.contrib.review_queue.site import ReviewedWidgySite
else:
REVIEW_QUEUE_INSTALLED = False
class PageVersionedWidgyWidget(VersionedWidgyWidget):
template_name = 'widgy/widgy_mezzanine/versioned_widgy_field.html'
class WidgyPageAdminForm(WidgyFormMixin, PageAdminForm):
class Meta:
model = WidgyPage
widgets = {
'root_node': PageVersionedWidgyWidget,
}
fields = '__all__'
def __init__(self, *args, **kwargs):
super(WidgyPageAdminForm, self).__init__(*args, **kwargs)
self.fields['publish_date'].help_text = _(
"If you enter a date here, the page will not be viewable on the site until then"
)
self.fields['expiry_date'].help_text = _(
"If you enter a date here, the page will not be viewable after this time"
)
if self.instance.pk is None:
self.instance.status = CONTENT_STATUS_DRAFT
# the status of a page before it's created, on the add page
CONTENT_STATUS_EMBRYO = 0
class WidgyPageAdmin(PageAdmin):
change_form_template = 'widgy/widgy_mezzanine/widgypage_change_form.html'
form = WidgyPageAdminForm
readonly_fields = ['status']
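    # Map the page's current status (and, on reviewed sites, whether the user may approve
    # commits) to the save buttons shown on the change form.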
unreviewed_buttons = {
CONTENT_STATUS_EMBRYO : [('_continue', _('Save'))],
CONTENT_STATUS_DRAFT : [('_continue', _('Save as Draft')),
('_save_and_commit', _('Publish'))],
CONTENT_STATUS_PUBLISHED : [('_save_and_commit', _('Publish Changes'))],
}
reviewed_buttons = {
(CONTENT_STATUS_EMBRYO, False) : [('_continue', _('Save'))],
(CONTENT_STATUS_EMBRYO, True) : [('_continue', _('Save'))],
(CONTENT_STATUS_DRAFT, False) : [('_continue', _('Save as Draft')),
('_save_and_commit', _('Submit for Review'))],
(CONTENT_STATUS_DRAFT, True) : [('_continue', _('Save as Draft')),
('_save_and_commit', _('Submit for Review')),
('_save_and_approve', _('Publish'))],
(CONTENT_STATUS_PUBLISHED, False) : [('_save_and_commit', _('Submit for Review'))],
(CONTENT_STATUS_PUBLISHED, True) : [('_save_and_commit', _('Submit for Review')),
('_save_and_approve', _('Publish Changes'))],
}
def get_urls(self):
clone_view = ClonePageView.as_view(
model=self.model,
has_permission=self.has_add_permission,
)
unpublish_view = UnpublishView.as_view(
model=self.model,
has_change_permission=self.has_change_permission,
)
return [
url(
'^(.+)/clone/$',
self.admin_site.admin_view(clone_view),
name='widgy_mezzanine_widgypage_clone',
),
url(
'^(.+)/unpublish/$',
self.admin_site.admin_view(unpublish_view),
name='widgy_mezzanine_widgypage_unpublish',
),
] + super(WidgyPageAdmin, self).get_urls()
def _save_and_commit(self, request, obj):
site = self.get_site()
commit_model = site.get_version_tracker_model().commit_model
if not site.has_add_permission(request, obj, commit_model):
messages.error(request, _("You don't have permission to commit."))
else:
if obj.root_node.has_changes():
obj.root_node.commit(user=request.user)
elif self.has_review_queue:
messages.warning(request, _("There was nothing to submit for review."))
if not self.has_review_queue:
obj.status = CONTENT_STATUS_PUBLISHED
# else:
# If we are reviewed, we'll have to wait for approval.
# Handled by the publish_page_on_approve signal.
def _save_and_approve(self, request, obj):
site = self.get_site()
commit_model = site.get_version_tracker_model().commit_model
if not site.has_add_permission(request, obj, commit_model) or \
not site.has_change_permission(request, commit_model):
messages.error(request, _("You don't have permission to approve commits."))
else:
if obj.root_node.has_changes():
obj.root_node.commit(request.user)
# If we had changes, `head` is the same commit we just created.
# If we didn't need to create a commit, we want to publish the
# most recent one instead.
obj.root_node.head.reviewedversioncommit.approve(request.user)
obj.root_node.head.reviewedversioncommit.save()
obj.status = CONTENT_STATUS_PUBLISHED
def save_model(self, request, obj, form, change):
if '_save_and_commit' in request.POST:
self._save_and_commit(request, obj)
elif '_save_and_approve' in request.POST and self.has_review_queue:
self._save_and_approve(request, obj)
request.POST = request.POST.copy()
request.POST['_continue'] = True
super(WidgyPageAdmin, self).save_model(request, obj, form, change)
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None, *args, **kwargs):
if not add:
unapproved = 0
future = 0
for commit in obj.root_node.get_history_list():
if obj.root_node.commit_is_ready(commit):
# got to the currently-published commit
break
if self.has_review_queue and not commit.reviewedversioncommit.is_approved:
unapproved += 1
if not commit.is_published:
future += 1
if unapproved:
messages.warning(request, ungettext(
"There is one unreviewed commit for this page.",
"There are {count} unreviewed commits for this page.",
unapproved
).format(count=unapproved))
if future:
messages.warning(request, ungettext(
"There is one future-scheduled commit.",
"There are {count} future-scheduled commits.",
future
).format(count=future))
site = self.get_site()
if add:
status = CONTENT_STATUS_EMBRYO
else:
status = obj.status
if self.has_review_queue:
commit_model = site.get_version_tracker_model().commit_model
can_approve = site.has_change_permission(request, commit_model)
context['save_buttons'] = self.reviewed_buttons[(status, can_approve)]
else:
context['save_buttons'] = self.unreviewed_buttons[status]
if not add:
context['history_url'] = site.reverse(site.history_view, kwargs={'pk': obj.root_node_id})
return super(WidgyPageAdmin, self).render_change_form(request, context, add, change, form_url, obj, *args, **kwargs)
@property
def has_review_queue(self):
return REVIEW_QUEUE_INSTALLED and isinstance(self.get_site(), ReviewedWidgySite)
def get_site(self):
return get_site(settings.WIDGY_MEZZANINE_SITE)
class UndeleteField(forms.ModelChoiceField):
widget = forms.RadioSelect
def __init__(self, *args, **kwargs):
self.site = kwargs.pop('site')
kwargs['queryset'] = self.get_undelete_queryset(kwargs['queryset'])
return super(UndeleteField, self).__init__(*args, **kwargs)
def get_undelete_queryset(self, layouts):
"""
Version trackers that have no references and whose content type is
allowed by our field can be restored.
"""
VersionTracker = self.site.get_version_tracker_model()
# Is it necessary to query on the HEAD content type _and_ the working
# copy content type? Can a version tracker's root node content type
# change? If it can change, which one should be used here?
#
# Just filter based on the working copy's layout, as this allows
# undeleting a version tracker that never got committed.
return VersionTracker.objects.orphan().filter(
working_copy__content_type_id__in=layouts)
def label_from_instance(self, obj):
url = reverse('widgy.contrib.widgy_mezzanine.views.preview',
kwargs={'node_pk': obj.working_copy.pk})
return format_html('<a href="{url}">{preview}</a>', url=url, preview=ugettext('preview'))
class UndeletePageAdminMixin(object):
def get_form(self, request, obj=None, **kwargs):
base = super(UndeletePageAdminMixin, self).get_form(request, obj, **kwargs)
base_field = base.base_fields['root_node']
# create a new form using an UndeleteField instead of the
# original VersionedWidgyField
return type(base.__class__)(base.__class__.__name__, (base,), {
'root_node': UndeleteField(site=base_field.site,
queryset=base_field.queryset,
empty_label=None,
label=_('root node'))
})
def response_add(self, request, obj, *args, **kwargs):
resp = super(UndeletePageAdminMixin, self).response_add(request, obj, *args, **kwargs)
if resp.status_code == 302 and resp['Location'].startswith('../'):
viewname = 'admin:%s_%s_change' % (
obj._meta.app_label,
obj._meta.model_name)
resp['Location'] = reverse(viewname, args=(quote(obj.pk),))
return resp
class UndeletePageAdmin(UndeletePageAdminMixin, WidgyPageAdmin):
pass
class UndeletePage(WidgyPage):
"""
A proxy for WidgyPage, just to allow registering WidgyPage twice with a
different ModelAdmin.
"""
class Meta:
proxy = True
app_label = WidgyPage._meta.app_label
verbose_name = _('restore deleted page')
def __init__(self, *args, **kwargs):
self._meta = super(UndeletePage, self)._meta
return super(UndeletePage, self).__init__(*args, **kwargs)
admin.site.register(WidgyPage, WidgyPageAdmin)
admin.site.register(UndeletePage, UndeletePageAdmin)
def publish_page_on_approve(sender, instance, created, **kwargs):
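    # post_save receiver for the review queue's commit model (connected at the bottom of
    # this module): publishes matching pages when a commit is approved; when a commit is
    # unapproved and no published commit remains, reschedules or unpublishes the page.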
site = get_site(settings.WIDGY_MEZZANINE_SITE)
pages = WidgyPage.objects.filter(
root_node=instance.tracker,
)
if instance.is_approved:
pages = pages.filter(
Q(publish_date__gte=instance.publish_at) |
Q(status=CONTENT_STATUS_DRAFT)
).update(
status=CONTENT_STATUS_PUBLISHED,
publish_date=instance.publish_at,
)
elif not site.get_version_tracker_model().objects.filter(pk=instance.tracker.pk).published().exists():
# unaproving a commit, and there are no other currently published commits
CommitModel = site.get_version_tracker_model().commit_model
beginning_of_validity = CommitModel.objects.approved().filter(
tracker_id=instance.tracker.pk,
publish_at__gt=timezone.now(),
).aggregate(min=Min('publish_at'))['min']
if beginning_of_validity is not None:
# There's a scheduled commit, move publish_date of the page forward
# up to the publish_at of the commit.
pages.update(
publish_date=beginning_of_validity,
status=CONTENT_STATUS_PUBLISHED,
)
else:
# no other published commits at all, page needs to be unpublished
pages.update(
status=CONTENT_STATUS_DRAFT,
)
class MultiSiteFormAdmin(FormAdmin):
def get_queryset(self, request):
version_tracker_model = self.get_site().get_version_tracker_model()
site_pages = version_tracker_model.objects.filter(widgypage__site_id=current_site_id())
site_nodes = Node.objects.filter(versiontracker__in=site_pages)
# This query seems like it could get slow. If that's the case,
# something along these lines might be helpful:
# Node.objects.all().extra(
# tables=[
# '"widgy_node" AS "root"',
# 'widgy_versiontracker',
# 'widgy_mezzanine_widgypage',
# 'pages_page',
# ],
# where=[
# 'root.path = SUBSTR(widgy_node.path, 1, 4)',
# 'widgy_versiontracker.id = widgy_mezzanine_widgypage.root_node_id',
# 'pages_page.id = widgy_mezzanine_widgypage.page_ptr_id',
# 'pages_page.site_id = 1',
# ]
# )
qs = super(MultiSiteFormAdmin, self).get_queryset(request).filter(
_nodes__path__path_root__in=site_nodes.values_list('path'),
)
return qs
def get_site(self):
return get_site(settings.WIDGY_MEZZANINE_SITE)
admin.site.unregister(Form)
admin.site.register(Form, MultiSiteFormAdmin)
class MultiSiteCalloutAdmin(WidgyAdmin):
def get_site_list(self, request):
if not hasattr(request, '_site_list'):
# Mezzanine has a weird data model for site permissions. This optimizes the query into
# one single SQL statement. This also avoids raising an ObjectDoesNotExist error in case
# a user does not have a sitepermission object.
request._site_list = Site.objects.filter(sitepermission__user=request.user)
return request._site_list
def get_fields(self, request, obj=None):
site_list = self.get_site_list(request)
if request.user.is_superuser or len(site_list) > 1:
return ('name', 'site', 'root_node')
else:
return ('name', 'root_node')
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'site' and not request.user.is_superuser:
# See MultiSiteCalloutAdmin.get_fields() about this query
kwargs['queryset'] = self.get_site_list(request)
# Non superusers have to select a site, otherwise the callout will be global.
kwargs['required'] = True
return super(MultiSiteCalloutAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs)
def save_model(self, request, obj, form, change):
if not change:
site_list = self.get_site_list(request)
if len(site_list) == 1:
obj.site = site_list.get()
return super(MultiSiteCalloutAdmin, self).save_model(request, obj, form, change)
def get_queryset(self, request):
qs = super(MultiSiteCalloutAdmin, self).get_queryset(request)
if not (request.user.is_superuser or (request.user.is_staff and Site.objects.count() == 1)):
qs = qs.filter(site__sitepermission__user=request.user)
return qs
admin.site.unregister(Callout)
admin.site.register(Callout, MultiSiteCalloutAdmin)
if REVIEW_QUEUE_INSTALLED:
from widgy.contrib.review_queue.admin import VersionCommitAdminBase
from widgy.contrib.review_queue.models import ReviewedVersionCommit, ReviewedVersionTracker
class VersionCommitAdmin(VersionCommitAdminBase):
def get_site(self):
return get_site(settings.WIDGY_MEZZANINE_SITE)
def get_queryset(self, request):
qs = super(VersionCommitAdmin, self).get_queryset(request)
if not request.user.is_superuser:
sites = Site.objects.filter(sitepermission__user=request.user)
qs = qs.filter(
tracker__in=ReviewedVersionTracker.objects.filter(widgypage__site__in=sites)
)
return qs
admin.site.register(ReviewedVersionCommit, VersionCommitAdmin)
site = get_site(settings.WIDGY_MEZZANINE_SITE)
if isinstance(site, ReviewedWidgySite):
# In the tests, review_queue is installed but a ReviewedWidgySite might
# not be in use.
post_save.connect(publish_page_on_approve, sender=site.get_version_tracker_model().commit_model)
| 41.88 | 124 | 0.64425 | [
"Apache-2.0"
] | fusionbox/django-widgy | widgy/contrib/widgy_mezzanine/admin.py | 17,799 | Python |
# Using a combination of list subsetting and variable assignment, create a new variable, eat_sleep_area, that contains the sum of the area of the kitchen and the area of the bedroom.
# Print the new variable eat_sleep_area.
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Sum of kitchen and bedroom area: eat_sleep_area
eat_sleep_area = areas[3] + areas[-3]
# Print the variable eat_sleep_area
print(eat_sleep_area)
| 45.181818 | 182 | 0.752515 | ["MIT"] | paulmcheng/python-for-data-science | Chapter2-1-Lists/Subset.py | 497 | Python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Catapult"""
"""Template task in which you prevent something from falling so ball can roll into container."""
import numpy as np
import phyre.creator as creator_lib
import phyre.virtual_tools as vt
@creator_lib.define_task_template(
seed=range(1000),
version="2",
search_params=dict(
max_search_tasks=250,
required_flags=['BALL:GOOD_STABLE'],
excluded_flags=['BALL:TRIVIAL'],
),
)
def build_task(C, seed):
rng = np.random.RandomState(seed=seed)
cataWidth = [200, 400]
cataHeight = [30, 100]
ballRad = [5, 15]
strutWidth = [10, 50]
strutHeight = [60, 200]
goalWidth = [60, 150]
goalHeight = [60, 180]
cataThick = [5, 10]
spacing = [25, 100]
strutPlace = [0, 150]
## Define the features first
cW = rng.uniform(cataWidth[0], cataWidth[1])
cH = 20.
bR = rng.uniform(ballRad[0], ballRad[1])
sW = rng.uniform(strutWidth[0], strutWidth[1])
sH = rng.uniform(strutHeight[0], strutHeight[1])
gW = rng.uniform(goalWidth[0], goalWidth[1])
gH = rng.uniform(goalHeight[0], goalHeight[1])
cT = rng.uniform(cataThick[0], cataThick[1])
sp = rng.uniform(spacing[0], spacing[1])
stP = rng.uniform(strutPlace[0], strutPlace[1])
stP = min([stP, cW / 2])
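# Randomly mirror the whole layout left-to-right for roughly half of the seeds.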
flip_lr = rng.uniform(0, 1) < 0.5
## Then fit together
cataCent = vt.VT_SCALE - gW - sp - cW / 2
cataLeft = cataCent - cW / 2
## Make the world
strut = vt.add_box(
C, [cataCent - sW / 2 + stP, 0, cataCent + sW / 2 + stP, sH],
False,
flip_lr=flip_lr)
cradle = vt.add_box(C, [cataLeft, 0, cataLeft + 10, sH],
False,
flip_lr=flip_lr)
container, _ = vt.add_container(
C, [[vt.VT_SCALE - gW, gH], [vt.VT_SCALE - gW, 5], [vt.VT_SCALE - 5, 5],
[vt.VT_SCALE - 5, gH]],
10,
False,
True,
flip_lr=flip_lr)
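# The catapult body is three rectangles forming an L shape: a small corner block,
# an upright lip at the left end, and the long arm the ball rests on.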
polys = [[[cataLeft, sH], [cataLeft, sH + cT], [cataLeft + cT, sH + cT],
[cataLeft + cT, sH]],
[[cataLeft, sH + cT], [cataLeft, sH + cH],
[cataLeft + cT, sH + cH], [cataLeft + cT, sH + cT]],
[[cataLeft + cT, sH], [cataLeft + cT, sH + cT],
[cataLeft + cW, sH + cT], [cataLeft + cW, sH]]]
for p in polys:
p.reverse()
if flip_lr:
p = vt.flip_left_right(p)
center_x = vt.flip_left_right(cataLeft + cT + bR + 30)
else:
center_x = cataLeft + cT + bR + 30
converted_polylist = [
vt.convert_phyre_tools_vertices(poly) for poly in polys
]
catapult = C.add_multipolygons(polygons=converted_polylist, dynamic=True)
ball = C.add('dynamic ball',
bR * 2. / vt.VT_SCALE,
center_x=center_x * C.scene.width / vt.VT_SCALE,
center_y=(sH + cT + bR) * C.scene.width / vt.VT_SCALE)
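# Task goal: the ball has to end up touching the goal container.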
C.update_task(body1=ball,
body2=container,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.VIRTUAL_TOOLS)
'''pgw.addBox('Strut', [cataCent - sW/2 + stP, 0, cataCent + sW/2 + stP, sH], 'black', 0)
pgw.addBox('Cradle', [cataLeft, 0, cataLeft+10, sH], 'black', 0)
pgw.addContainer('Goal', [[DIMS[0]-gW, 5], [DIMS[0]-gW, gH], [DIMS[0]-5, gH], [DIMS[0]-5, 5]], 10, 'green', 'black', 0)
pgw.addCompound('Catapult', [
[[cataLeft, sH], [cataLeft, sH+cT], [cataLeft+cT, sH+cT], [cataLeft+cT, sH]],
[[cataLeft, sH+cT], [cataLeft, sH+cH], [cataLeft+cT, sH+cH], [cataLeft+cT, sH+cT]],
[[cataLeft+cT, sH], [cataLeft+cT, sH+cT], [cataLeft+cW, sH+cT], [cataLeft+cW, sH]]
], 'blue', 1)
pgw.addBall('Ball', [cataLeft+cT+bR+30, sH+cT+bR], bR, 'red', 1)
pgw.attachSpecificInGoal('Goal', 'Ball', 1)
return pgw'''
| 37.973913 | 121 | 0.598122 | ["Apache-2.0"] | EltayebAhmed/phyre | data/task_scripts/main/task01002.py | 4,367 | Python
movie = input()
director = input()
year = input()
print(f"{movie} (dir. {director}) came out in {year}")
| 17.666667 | 54 | 0.632075 | ["MIT"] | Answerman/Coffee-Machine | Problems/Film/task.py | 106 | Python
from pyalgotrade import strategy
from pyalgotrade import dataseries
from pyalgotrade.dataseries import aligned
from pyalgotrade import plotter
from pyalgotrade.tools import yahoofinance
from pyalgotrade.stratanalyzer import sharpe
import numpy as np
import statsmodels.api as sm
def get_beta(values1, values2):
# http://statsmodels.sourceforge.net/stable/regression.html
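# OLS with no intercept term: the single fitted coefficient is used as the hedge ratio.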
model = sm.OLS(values1, values2)
results = model.fit()
return results.params[0]
class StatArbHelper:
def __init__(self, ds1, ds2, windowSize):
# We're going to use datetime aligned versions of the dataseries.
self.__ds1, self.__ds2 = aligned.datetime_aligned(ds1, ds2)
self.__windowSize = windowSize
self.__hedgeRatio = None
self.__spread = None
self.__spreadMean = None
self.__spreadStd = None
self.__zScore = None
def getSpread(self):
return self.__spread
def getSpreadMean(self):
return self.__spreadMean
def getSpreadStd(self):
return self.__spreadStd
def getZScore(self):
return self.__zScore
def getHedgeRatio(self):
return self.__hedgeRatio
def __updateHedgeRatio(self, values1, values2):
self.__hedgeRatio = get_beta(values1, values2)
def __updateSpreadMeanAndStd(self, values1, values2):
if self.__hedgeRatio is not None:
spread = values1 - values2 * self.__hedgeRatio
self.__spreadMean = spread.mean()
self.__spreadStd = spread.std(ddof=1)
def __updateSpread(self):
if self.__hedgeRatio is not None:
self.__spread = self.__ds1[-1] - self.__hedgeRatio * self.__ds2[-1]
def __updateZScore(self):
if self.__spread is not None and self.__spreadMean is not None and self.__spreadStd is not None:
self.__zScore = (self.__spread - self.__spreadMean) / float(self.__spreadStd)
def update(self):
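# Once a full window of aligned prices is available, refit the hedge ratio and
# recompute the spread, its rolling mean/std and the z-score.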
if len(self.__ds1) >= self.__windowSize:
values1 = np.asarray(self.__ds1[-1*self.__windowSize:])
values2 = np.asarray(self.__ds2[-1*self.__windowSize:])
self.__updateHedgeRatio(values1, values2)
self.__updateSpread()
self.__updateSpreadMeanAndStd(values1, values2)
self.__updateZScore()
class StatArb(strategy.BacktestingStrategy):
def __init__(self, feed, instrument1, instrument2, windowSize):
strategy.BacktestingStrategy.__init__(self, feed)
self.setUseAdjustedValues(True)
self.__statArbHelper = StatArbHelper(feed[instrument1].getAdjCloseDataSeries(), feed[instrument2].getAdjCloseDataSeries(), windowSize)
self.__i1 = instrument1
self.__i2 = instrument2
# These are used only for plotting purposes.
self.__spread = dataseries.SequenceDataSeries()
self.__hedgeRatio = dataseries.SequenceDataSeries()
def getSpreadDS(self):
return self.__spread
def getHedgeRatioDS(self):
return self.__hedgeRatio
def __getOrderSize(self, bars, hedgeRatio):
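# Split the available cash across both legs so that one spread unit is
# 1 share of instrument 1 plus hedgeRatio shares of instrument 2.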
cash = self.getBroker().getCash(False)
price1 = bars[self.__i1].getAdjClose()
price2 = bars[self.__i2].getAdjClose()
size1 = int(cash / (price1 + hedgeRatio * price2))
size2 = int(size1 * hedgeRatio)
return (size1, size2)
def buySpread(self, bars, hedgeRatio):
amount1, amount2 = self.__getOrderSize(bars, hedgeRatio)
self.marketOrder(self.__i1, amount1)
self.marketOrder(self.__i2, amount2 * -1)
def sellSpread(self, bars, hedgeRatio):
amount1, amount2 = self.__getOrderSize(bars, hedgeRatio)
self.marketOrder(self.__i1, amount1 * -1)
self.marketOrder(self.__i2, amount2)
def reducePosition(self, instrument):
currentPos = self.getBroker().getShares(instrument)
if currentPos > 0:
self.marketOrder(instrument, currentPos * -1)
elif currentPos < 0:
self.marketOrder(instrument, currentPos * -1)
def onBars(self, bars):
self.__statArbHelper.update()
# These are used only for plotting purposes.
self.__spread.appendWithDateTime(bars.getDateTime(), self.__statArbHelper.getSpread())
self.__hedgeRatio.appendWithDateTime(bars.getDateTime(), self.__statArbHelper.getHedgeRatio())
if bars.getBar(self.__i1) and bars.getBar(self.__i2):
hedgeRatio = self.__statArbHelper.getHedgeRatio()
zScore = self.__statArbHelper.getZScore()
if zScore is not None:
currentPos = abs(self.getBroker().getShares(self.__i1)) + abs(self.getBroker().getShares(self.__i2))
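# Close both legs once the spread reverts to within one standard deviation.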
if abs(zScore) <= 1 and currentPos != 0:
self.reducePosition(self.__i1)
self.reducePosition(self.__i2)
elif zScore <= -2 and currentPos == 0: # Buy spread when its value drops below 2 standard deviations.
self.buySpread(bars, hedgeRatio)
elif zScore >= 2 and currentPos == 0: # Short spread when its value rises above 2 standard deviations.
self.sellSpread(bars, hedgeRatio)
def main(plot):
instruments = ["gld", "gdx"]
windowSize = 50
# Download the bars.
feed = yahoofinance.build_feed(instruments, 2006, 2012, ".")
strat = StatArb(feed, instruments[0], instruments[1], windowSize)
sharpeRatioAnalyzer = sharpe.SharpeRatio()
strat.attachAnalyzer(sharpeRatioAnalyzer)
if plot:
plt = plotter.StrategyPlotter(strat, False, False, True)
plt.getOrCreateSubplot("hedge").addDataSeries("Hedge Ratio", strat.getHedgeRatioDS())
plt.getOrCreateSubplot("spread").addDataSeries("Spread", strat.getSpreadDS())
strat.run()
print "Sharpe ratio: %.2f" % sharpeRatioAnalyzer.getSharpeRatio(0.05)
if plot:
plt.plot()
if __name__ == "__main__":
main(True)
| 36.819876 | 142 | 0.667173 | ["Apache-2.0"] | 01FinTech/pyalgotrade-cn | samples/statarb_erniechan.py | 5,928 | Python
# Generated by Django 2.1.9 on 2020-02-13 15:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
('stores', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(max_length=25, unique=True)),
('email', models.EmailField(max_length=40, unique=True)),
('active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('deleted', models.BooleanField(default=False, help_text='Toggle to prevent actual deletes')),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('store', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='users', to='stores.Store')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
),
]
| 57.216216 | 266 | 0.653755 | ["MIT"] | kwanj-k/ctrim | apps/users/migrations/0001_initial.py | 2,117 | Python