the-stack_106_26395 | from __future__ import print_function
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
import os
import sys
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent_dir)
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.request import RpcRequest
client = AcsClient('your_access_key', 'your_access_secret', 'cn-hangzhou')
location_service = client.get_location_service()
location_service.set_location_service_attr(region='cn-beijing',
product_name="Location",
domain="location.aliyuncs.com")
domain = location_service.find_product_domain(client.get_region_id(), 'oss')
print(domain)
domain = location_service.find_product_domain(client.get_region_id(), 'oss')
print(domain)
class DescribeRegionsRequest(RpcRequest):
def __init__(self, OwnerId = None, ResourceOwnerAccount = None,
ResourceOwnerId = None, OwnerAccount = None):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeRegions', 'oss')
self.add_query_param('OwnerId',OwnerId)
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
self.add_query_param('OwnerAccount',OwnerAccount)
request = DescribeRegionsRequest()
status, headers, body = client.get_response(request)
print(status)
print(body)
the-stack_106_26397 |
'''A wrapper for fixed column text files
This module copies a fixed column TXT file into a dictionary to manipulate
and enrich. It can also be exported into a TXT file with a different
structure than the original file.
'''
import logging
from pathlib import Path
import sys
import tempfile
import beetools
import displayfx
_VERSION = "0.0.1"
_path = Path(__file__)
_name = _path.stem
class TxtWrpr:
'''A wrapper for fixed column text files
This module copies a fixed column TXT file into a dictionary structure
to manipulate and enrich. It can also be exported into a TXT file with
a different structure than the original file. Each row of the TXT file
must have at least one unique field that can be used as a key.
'''
def __init__( self, p_parent_logger_name,
p_key_idx,
p_src_field_def,
p_src = None,
p_has_header = True,
p_verbose = False
):
'''Initialize the class
Parameters
----------
p_parent_logger_name
Logger name of the caller. Assigns a logger name to the logger
in the <p_parent_logger_name>.<_name> format.
p_key_idx
Indicates the column number used as a key for the dictionary
starting at 0.
p_src_field_def
Definition of the source file presented in a list.
[[FieldName1,ColStart1,ColEnd1],
[FieldName2,ColStart2,ColEnd2],
....
[FieldNameN,ColStartN,ColEndN]]
where:
FieldName = Name of the field in the header
ColStart = Start column
ColEnd = End column of the field
p_src = None
List or a file path.
p_has_header = True
Indicate if the source has a header as the first record.
p_verbose = False
            Print feedback to the screen
Returns
-------
None
Examples
--------
See example in do_example
'''
self.logger_name = '{}.{}'.format( p_parent_logger_name, _name )
self.logger = logging.getLogger( self.logger_name )
self.logger.info( 'Start' )
self.success = False
self.exp_data = None
self.exp_field_def = None
self.exp_header = None
self.exp_pth = None
self.has_header = p_has_header
self.key_idx = p_key_idx
self.member_cntr = 0
self.parsed_data = {}
        self.src_data = None
self.src_field_def = p_src_field_def
self.src_pth = None
self.verbose = p_verbose
self.read_txt(p_src)
def assign_src(self, p_src):
        '''Assign src_data to the source data.  It accepts either a list or
        a path to a file.  If the source is a list, src_pth is set to
        "list"; otherwise the path to the file is assigned to src_pth.
Parameters
----------
p_src
Source data for parsing
Returns
-------
src_data
A list or Path to the source data
Examples
--------
See example in do_example
'''
self.success = True
if isinstance(p_src, list):
self.src_data = p_src
            self.src_pth = 'list'
elif p_src:
if isinstance(p_src, Path):
self.src_pth = p_src
with open( self.src_pth,'r', encoding = 'cp1252' ) as src_file:
self.src_data = src_file.readlines()
else:
msg = beetools.msg_error('{} does not exist.\nSystem terminated.'.format(self.src_pth))
print(msg)
self.success = False
sys.exit()
return self.src_data
def read_txt( self, p_src = None, p_verbose = False ):
'''Process the fixed width column text file into a dictionary
Parameters
----------
p_src = None
A list or Path to the source data
p_verbose = False
            Print feedback to the screen
Returns
-------
Examples
--------
See example in do_example
'''
self.assign_src(p_src)
if self.src_data:
if self.has_header:
t_src_data = self.src_data[1:]
else:
t_src_data = self.src_data
            recs_in_file = len( t_src_data )
if isinstance(self.src_pth, Path):
msg = beetools.msg_display( 'Process {} ({})'.format( self.src_pth, recs_in_file ))
else:
msg = beetools.msg_display( 'Process data ({})'.format(recs_in_file ))
fx_bar = displayfx.DisplayFx( _name, recs_in_file, p_msg = msg)
for fx_cntr, rec in enumerate(t_src_data):
key_id = str(rec[self.src_field_def[self.key_idx][ 1 ] : self.src_field_def[self.key_idx][ 2 ]].strip())
self.parsed_data[key_id] = {}
for field_name, field_start, field_end in self.src_field_def:
self.parsed_data[key_id][field_name] = rec[ field_start : field_end ].strip()
self.member_cntr += 1
if p_verbose:
fx_bar.update( fx_cntr )
            if len( self.parsed_data ) > 0:
self.success = True
else:
self.success = False
return self.parsed_data
def write_txt(self, p_exp_pth, p_exp_field_def, p_exp_header = True, p_verbose = False):
'''Export fixed width column text file
Parameters
----------
p_exp_pth
Path to export the file to
p_exp_field_def
Definition of the export file presented in a list.
[[FieldName1,ColStart1,ColEnd1,StaticText],
[FieldName2,ColStart2,ColEnd2,StaticText],
....
[FieldNameN,ColStartN,ColEndN,StaticText]]
where:
FieldName = Name of the field in the header
ColStart = Start column
ColEnd = End column of the field
If FieldName is not an existing field, this text will be
inserted in this column (enrichment)
p_exp_header = True
Write the header to the TXT file.
p_verbose = False
            Print feedback to the screen
Returns
-------
str
Exported data in string format.
Examples
--------
See example in do_example
'''
self.exp_pth = p_exp_pth
self.exp_field_def = p_exp_field_def
self.exp_header = p_exp_header
self.exp_data = ''
if p_exp_header:
for field in p_exp_field_def:
field_len = field[2] - field[1]
self.exp_data += '{: <{field_len}}'.format(field[0][:field_len], field_len = field_len)
self.exp_data += '\n'
msg = beetools.msg_display( 'Process {} ({})'.format( self.exp_pth, self.member_cntr))
fx_bar = displayfx.DisplayFx( _name, self.member_cntr, p_msg = msg)
for fx_cntr, rec in enumerate(self.parsed_data):
exp_rec = ''
for field in p_exp_field_def:
field_len = field[2] - field[1]
if field[0] in self.parsed_data[rec]:
field_contents = self.parsed_data[rec][field[0]]
else:
field_contents = field[3]
exp_rec += '{: <{field_len}}'.format(field_contents, field_len = field_len)
self.exp_data += '{}\n'.format(exp_rec)
if p_verbose:
fx_bar.update( fx_cntr )
self.exp_pth.write_text(self.exp_data)
return self.exp_data
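# Minimal usage sketch (not part of the original module): do_example below is the
# full, logged demonstration; this helper only shows the core read/lookup flow
# with a made-up two-field layout. Call it manually to try it out.
def _minimal_sketch():
    field_def = [['Id', 0, 5], ['Name', 5, 25]]
    rows = ['10001Makoto,Rodwell      ',
            '10002Klaasen,Calvin      ']
    wrapper = TxtWrpr('sketch', 0, field_def, p_src=rows, p_has_header=False)
    return wrapper.parsed_data['10001']['Name']  # -> 'Makoto,Rodwell'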
def do_example( p_app_path = '', p_cls = True ):
    '''Example of the usage of the class.
'''
def basic_example():
'''Basic and mandatory scenario tests for certification of the class
'''
success = True
dst_field_def = [
['OrgMemberId', 0 , 15],
['SurnameName', 15, 50],
['Gender' , 53, 54],
['BirthYear' , 59, 63]
]
src_data = [
'''OrgMemberId SurnameName FedgStd Birt''',
'''11000120 Makoto,Rodwell ZIMM2378 1987''',
'''14300133 Klaasen,Calvin Jong RSAM2226 1987''',
'''14300427 Van der Nat,Nicholas RSAM2362 1979''',
'''14300702 Mabusela,Johannes Manyedi RSAM2250 1984''',
'''14300753 Masango,Spencer ZIMM2232 1982''',
'''14304600 Barrish,Daniel RSAM2252 2000''',
'''14700077 Amonatov,Farrukh TJKM2632 1978''',
'''5001668 Sriram,Jha INDM2396 1976''',
'''5021103 Grover,Sahaj INDM2473 1995''',
'''8700249 Jere,Daniel ZAMM2384 1986''',
]
src_field_def = [
['OrgMemberId', 0 , 15],
['SurnameName', 15, 50],
['Fed' , 50, 53],
['Gender' , 53, 54],
['Std' , 54, 59],
['BirthYear' , 59, 63]
]
key_idx = 0
txt_file = TxtWrpr(
_name,
key_idx,
src_field_def,
p_has_header = False,
p_verbose = True )
success = txt_file.read_txt(src_data)
        dst_pth = Path(tempfile.mkdtemp()) / 'txt_wrpr_example.txt'
        success = txt_file.write_txt(dst_pth, dst_field_def)
return success
success = True
b_tls = beetools.Archiver( _name,
_VERSION,
__doc__[0],
p_app_path = p_app_path,
p_cls = p_cls )
logger = logging.getLogger( _name )
logger.setLevel( beetools.DEF_LOG_LEV )
file_handle = logging.FileHandler( beetools.LOG_FILE_NAME, mode = 'w' )
file_handle.setLevel( beetools.DEF_LOG_LEV_FILE )
console_handle = logging.StreamHandler()
console_handle.setLevel( beetools.DEF_LOG_LEV_CON )
file_format = logging.Formatter( beetools.LOG_FILE_FORMAT, datefmt = beetools.LOG_DATE_FORMAT )
console_format = logging.Formatter( beetools.LOG_CONSOLE_FORMAT )
file_handle.setFormatter( file_format )
console_handle.setFormatter( console_format )
logger.addHandler( file_handle )
logger.addHandler( console_handle )
b_tls.print_header( p_cls = p_cls )
success = basic_example()
beetools.result_rep( success, 'Done' )
b_tls.print_footer()
if success:
return b_tls.archive_path
return False
# end do_tests
if __name__ == '__main__':
do_example(p_app_path=_path)
# end __main__
the-stack_106_26399 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Optimize chains of single-qubit u1, u2, u3 gates by combining them into a single gate."""
from itertools import groupby
import numpy as np
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.circuit.library.standard_gates.p import PhaseGate
from qiskit.circuit.library.standard_gates.u import UGate
from qiskit.circuit.library.standard_gates.u1 import U1Gate
from qiskit.circuit.library.standard_gates.u2 import U2Gate
from qiskit.circuit.library.standard_gates.u3 import U3Gate
from qiskit.circuit.gate import Gate
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.quantum_info.synthesis import Quaternion
_CHOP_THRESHOLD = 1e-15
class Optimize1qGates(TransformationPass):
"""Optimize chains of single-qubit u1, u2, u3 gates by combining them into a single gate."""
def __init__(self, basis=None, eps=1e-15):
"""Optimize1qGates initializer.
Args:
basis (list[str]): Basis gates to consider, e.g. `['u3', 'cx']`. For the effects
of this pass, the basis is the set intersection between the `basis` parameter and
the set `{'u1','u2','u3', 'u', 'p'}`.
eps (float): EPS to check against
"""
super().__init__()
self.basis = basis if basis else ["u1", "u2", "u3"]
self.eps = eps
def run(self, dag):
"""Run the Optimize1qGates pass on `dag`.
Args:
dag (DAGCircuit): the DAG to be optimized.
Returns:
DAGCircuit: the optimized DAG.
Raises:
TranspilerError: if YZY and ZYZ angles do not give same rotation matrix.
"""
use_u = 'u' in self.basis
use_p = 'p' in self.basis
runs = dag.collect_runs(["u1", "u2", "u3", "u", 'p'])
runs = _split_runs_on_parameters(runs)
for run in runs:
if use_p:
right_name = "p"
else:
right_name = "u1"
right_parameters = (0, 0, 0) # (theta, phi, lambda)
right_global_phase = 0
for current_node in run:
left_name = current_node.name
if (current_node.condition is not None
or len(current_node.qargs) != 1
or left_name not in ["p", "u1", "u2", "u3", 'u', "id"]):
raise TranspilerError("internal error")
if left_name in ("u1", "p"):
left_parameters = (0, 0, current_node.op.params[0])
elif left_name == "u2":
left_parameters = (np.pi / 2, current_node.op.params[0],
current_node.op.params[1])
elif left_name in ("u3", 'u'):
left_parameters = tuple(current_node.op.params)
else:
if use_p:
left_name = "p"
else:
left_name = "u1" # replace id with u1
left_parameters = (0, 0, 0)
if (current_node.op.definition is not None and
current_node.op.definition.global_phase):
right_global_phase += current_node.op.definition.global_phase
# If there are any sympy objects coming from the gate convert
# to numpy.
left_parameters = tuple(float(x) for x in left_parameters)
# Compose gates
name_tuple = (left_name, right_name)
if name_tuple in (("u1", "u1"), ("p", "p")):
# u1(lambda1) * u1(lambda2) = u1(lambda1 + lambda2)
right_parameters = (0, 0, right_parameters[2] +
left_parameters[2])
elif name_tuple in (("u1", "u2"), ("p", "u2")):
# u1(lambda1) * u2(phi2, lambda2) = u2(phi2 + lambda1, lambda2)
right_parameters = (np.pi / 2, right_parameters[1] +
left_parameters[2], right_parameters[2])
elif name_tuple in (("u2", "u1"), ("u2", "p")):
# u2(phi1, lambda1) * u1(lambda2) = u2(phi1, lambda1 + lambda2)
right_name = "u2"
right_parameters = (np.pi / 2, left_parameters[1],
right_parameters[2] + left_parameters[2])
elif name_tuple in (("u1", "u3"), ("u1", "u"), ("p", "u3"), ("p", "u")):
# u1(lambda1) * u3(theta2, phi2, lambda2) =
# u3(theta2, phi2 + lambda1, lambda2)
right_parameters = (right_parameters[0], right_parameters[1] +
left_parameters[2], right_parameters[2])
elif name_tuple in (("u3", "u1"), ('u', 'u1'), ("u3", "p"), ("u", "p")):
# u3(theta1, phi1, lambda1) * u1(lambda2) =
# u3(theta1, phi1, lambda1 + lambda2)
if use_u:
right_name = 'u'
else:
right_name = "u3"
right_parameters = (left_parameters[0], left_parameters[1],
right_parameters[2] + left_parameters[2])
elif name_tuple == ("u2", "u2"):
# Using Ry(pi/2).Rz(2*lambda).Ry(pi/2) =
# Rz(pi/2).Ry(pi-2*lambda).Rz(pi/2),
# u2(phi1, lambda1) * u2(phi2, lambda2) =
# u3(pi - lambda1 - phi2, phi1 + pi/2, lambda2 + pi/2)
if use_u:
right_name = 'u'
else:
right_name = "u3"
right_parameters = (np.pi - left_parameters[2] -
right_parameters[1], left_parameters[1] +
np.pi / 2, right_parameters[2] +
np.pi / 2)
elif name_tuple[1] == "nop":
right_name = left_name
right_parameters = left_parameters
else:
# For composing u3's or u2's with u3's, use
# u2(phi, lambda) = u3(pi/2, phi, lambda)
# together with the qiskit.mapper.compose_u3 method.
if use_u:
right_name = 'u'
else:
right_name = "u3"
# Evaluate the symbolic expressions for efficiency
right_parameters = Optimize1qGates.compose_u3(left_parameters[0],
left_parameters[1],
left_parameters[2],
right_parameters[0],
right_parameters[1],
right_parameters[2])
# Why evalf()? This program:
# OPENQASM 2.0;
# include "qelib1.inc";
# qreg q[2];
# creg c[2];
# u3(0.518016983430947*pi,1.37051598592907*pi,1.36816383603222*pi) q[0];
# u3(1.69867232277986*pi,0.371448347747471*pi,0.461117217930936*pi) q[0];
# u3(0.294319836336836*pi,0.450325871124225*pi,1.46804720442555*pi) q[0];
# measure q -> c;
# took >630 seconds (did not complete) to optimize without
# calling evalf() at all, 19 seconds to optimize calling
# evalf() AFTER compose_u3, and 1 second to optimize
# calling evalf() BEFORE compose_u3.
# 1. Here down, when we simplify, we add f(theta) to lambda to
# correct the global phase when f(theta) is 2*pi. This isn't
# necessary but the other steps preserve the global phase, so
# we continue in that manner.
# 2. The final step will remove Z rotations by 2*pi.
# 3. Note that is_zero is true only if the expression is exactly
# zero. If the input expressions have already been evaluated
# then these final simplifications will not occur.
# TODO After we refactor, we should have separate passes for
# exact and approximate rewriting.
# Y rotation is 0 mod 2*pi, so the gate is a u1
if abs(np.mod(right_parameters[0],
(2 * np.pi))) < self.eps and right_name != "u1" \
and right_name != "p":
if use_p:
right_name = "p"
else:
right_name = "u1"
right_parameters = (0, 0, right_parameters[1] +
right_parameters[2] +
right_parameters[0])
# Y rotation is pi/2 or -pi/2 mod 2*pi, so the gate is a u2
if right_name in ("u3", 'u'):
# theta = pi/2 + 2*k*pi
right_angle = right_parameters[0] - np.pi / 2
if abs(right_angle) < self.eps:
right_angle = 0
if abs(np.mod((right_angle),
2 * np.pi)) < self.eps:
right_name = "u2"
right_parameters = (np.pi / 2, right_parameters[1],
right_parameters[2] +
(right_parameters[0] - np.pi / 2))
# theta = -pi/2 + 2*k*pi
right_angle = right_parameters[0] + np.pi / 2
if abs(right_angle) < self.eps:
right_angle = 0
if abs(np.mod(right_angle,
2 * np.pi)) < self.eps:
right_name = "u2"
right_parameters = (np.pi / 2, right_parameters[1] +
np.pi, right_parameters[2] -
np.pi + (right_parameters[0] +
np.pi / 2))
# u1 and lambda is 0 mod 2*pi so gate is nop (up to a global phase)
if right_name in ("u1", "p") and abs(
np.mod(right_parameters[2], 2 * np.pi)) < self.eps:
right_name = "nop"
if right_name == "u2" and "u2" not in self.basis:
if use_u:
right_name = 'u'
else:
right_name = "u3"
if right_name in ("u1", "p") and right_name not in self.basis:
if use_u:
right_name = 'u'
else:
right_name = "u3"
new_op = Gate(name="", num_qubits=1, params=[])
if right_name == "u1":
new_op = U1Gate(right_parameters[2])
if right_name == "p":
new_op = PhaseGate(right_parameters[2])
if right_name == "u2":
new_op = U2Gate(right_parameters[1], right_parameters[2])
if right_name == "u":
if "u" in self.basis:
new_op = UGate(*right_parameters)
if right_name == "u3":
if "u3" in self.basis:
new_op = U3Gate(*right_parameters)
else:
raise TranspilerError('It was not possible to use the basis %s' % self.basis)
dag.global_phase += right_global_phase
if right_name != 'nop':
dag.substitute_node(run[0], new_op, inplace=True)
# Delete the other nodes in the run
for current_node in run[1:]:
dag.remove_op_node(current_node)
if right_name == "nop":
dag.remove_op_node(run[0])
return dag
@staticmethod
def compose_u3(theta1, phi1, lambda1, theta2, phi2, lambda2):
"""Return a triple theta, phi, lambda for the product.
u3(theta, phi, lambda)
= u3(theta1, phi1, lambda1).u3(theta2, phi2, lambda2)
= Rz(phi1).Ry(theta1).Rz(lambda1+phi2).Ry(theta2).Rz(lambda2)
= Rz(phi1).Rz(phi').Ry(theta').Rz(lambda').Rz(lambda2)
= u3(theta', phi1 + phi', lambda2 + lambda')
Return theta, phi, lambda.
"""
# Careful with the factor of two in yzy_to_zyz
thetap, phip, lambdap = Optimize1qGates.yzy_to_zyz((lambda1 + phi2), theta1, theta2)
(theta, phi, lamb) = (thetap, phi1 + phip, lambda2 + lambdap)
return (theta, phi, lamb)
@staticmethod
def yzy_to_zyz(xi, theta1, theta2, eps=1e-9): # pylint: disable=invalid-name
"""Express a Y.Z.Y single qubit gate as a Z.Y.Z gate.
Solve the equation
.. math::
Ry(theta1).Rz(xi).Ry(theta2) = Rz(phi).Ry(theta).Rz(lambda)
for theta, phi, and lambda.
Return a solution theta, phi, and lambda.
"""
quaternion_yzy = Quaternion.from_euler([theta1, xi, theta2], 'yzy')
euler = quaternion_yzy.to_zyz()
quaternion_zyz = Quaternion.from_euler(euler, 'zyz')
# output order different than rotation order
out_angles = (euler[1], euler[0], euler[2])
abs_inner = abs(quaternion_zyz.data.dot(quaternion_yzy.data))
if not np.allclose(abs_inner, 1, eps):
raise TranspilerError('YZY and ZYZ angles do not give same rotation matrix.')
out_angles = tuple(0 if np.abs(angle) < _CHOP_THRESHOLD else angle
for angle in out_angles)
return out_angles
def _split_runs_on_parameters(runs):
"""Finds runs containing parameterized gates and splits them into sequential
runs excluding the parameterized gates.
"""
out = []
for run in runs:
groups = groupby(run, lambda x: x.op.is_parameterized())
for group_is_parameterized, gates in groups:
if not group_is_parameterized:
out.append(list(gates))
return out
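# Minimal usage sketch (not part of the original module): assuming Qiskit is
# installed, the pass is scheduled through a PassManager like any other
# transpiler pass; the toy circuit below is purely illustrative.
if __name__ == "__main__":
    from qiskit import QuantumCircuit
    from qiskit.transpiler import PassManager

    toy = QuantumCircuit(1)
    toy.append(U1Gate(0.1), [0])
    toy.append(U1Gate(0.2), [0])
    toy.append(U2Gate(0.3, 0.4), [0])

    # The three chained single-qubit gates should collapse into a single gate.
    optimized = PassManager(Optimize1qGates(basis=["u1", "u2", "u3"])).run(toy)
    print(optimized)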
the-stack_106_26401 | # -*- coding: utf-8 -*-
import json
import typing
from apispec import APISpec
import pytest
import serpyco
from serpyco import nested_field
from serpyco import string_field
from apispec_serpyco import SerpycoPlugin
from apispec_serpyco.utils import schema_name_resolver
import dataclasses
from dataclasses import dataclass
from tests.utils import get_definitions
from tests.utils import get_parameters
from tests.utils import get_paths
from tests.utils import get_responses
from tests.utils import ref_path
@dataclass
class PetSchema(object):
id: int = string_field(description="Pet id")
name: str = string_field(description="Pet name")
password: str = string_field(description="Pet auth password")
@dataclass
class SampleSchema(object):
count: int
runs: typing.List["RunSchema"] = nested_field(exclude=["sample"])
@dataclass
class RunSchema(object):
sample: typing.List[SampleSchema] = nested_field(exclude=["runs"])
@dataclass
class AnalysisSchema(object):
sample: SampleSchema
@dataclass
class AnalysisWithListSchema(object):
samples: typing.List[SampleSchema]
@dataclass
class SelfReferencingSchema(object):
id: int
single: "SelfReferencingSchema"
many: typing.List["SelfReferencingSchema"]
@dataclass
class DefaultValuesSchema(object):
number_auto_default: int = dataclasses.field(default=12)
string_callable_default: str = dataclasses.field(
default_factory=lambda: "Callable value"
)
numbers: typing.List[int] = dataclasses.field(default_factory=lambda: [])
class TestDefinitionHelper:
@pytest.mark.parametrize("schema", [PetSchema])
def test_can_use_schema_as_definition(self, spec, schema):
spec.components.schema("Pet", schema=schema)
definitions = get_definitions(spec)
props = definitions["Pet"]["properties"]
assert props["id"]["type"] == "integer"
assert props["name"]["type"] == "string"
def test_schema_helper_without_schema(self, spec):
spec.components.schema("Pet", {"properties": {"key": {"type": "integer"}}})
definitions = get_definitions(spec)
assert definitions["Pet"]["properties"] == {"key": {"type": "integer"}}
@pytest.mark.parametrize("schema", [AnalysisSchema])
def test_resolve_schema_dict_auto_reference(self, schema):
def resolver(schema):
return schema.__name__
spec = APISpec(
title="Test auto-reference",
version="0.1",
openapi_version="2.0",
plugins=(SerpycoPlugin(schema_name_resolver=schema_name_resolver),),
)
assert {} == get_definitions(spec)
spec.components.schema("analysis", schema=schema)
spec.path(
"/test",
operations={
"get": {
"responses": {"200": {"schema": {"$ref": "#/definitions/analysis"}}}
}
},
)
definitions = get_definitions(spec)
assert 3 == len(definitions)
assert "analysis" in definitions
assert "SampleSchema" in definitions
assert "RunSchema_exclude_sample" in definitions
@pytest.mark.parametrize("schema", [AnalysisWithListSchema])
def test_resolve_schema_dict_auto_reference_in_list(self, schema):
def resolver(schema):
return schema.__name__
spec = APISpec(
title="Test auto-reference",
version="0.1",
openapi_version="2.0",
plugins=(SerpycoPlugin(),),
)
assert {} == get_definitions(spec)
spec.components.schema("analysis", schema=schema)
spec.path(
"/test",
operations={
"get": {
"responses": {"200": {"schema": {"$ref": "#/definitions/analysis"}}}
}
},
)
definitions = get_definitions(spec)
assert 3 == len(definitions)
assert "analysis" in definitions
assert "tests.test_ext_serpyco.SampleSchema" in definitions
assert "tests.test_ext_serpyco.RunSchema_exclude_sample" in definitions
class TestComponentParameterHelper(object):
@pytest.mark.parametrize("schema", [PetSchema])
def test_can_use_schema_in_parameter(self, spec, schema):
if spec.openapi_version.major < 3:
kwargs = {"schema": schema}
else:
kwargs = {"content": {"application/json": {"schema": schema}}}
spec.components.parameter("Pet", "body", **kwargs)
parameter = get_parameters(spec)["Pet"]
assert parameter["in"] == "body"
if spec.openapi_version.major < 3:
schema = parameter["schema"]["properties"]
else:
schema = parameter["content"]["application/json"]["schema"]["properties"]
assert schema["name"]["type"] == "string"
assert schema["password"]["type"] == "string"
class TestComponentResponseHelper:
@pytest.mark.parametrize("schema", [PetSchema])
def test_can_use_schema_in_response(self, spec, schema):
if spec.openapi_version.major < 3:
kwargs = {"schema": schema}
else:
kwargs = {"content": {"application/json": {"schema": schema}}}
spec.components.response("GetPetOk", **kwargs)
response = get_responses(spec)["GetPetOk"]
if spec.openapi_version.major < 3:
schema = response["schema"]["properties"]
else:
schema = response["content"]["application/json"]["schema"]["properties"]
assert schema["id"]["type"] == "integer"
assert schema["name"]["type"] == "string"
class TestCustomField:
def test_can_use_custom_field_decorator(self, spec_fixture):
@dataclass
class CustomPetASchema(PetSchema):
email: str = serpyco.string_field(
format_=serpyco.StringFormat.EMAIL,
pattern="^[A-Z]",
min_length=3,
max_length=24,
)
@dataclass
class CustomPetBSchema(PetSchema):
age: int = serpyco.number_field(minimum=1, maximum=120)
@dataclass
class WithStringField(object):
"""String field test class"""
foo: str = serpyco.string_field(
format_=serpyco.StringFormat.EMAIL,
pattern="^[A-Z]",
min_length=3,
max_length=24,
)
serializer = serpyco.Serializer(WithStringField)
serializer.json_schema()
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.components.schema("CustomPetA", schema=CustomPetASchema)
spec_fixture.spec.components.schema("CustomPetB", schema=CustomPetBSchema)
props_0 = get_definitions(spec_fixture.spec)["Pet"]["properties"]
props_a = get_definitions(spec_fixture.spec)["CustomPetA"]["properties"]
props_b = get_definitions(spec_fixture.spec)["CustomPetB"]["properties"]
assert props_0["name"]["type"] == "string"
assert "format" not in props_0["name"]
assert props_a["email"]["type"] == "string"
assert json.dumps(props_a["email"]["format"]) == '"email"'
assert props_a["email"]["pattern"] == "^[A-Z]"
assert props_a["email"]["maxLength"] == 24
assert props_a["email"]["minLength"] == 3
assert props_b["age"]["type"] == "integer"
assert props_b["age"]["minimum"] == 1
assert props_b["age"]["maximum"] == 120
class TestOperationHelper:
@staticmethod
def ref_path(spec):
if spec.openapi_version.version[0] < 3:
return "#/definitions/"
return "#/components/schemas/"
@pytest.mark.parametrize("pet_schema", (PetSchema,))
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_v2(self, spec_fixture, pet_schema):
spec_fixture.spec.path(
path="/pet",
operations={
"get": {
"responses": {
"200": {
"schema": pet_schema,
"description": "successful operation",
}
}
}
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
assert get["responses"]["200"][
"schema"
] == spec_fixture.openapi.schema2jsonschema(PetSchema)
assert get["responses"]["200"]["description"] == "successful operation"
@pytest.mark.parametrize("pet_schema", (PetSchema,))
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_v3(self, spec_fixture, pet_schema):
spec_fixture.spec.path(
path="/pet",
operations={
"get": {
"responses": {
"200": {
"content": {"application/json": {"schema": pet_schema}},
"description": "successful operation",
}
}
}
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
resolved_schema = get["responses"]["200"]["content"]["application/json"][
"schema"
]
assert resolved_schema == spec_fixture.openapi.schema2jsonschema(PetSchema)
assert get["responses"]["200"]["description"] == "successful operation"
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_expand_parameters_v2(self, spec_fixture):
spec_fixture.spec.path(
path="/pet",
operations={
"get": {"parameters": [{"in": "query", "schema": PetSchema}]},
"post": {
"parameters": [
{
"in": "body",
"description": "a pet schema",
"required": True,
"name": "pet",
"schema": PetSchema,
}
]
},
},
)
p = get_paths(spec_fixture.spec)["/pet"]
get = p["get"]
assert get["parameters"] == spec_fixture.openapi.schema2parameters(
PetSchema, default_in="query"
)
post = p["post"]
assert post["parameters"] == spec_fixture.openapi.schema2parameters(
PetSchema,
default_in="body",
required=True,
name="pet",
description="a pet schema",
)
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_expand_parameters_v3(self, spec_fixture):
spec_fixture.spec.path(
path="/pet",
operations={
"get": {"parameters": [{"in": "query", "schema": PetSchema}]},
"post": {
"requestBody": {
"description": "a pet schema",
"required": True,
"content": {"application/json": {"schema": PetSchema}},
}
},
},
)
p = get_paths(spec_fixture.spec)["/pet"]
get = p["get"]
assert get["parameters"] == spec_fixture.openapi.schema2parameters(
PetSchema, default_in="query"
)
post = p["post"]
post_schema = spec_fixture.openapi.resolve_schema_dict(PetSchema)
assert (
post["requestBody"]["content"]["application/json"]["schema"] == post_schema
)
assert post["requestBody"]["description"] == "a pet schema"
assert post["requestBody"]["required"]
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_uses_ref_if_available_v2(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet",
operations={"get": {"responses": {"200": {"schema": PetSchema}}}},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
assert (
get["responses"]["200"]["schema"]["$ref"]
== self.ref_path(spec_fixture.spec) + "Pet"
)
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_uses_ref_if_available_v3(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet",
operations={
"get": {
"responses": {
"200": {"content": {"application/json": {"schema": PetSchema}}}
}
}
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
assert (
get["responses"]["200"]["content"]["application/json"]["schema"]["$ref"]
== self.ref_path(spec_fixture.spec) + "Pet"
)
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_uses_ref_in_parameters_and_request_body_if_available_v2(
self, spec_fixture
):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet",
operations={
"get": {"parameters": [{"in": "query", "schema": PetSchema}]},
"post": {"parameters": [{"in": "body", "schema": PetSchema}]},
},
)
p = get_paths(spec_fixture.spec)["/pet"]
assert "schema" not in p["get"]["parameters"][0]
post = p["post"]
assert len(post["parameters"]) == 1
assert (
post["parameters"][0]["schema"]["$ref"]
== self.ref_path(spec_fixture.spec) + "Pet"
)
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_uses_ref_in_parameters_and_request_body_if_available_v3(
self, spec_fixture
):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet",
operations={
"get": {"parameters": [{"in": "query", "schema": PetSchema}]},
"post": {
"requestBody": {
"content": {"application/json": {"schema": PetSchema}}
}
},
},
)
p = get_paths(spec_fixture.spec)["/pet"]
assert "schema" in p["get"]["parameters"][0]
post = p["post"]
schema_ref = post["requestBody"]["content"]["application/json"]["schema"]
assert schema_ref == {"$ref": self.ref_path(spec_fixture.spec) + "Pet"}
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_array_uses_ref_if_available_v2(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet",
operations={
"get": {
"parameters": [
{
"in": "body",
"name": "body",
"schema": {"type": "array", "items": PetSchema},
}
],
"responses": {
"200": {"schema": {"type": "array", "items": PetSchema}}
},
}
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
assert len(get["parameters"]) == 1
resolved_schema = {
"type": "array",
"items": {"$ref": self.ref_path(spec_fixture.spec) + "Pet"},
}
assert get["parameters"][0]["schema"] == resolved_schema
assert get["responses"]["200"]["schema"] == resolved_schema
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_array_uses_ref_if_available_v3(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet",
operations={
"get": {
"parameters": [
{
"in": "body",
"name": " body",
"content": {
"application/json": {
"schema": {"type": "array", "items": PetSchema}
}
},
}
],
"responses": {
"200": {
"content": {
"application/json": {
"schema": {"type": "array", "items": PetSchema}
}
}
}
},
}
},
)
p = get_paths(spec_fixture.spec)["/pet"]
assert "get" in p
op = p["get"]
resolved_schema = {
"type": "array",
"items": {"$ref": self.ref_path(spec_fixture.spec) + "Pet"},
}
request_schema = op["parameters"][0]["content"]["application/json"]["schema"]
assert request_schema == resolved_schema
response_schema = op["responses"]["200"]["content"]["application/json"][
"schema"
]
assert response_schema == resolved_schema
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_partially_v2(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/parents",
operations={
"get": {
"responses": {
"200": {
"schema": {
"type": "object",
"properties": {
"mother": PetSchema,
"father": PetSchema,
},
}
}
}
}
},
)
get = get_paths(spec_fixture.spec)["/parents"]["get"]
assert get["responses"]["200"]["schema"] == {
"type": "object",
"properties": {
"mother": {"$ref": self.ref_path(spec_fixture.spec) + "Pet"},
"father": {"$ref": self.ref_path(spec_fixture.spec) + "Pet"},
},
}
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_partially_v3(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/parents",
operations={
"get": {
"responses": {
"200": {
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"mother": PetSchema,
"father": PetSchema,
},
}
}
}
}
}
}
},
)
get = get_paths(spec_fixture.spec)["/parents"]["get"]
assert get["responses"]["200"]["content"]["application/json"]["schema"] == {
"type": "object",
"properties": {
"mother": {"$ref": self.ref_path(spec_fixture.spec) + "Pet"},
"father": {"$ref": self.ref_path(spec_fixture.spec) + "Pet"},
},
}
class TestCircularReference:
def test_circular_referencing_schemas(self, spec):
spec.components.schema("Analysis", schema=AnalysisSchema)
spec.components.schema("Sample", schema=SampleSchema)
spec.components.schema("Run", schema=RunSchema)
definitions = get_definitions(spec)
ref = definitions["Analysis"]["properties"]["sample"]["$ref"]
assert ref == ref_path(spec) + "tests.test_ext_serpyco.SampleSchema"
class TestSelfReference:
def test_self_referencing_field_single(self, spec):
spec.components.schema("SelfReference", schema=SelfReferencingSchema)
definitions = get_definitions(spec)
ref = definitions["SelfReference"]["properties"]["single"]["$ref"]
assert ref == ref_path(spec) + "SelfReference"
def test_self_referencing_field_many(self, spec):
spec.components.schema("SelfReference", schema=SelfReferencingSchema)
definitions = get_definitions(spec)
result = definitions["SelfReference"]["properties"]["many"]
assert result == {
"type": "array",
"items": {"$ref": ref_path(spec) + "SelfReference"},
}
class TestSchemaWithDefaultValues:
def test_schema_with_default_values(self, spec):
spec.components.schema("DefaultValuesSchema", schema=DefaultValuesSchema)
definitions = get_definitions(spec)
props = definitions["DefaultValuesSchema"]["properties"]
assert props["number_auto_default"]["default"] == 12
# FIXME BS 2019-10-21: restore these 2 lines when
# https://gitlab.com/sgrignard/serpyco/issues/32 resolved
# assert props["string_callable_default"]["default"] == "Callable value"
# assert props["numbers"]["default"] == []
class TestSchemaWithOptional:
def test_schema_with_optional_string(self, spec):
@dataclasses.dataclass
class MySchema:
id: int
name: typing.Optional[str] = None
spec.components.schema("MySchema", schema=MySchema)
definitions = get_definitions(spec)
props = definitions["MySchema"]["properties"]
assert "required" in definitions["MySchema"]
assert ["id"] == definitions["MySchema"]["required"]
assert {"id": {"type": "integer"}, "name": {"type": "string"}} == props
def test_schema_with_optional_string_in_related_schema(self, spec):
@dataclasses.dataclass
class MyChildSchema:
id: int
name: typing.Optional[str] = None
@dataclasses.dataclass
class MyParentSchema:
id: int
child: MyChildSchema
spec.components.schema("MyParentSchema", schema=MyParentSchema)
definitions = get_definitions(spec)
props = definitions["tests.test_ext_serpyco.MyChildSchema"]["properties"]
definition = definitions["tests.test_ext_serpyco.MyChildSchema"]
assert "required" in definition
assert ["id"] == definition["required"]
assert {"id": {"type": "integer"}, "name": {"type": "string"}} == props
the-stack_106_26404 | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "test-1.botics.co"
site_params = {
"name": "test",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
the-stack_106_26405 | from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class Monokai_darkStyle(Style):
"""
This style mimics the Monokai color scheme.
"""
background_color = "#000000"
highlight_color = "#49483e"
styles = {
# No corresponding class for the following:
Text: "#f8f8f2", # class: ''
Whitespace: "", # class: 'w'
Error: "#960050 bg:#1e0010", # class: 'err'
Other: "", # class 'x'
Comment: "#75715e", # class: 'c'
Comment.Multiline: "", # class: 'cm'
Comment.Preproc: "", # class: 'cp'
Comment.Single: "", # class: 'c1'
Comment.Special: "", # class: 'cs'
Keyword: "#66d9ef", # class: 'k'
Keyword.Constant: "", # class: 'kc'
Keyword.Declaration: "", # class: 'kd'
Keyword.Namespace: "#f92672", # class: 'kn'
Keyword.Pseudo: "", # class: 'kp'
Keyword.Reserved: "", # class: 'kr'
Keyword.Type: "", # class: 'kt'
Operator: "#f92672", # class: 'o'
Operator.Word: "", # class: 'ow' - like keywords
Punctuation: "#f8f8f2", # class: 'p'
Name: "#f8f8f2", # class: 'n'
Name.Attribute: "#a6e22e", # class: 'na' - to be revised
Name.Builtin: "", # class: 'nb'
Name.Builtin.Pseudo: "", # class: 'bp'
Name.Class: "#a6e22e", # class: 'nc' - to be revised
Name.Constant: "#66d9ef", # class: 'no' - to be revised
Name.Decorator: "#a6e22e", # class: 'nd' - to be revised
Name.Entity: "", # class: 'ni'
Name.Exception: "#a6e22e", # class: 'ne'
Name.Function: "#a6e22e", # class: 'nf'
Name.Property: "", # class: 'py'
Name.Label: "", # class: 'nl'
Name.Namespace: "", # class: 'nn' - to be revised
Name.Other: "#a6e22e", # class: 'nx'
Name.Tag: "#f92672", # class: 'nt' - like a keyword
Name.Variable: "", # class: 'nv' - to be revised
Name.Variable.Class: "", # class: 'vc' - to be revised
Name.Variable.Global: "", # class: 'vg' - to be revised
Name.Variable.Instance: "", # class: 'vi' - to be revised
Number: "#ae81ff", # class: 'm'
Number.Float: "", # class: 'mf'
Number.Hex: "", # class: 'mh'
Number.Integer: "", # class: 'mi'
Number.Integer.Long: "", # class: 'il'
Number.Oct: "", # class: 'mo'
Literal: "#ae81ff", # class: 'l'
Literal.Date: "#e6db74", # class: 'ld'
String: "#e6db74", # class: 's'
String.Backtick: "", # class: 'sb'
String.Char: "", # class: 'sc'
String.Doc: "", # class: 'sd' - like a comment
String.Double: "", # class: 's2'
String.Escape: "#ae81ff", # class: 'se'
String.Heredoc: "", # class: 'sh'
String.Interpol: "", # class: 'si'
String.Other: "", # class: 'sx'
String.Regex: "", # class: 'sr'
String.Single: "", # class: 's1'
String.Symbol: "", # class: 'ss'
Generic: "", # class: 'g'
Generic.Deleted: "#f92672", # class: 'gd',
Generic.Emph: "italic", # class: 'ge'
Generic.Error: "", # class: 'gr'
Generic.Heading: "", # class: 'gh'
Generic.Inserted: "#a6e22e", # class: 'gi'
Generic.Output: "", # class: 'go'
Generic.Prompt: "", # class: 'gp'
Generic.Strong: "bold", # class: 'gs'
Generic.Subheading: "#75715e", # class: 'gu'
Generic.Traceback: "", # class: 'gt'
}
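# Minimal usage sketch (not part of the original file): a Pygments Style class
# like the one above is normally handed to a formatter via its ``style``
# argument. The sample source string below is purely illustrative.
if __name__ == "__main__":
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    sample = "def greet(name):\n    return 'hello ' + name\n"
    formatter = HtmlFormatter(style=Monokai_darkStyle, full=True)
    print(highlight(sample, PythonLexer(), formatter))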
the-stack_106_26406 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Union, Any
import warnings
import numpy as np
import scipy.stats
from ..common.typetools import ArrayLike
# Nevergrad, in the most fundamental layer, uses continuous variables only.
# Discrete variables are handled in one of the following ways:
# - by a softmax transformation, a k-valued categorical variable is converted into k continuous variables.
# - by a discretization - as we often use Gaussian random values, we discretize according to quantiles of the normal
# distribution.
def threshold_discretization(x: ArrayLike, arity: int = 2) -> List[int]:
"""Discretize by casting values from 0 to arity -1, assuming that x values
follow a normal distribution.
Parameters
----------
x: list/array
values to discretize
arity: int
the number of possible integer values (arity n will lead to values from 0 to n - 1)
Note
----
- nans are processed as negative infs (yields 0)
"""
x = np.array(x, copy=True)
if np.any(np.isnan(x)):
warnings.warn("Encountered NaN values for discretization")
x[np.isnan(x)] = -np.inf
if arity == 2: # special case, to have 0 yield 0
return (np.array(x) > 0).astype(int).tolist() # type: ignore
else:
return np.clip(arity * scipy.stats.norm.cdf(x), 0, arity - 1).astype(int).tolist() # type: ignore
# The function below is the opposite of the function above.
def inverse_threshold_discretization(indexes: List[int], arity: int = 2) -> np.ndarray:
indexes_arr = np.array(indexes, copy=True)
pdf_bin_size = 1 / arity
# We take the center of each bin (in the pdf space)
return scipy.stats.norm.ppf(indexes_arr * pdf_bin_size + (pdf_bin_size / 2)) # type: ignore
# The discretization is, by nature, not one to one.
# In the function below, we randomly draw one of the possible inverse values - this is therefore noisy.
def noisy_inverse_threshold_discretization(indexes: List[int], arity: int = 2, gen: Any = None) -> np.ndarray:
indexes_arr = np.array(indexes, copy=True)
pdf_bin_size = 1 / arity
# We take a random point in the bin.
return scipy.stats.norm.ppf(indexes_arr * pdf_bin_size + gen.rand() * pdf_bin_size) # type: ignore
def softmax_discretization(x: ArrayLike, arity: int = 2, random: Union[bool, np.random.RandomState] = True) -> List[int]:
"""Discretize a list of floats to a list of ints based on softmax probabilities.
For arity n, a softmax is applied to the first n values, and the result
    serves as the probability for the first output integer. The same process is
applied to the other input values.
Parameters
----------
x: list/array
the float values from a continuous space which need to be discretized
arity: int
the number of possible integer values (arity 2 will lead to values in {0, 1})
random: bool or np.random.RandomState
either a RandomState to pull values from, or True for pulling values on the default random state,
or False to get a deterministic behavior
Notes
-----
- if one or several inf values are present, only those are considered
- in case of tie, the deterministic value is the first one (lowest) of the tie
- nans and -infs are ignored, except if all are (then uniform random choice)
"""
data = np.array(x, copy=True, dtype=float).reshape((-1, arity))
if np.any(np.isnan(data)):
warnings.warn("Encountered NaN values for discretization")
data[np.isnan(data)] = -np.inf
if random is False:
output = np.argmax(data, axis=1).tolist()
return output # type: ignore
if isinstance(random, bool): # equivalent to "random is True"
random = np.random # default random number generator (creating a RandomState is slow)
return [random.choice(arity, p=softmax_probas(d)) for d in data]
def softmax_probas(data: np.ndarray) -> np.ndarray:
# TODO: test directly? (currently through softmax discretization)
# TODO: move nan case here?
maxv = np.max(data)
if np.abs(maxv) == np.inf or np.isnan(maxv):
maxv = 0
data = np.exp(data - maxv)
if any(x == np.inf for x in data): # deal with infinite positives special case
data = np.array([int(x == np.inf) for x in data])
if not sum(data):
data = np.ones(len(data))
return data / np.sum(data) # type: ignore
def inverse_softmax_discretization(index: int, arity: int) -> ArrayLike:
# p is an arbitrary probability that the provided arg will be sampled with the returned point
p = (1 / arity) * 1.5
x: np.ndarray = np.zeros(arity)
x[index] = np.log((p * (arity - 1)) / (1 - p))
return x
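# Illustrative sketch (not part of the original module): round-tripping a few
# values through the threshold and softmax helpers above. The numbers are
# arbitrary and only meant to show the expected shapes of inputs and outputs.
if __name__ == "__main__":
    continuous = [-1.0, 0.2, 1.5]
    codes = threshold_discretization(continuous, arity=3)       # e.g. [0, 1, 2]
    centers = inverse_threshold_discretization(codes, arity=3)  # bin centers
    print(codes, centers)

    # softmax discretization consumes arity values per output integer
    print(softmax_discretization([0.0, 5.0, -1.0], arity=3, random=False))  # [1]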
the-stack_106_26407 | import matplotlib.pyplot as plt
import numpy as np
def plot_period(X, y=None,
channel_names=None,
init_second=None,
sample_rate=None,
out_path=None,
return_fig=False):
"""
Plots one period (typically 30 seconds) of PSG data with its annotation.
If neither out_path and return_fig are set, displays the figure and blocks.
Args:
X: (list) A list of ndarrays of PSG periods
y: (string) The epoch stage string (optional)
channel_names: (list) A list of channel names, length equal to
second dimension of 'X'.
init_second (int) Show the time of this period in the axes
title. If not set, no title will be set.
sample_rate (int) The sample rate of the PSG ('X'). Used to
compute the length of the period.
out_path: (string) Optional path to save the figure to
return_fig: (bool) Return the figure instead of saving
(out_path is ignored)
Returns:
Figure and axes objects if return_fig=True, otherwise None
"""
X = X.squeeze()
if X.ndim == 1:
X = np.expand_dims(X, -1)
n_chans = X.shape[-1]
    assert channel_names is None or len(channel_names) == n_chans
fig, axes = plt.subplots(figsize=(14, 7), ncols=1, nrows=n_chans,
sharex=True)
fig.subplots_adjust(hspace=0)
if n_chans == 1:
axes = [axes]
xs = np.arange(len(X))
for i in range(n_chans):
axes[i].plot(xs, X[:, i], color="black")
axes[i].axhline(0, color='red',
linewidth=1.5)
axes[i].set_xlim(xs[0], xs[-1])
if channel_names:
axes[i].annotate(
s=channel_names[i],
size=max(23-(2*len(channel_names)), 7),
xy=(1.025, 0.5),
xycoords=axes[i].transAxes,
rotation=-90,
va="center",
ha="center"
)
p = "Period {}s".format(init_second) if init_second else ""
p += "-{}s".format(init_second + int(len(X) / sample_rate)) if (init_second
and sample_rate) else ""
if p:
axes[0].set_title(p, size=26)
if isinstance(y, str):
fig.suptitle("Sleep stage: {}".format(y), size=18)
# Return, save or show the figure
if not return_fig:
if out_path:
fig.savefig(out_path)
else:
plt.show()
plt.close(fig)
else:
return fig, axes
def plot_periods(X, y=None,
highlight_periods=True,
out_path=None,
return_fig=False,
**kwargs):
"""
Plots multiple consecutive periods of PSG data with annotated labels.
If neither out_path and return_fig are set, displays the figure and blocks.
Args:
X: (list) A list of ndarrays of PSG periods
y: (list) A list of epoch stage strings (optional)
highlight_periods: (bool) Plot vertical lines to separate epochs
out_path: (string) Optional path to save the figure to
return_fig: (bool) Return the figure instead of saving
(out_path is ignored)
**kwargs: (dict) Parameters passed to 'plot_period'
Returns:
Figure and axes objects if return_fig=True, otherwise None
"""
X = np.array(X)
if X.ndim == 3:
X = np.concatenate(X, axis=0)
if y is not None:
if len(y) < 15:
ys = '-'.join(y)
else:
ys = "{} stages (too long to show)".format(len(y))
else:
ys = "<Not specified>"
fig, axes = plot_period(X, ys, return_fig=True, **kwargs)
    if highlight_periods and y is not None:
        x_separations = [(len(X)//len(y)) * i for i in range(1, len(y))]
        for ax in axes:
            for sep in x_separations:
                ax.axvline(sep, color='red',
                           linestyle='--',
                           linewidth=1.5)
# Return, save or show the figure
if not return_fig:
if out_path:
fig.savefig(out_path)
else:
plt.show()
plt.close(fig)
else:
return fig, axes
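# Synthetic usage sketch (not part of the original module): two fake 30-second
# "periods" of a single-channel signal. The channel name, stage labels and
# sample rate below are made up purely for illustration.
if __name__ == "__main__":
    sample_rate = 100
    fake_periods = [np.random.randn(30 * sample_rate, 1) for _ in range(2)]
    plot_periods(fake_periods,
                 y=["W", "N2"],
                 channel_names=["EEG Fpz-Cz"],
                 init_second=30,
                 sample_rate=sample_rate)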
the-stack_106_26409 |
# COPYRIGHT 2007 BY BBN TECHNOLOGIES CORP.
# BY USING THIS SOFTWARE THE USER EXPRESSLY AGREES: (1) TO BE BOUND BY
# THE TERMS OF THIS AGREEMENT; (2) THAT YOU ARE AUTHORIZED TO AGREE TO
# THESE TERMS ON BEHALF OF YOURSELF AND YOUR ORGANIZATION; (3) IF YOU OR
# YOUR ORGANIZATION DO NOT AGREE WITH THE TERMS OF THIS AGREEMENT, DO
# NOT CONTINUE. RETURN THE SOFTWARE AND ALL OTHER MATERIALS, INCLUDING
# ANY DOCUMENTATION TO BBN TECHNOLOGIES CORP.
# BBN GRANTS A NONEXCLUSIVE, ROYALTY-FREE RIGHT TO USE THIS SOFTWARE
# KNOWN AS THE OntoNotes DB Tool v. 0.9 (HEREINAFTER THE "SOFTWARE")
# SOLELY FOR RESEARCH PURPOSES. PROVIDED, YOU MUST AGREE TO ABIDE BY THE
# LICENSE AND TERMS STATED HEREIN. TITLE TO THE SOFTWARE AND ITS
# DOCUMENTATION AND ALL APPLICABLE COPYRIGHTS, TRADE SECRETS, PATENTS
# AND OTHER INTELLECTUAL RIGHTS IN IT ARE AND REMAIN WITH BBN AND SHALL
# NOT BE USED, REVEALED, DISCLOSED IN MARKETING OR ADVERTISEMENT OR ANY
# OTHER ACTIVITY NOT EXPLICITLY PERMITTED IN WRITING.
# NO WARRANTY. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY
# KIND. THE SOFTWARE IS PROVIDED for RESEARCH PURPOSES ONLY. AS SUCH,
# IT MAY CONTAIN ERRORS, WHICH COULD CAUSE FAILURES OR LOSS OF DATA. TO
# THE MAXIMUM EXTENT PERMITTED BY LAW, BBN MAKES NO WARRANTIES, EXPRESS
# OR IMPLIED AS TO THE SOFTWARE, ITS CAPABILITIES OR FUNCTIONALITY,
# INCLUDING WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR
# ANY USE OF THE SOFTWARE. THE USER ASSUMES THE ENTIRE COST OF ALL
# NECESSARY REPAIR OR CORRECTION, EVEN IF BBN HAS BEEN ADVISED OF THE
# POSSIBILITY OF SUCH A DEFECT OR DAMAGES. BBN MAKES NO WARRANTY THAT
# THE SOFTWARE WILL MEET THE USER REQUIREMENTS, OR WILL BE
# UNINTERRUPTED, TIMELY, SECURE, OR ERROR-FREE.
# LIMITATION OF LIABILITY. THE ENTIRE RISK AS TO THE RESULTS AND
# PERFORMANCE OF THE SOFTWARE IS ASSUMED BY THE USER. TO THE MAXIMUM
# EXTENT PERMITTED BY APPLICABLE LAW, BBN SHALL NOT BE LIABLE WITH
# RESPECT TO ANY SUBJECT MATTER OF THIS AGREEMENT UNDER ANY CONTRACT,
# NEGLIGENCE, STRICT LIABILITY OR OTHER THEORY FOR ANY DIRECT,
# CONSEQUENTIAL, RELIANCE, INCIDENTAL, SPECIAL, DIRECT OR INDIRECT
# DAMAGES WHATSOEVER (INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF
# BUSINESS PROFITS, OR BUSINESS INFORMATION, OR FOR BUSINESS
# INTERRUPTION, PERSONAL INJURY OR ANY OTHER LOSSES) RELATING TO (A)
# LOSS OR INACCURACY OF DATA OR COST OF PROCUREMENT OF SUBSTITUTE
# SYSTEM, SERVICES OR TECHNOLOGY, (B) THE USE OR INABILITY TO USE THE
# SOFTWARE; (C) UNAUTHORIZED ACCESS TO OR ALTERATION OF YOUR
# TRANSMISSIONS OR DATA; (D) ANY PERSONAL INJURY OR INJURY TO PROPERTY;
# OR (E) ANY OTHER USE OF THE SOFTWARE EVEN IF BBN HAS BEEN FIRST
# ADVISED OF THE POSSIBILITY OF ANY SUCH DAMAGES OR LOSSES.
# WITHOUT LIMITATION OF THE FOREGOING, THE USER AGREES TO COMMIT NO ACT
# WHICH, DIRECTLY OR INDIRECTLY, WOULD VIOLATE ANY U.S. LAW, REGULATION,
# OR TREATY, OR ANY OTHER INTERNATIONAL TREATY OR AGREEMENT TO WHICH THE
# UNITED STATES ADHERES OR WITH WHICH THE UNITED STATES COMPLIES,
# RELATING TO THE EXPORT OR RE-EXPORT OF ANY COMMODITIES, SOFTWARE, OR
# TECHNICAL DATA.
# author: sameer pradhan
"""
:mod:`ontology` -- Ontology Annotation
-------------------------------------------------
.. autoclass:: ontology
.. autoclass:: upper_model
.. autoclass:: sense_pool
.. autoclass:: sense_pool_collection
.. autoclass:: concept
.. autoclass:: feature
.. autoexception:: no_such_parent_concept_error
.. autoexception:: no_such_parent_sense_pool_error
"""
#---- standard python imports ----#
from __future__ import with_statement
import operator
import os.path
try:
import MySQLdb
except ImportError:
pass
import string
import sys
import re
import exceptions
import codecs
#---- xml specific imports ----#
from xml.etree import ElementTree
import xml.etree.cElementTree as ElementTree
#---- custom package imports ----#
import on
import on.common.log
from on.common.log import status
import on.common.util
import on.corpora
import on.corpora.tree
import on.corpora.proposition
import on.corpora.coreference
import on.corpora.name
import on.corpora.sense
from collections import defaultdict
from on.common.util import insert_ignoring_dups
class ontology:
def __init__(self, a_id, a_upper_model, a_sense_pool_collection, a_cursor=None):
self.id = a_id
self.upper_model = a_upper_model
self.sense_pool_collection = a_sense_pool_collection
def to_dot(self):
a_dot_string = self.upper_model.to_dot() + "\n\t" + \
self.sense_pool_collection.to_dot()
v_e_hash = {}
v_e_list = a_dot_string.split("\n\t")
for v_e in v_e_list:
if(not v_e_hash.has_key(v_e)):
v_e_hash[v_e] = v_e
else:
on.common.log.debug("ignoring duplicate vertex/edge", on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
a_dot_string = ""
for v_e in v_e_hash.keys():
a_dot_string = a_dot_string + "\n\t" + v_e
a_dot_string = a_dot_string.strip()
a_dot_string = "digraph UM {\n\t" + a_dot_string + "\n}"
return a_dot_string
def write_to_db(self, a_cursor):
self.upper_model.write_to_db(a_cursor)
self.sense_pool_collection.write_to_db(a_cursor)
# write the feature type table
on.corpora.ontology.feature_type.write_to_db(a_cursor)
def from_db(self, a_id, a_cursor):
a_ontology = on.corpora.ontology.ontology(a_id, None, None, a_cursor)
# might want to fetch the id from the database
a_upper_model = on.corpora.ontology.upper_model.from_db("upper_model@%s" % (a_id), a_cursor)
a_sense_pool_collection = on.corpora.ontology.sense_pool_collection.from_db("sense_pool_collection@%s" % (a_id), a_cursor)
a_ontology.upper_model = a_upper_model
a_ontology.sense_pool_collection = a_sense_pool_collection
return a_ontology
@staticmethod
@on.common.util.register_config("corpus", "data_in", required=False)
def from_files(config_or_ontology_dir):
""" Given: either a string representing a the path to the ontology
directory or a configuration file that defines the key
(corpus, data_in) representing the parent directory of the
ontology dir.
Return: an instance of the ontology loaded from the filesystem"""
def make_upper_model(um_fname):
status("Loading upper model ...")
with codecs.open(um_fname, "r", "utf8") as um_inf:
return on.corpora.ontology.upper_model(
"upper_model@ontology@on", um_inf.read())
def make_sense_pools(sp_dir):
status("Loading sense pools ...")
return on.corpora.ontology.sense_pool_collection(
"sense_pool_collection@ontology@on", sp_dir)
try:
ontology_dir = os.path.join(config_or_ontology_dir[
"corpus", "data_in"], "ontology")
except TypeError:
ontology_dir = config_or_ontology_dir
return ontology(
"ontology@on",
make_upper_model(os.path.join(ontology_dir, "upper-model.xml")),
make_sense_pools(os.path.join(ontology_dir, "sense-pools")))
class sense_pool_type(on.corpora.abstract_open_type_table):
type_hash = defaultdict(int)
@classmethod
def write_to_db(cls, a_cursor):
pass
class sense_pool_collection:
def __init__(self, a_id, root_dir, a_cursor=None):
self.sense_pools = []
self.id = a_id
if(a_cursor == None):
filenames = [ x for x in os.listdir(root_dir) if x[-4:] == ".xml" ]
# make one pass to fill the sense_pool_type hash so we can
# check for missing parent, related pools, etc.
for filename in filenames:
sense_pool_type(re.sub("\.xml$", "", filename))
for filename in filenames:
file_string = open("%s/%s" % (root_dir, filename)).read()
try:
a_sense_pool = sense_pool(re.sub("\.xml$", "", filename), file_string)
self.sense_pools.append(a_sense_pool)
except Exception:
on.common.log.report("ontology", "failed to initialize sense pool", fname=filename)
#except no_such_parent_concept_error:
# on.common.log.error("""
#found reference to a undefined concept, please correct the upper model
#definition file and reload the data. the reason for this action is
#that this concept is not created, and therefore any successor concepts
#would have a missing path to the root concept. just deleting this
#concept from the list of concepts won't help either because no
#particular sequence in which the concepts are loaded is assumed, and
#therefore there might have been a descendant that got added earlier
#which had this one as the parent, and since our assumption that the
#presense of this concept in the hash means that it would be created
#successfully does not hold, we will have to rectify the error and load
#the concepts once again.
#""")
#except no_such_parent_sense_pool_error:
# on.common.log.error("""
#found reference to a undefined sense pool, please correct the sense pool
#definition files and reload the data. the reason for this action is
#that this concept is not created, and therefore any successor concepts
#would have a missing path to the root sense pool/concept. just deleting this
#concept from the list of concepts won't help either because no
#particular sequence in which the concepts are loaded is assumed, and
#therefore there might have been a descendant that got added earlier
#which had this one as the parent, and since our assumption that the
#presense of this concept in the hash means that it would be created
#successfully does not hold, we will have to rectify the error and load
#the sense pools once again.
#""")
#print "e"
else:
pass
def to_dot(self, complete=False):
dot_string = ""
for a_sense_pool in self.sense_pools:
dot_string = dot_string + a_sense_pool.to_dot()
if(complete == True):
dot_string = "digraph UM {\n\t" + dot_string.strip() + "\n}"
return dot_string.strip()
def write_to_db(self, a_cursor):
for a_sense_pool in self.sense_pools:
a_sense_pool.write_to_db(a_cursor)
def from_db(self, a_id, a_cursor):
# create the object
a_sense_pool_collection = on.corpora.ontology.sense_pool_collection(a_id, None, a_cursor)
# add sense pools to
a_cursor.execute("""select concept_pool_type.id from concept_pool_type where concept_pool_type.type = 'pool'""")
pool_rows = a_cursor.fetchall()
for a_pool_row in pool_rows:
a_pool_id = a_pool_row["id"]
a_pool = on.corpora.ontology.sense_pool.from_db(a_pool_id, a_cursor)
a_sense_pool_collection.sense_pools.append(a_pool)
return a_sense_pool_collection
from_db = classmethod(from_db)
class sense_pool:
def __init__(self, a_sense_pool_id, a_sense_pool_string, a_cursor=None):
self.id = a_sense_pool_id # the file name of the .xml pool file
self.commentary = "" # the commentary tag in the .xml file
self.description = "" # the SPID field in the .xml file
self.spid = ""
self.fid = ""
self.name = ""
self.sense_list = []
self.parent_concepts_list = []
self.parent_pools_list = []
self.related_concepts_list = []
self.related_pools_list = []
if(a_cursor == None):
try:
a_sense_pool_tree = ElementTree.fromstring(a_sense_pool_string)
except Exception:
on.common.log.warning("there was some problem reading the XML file." + "\n" + a_sense_pool_string)
raise
self.description = on.common.util.get_attribute(a_sense_pool_tree, "SPID")
self.spid = self.description
self.fid = on.common.util.get_attribute(a_sense_pool_tree, "FID")
self.name = on.common.util.get_attribute(a_sense_pool_tree, "NAME")
for a_sense_tree in a_sense_pool_tree.findall(".//SENSE"):
for a_sense_id_tree in a_sense_tree.findall(".//SENSEID"):
a_sense_string = a_sense_id_tree.text
sense_contents = a_sense_string.split(".")
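                    # Illustrative (hypothetical) SENSEID values: "abandon.e.c.v.1" (5 fields, with the
                    # language code "e") or "abandon.c.v.1" (4 fields), i.e. lemma[.lang].annotator.pos.num.
                    # Only English senses are kept; annotator "y" maps to "abandon@1@v@omega", any other
                    # accepted annotator maps to "abandon@1@v".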
if len(sense_contents) not in [4,5]:
raise Exception("invalid senseid " + a_sense_string)
else:
a_lemma = sense_contents[0]
a_lang = sense_contents[1] if len(sense_contents) == 5 else None
if a_lang != "e":
continue
a_type = sense_contents[-3]
a_pos = sense_contents[-2]
a_num = sense_contents[-1]
if(a_type not in "cbayoj"):
raise Exception("invalid senseid annotator" + a_sense_string)
elif(a_type == "y"):
a_on_sense_string = "%s@%s@%s@omega" % (a_lemma, a_num, a_pos)
else:
a_on_sense_string = "%s@%s@%s" % (a_lemma, a_num, a_pos)
self.sense_list.append(a_on_sense_string)
for a_sub_to_tree in a_sense_pool_tree.findall(".//SUBTO"):
for a_sub_tag_tree in a_sub_to_tree.findall(".//SUBTAG"):
a_id = a_sub_tag_tree.text.split("=")[0]
# check if it is a concept or pool and add it to the appropriate list
if(on.common.util.matches_pool_id_specification(a_id)):
if(sense_pool_type.type_hash.has_key(a_id)):
self.parent_pools_list.append(a_id)
else:
on.common.log.warning("found an undefined sense pool '%s' as being a parent" % (a_id))
raise no_such_parent_sense_pool_error
# else assume it to be a concept (as there is no specific definition for it)
else:
if( concept_type.type_hash.has_key(a_id) ):
self.parent_concepts_list.append(a_id)
else:
on.common.log.warning("found an undefined concept '%s' as being a parent" % (a_id))
raise no_such_parent_concept_error
for a_relation_tree in a_sense_pool_tree.findall(".//RELATION"):
for a_relation_tag_tree in a_relation_tree.findall(".//RELATIONTAG"):
a_id = a_relation_tag_tree.text.split("=")[0]
# check if it is a concept or pool and add it to the appropriate list
if(on.common.util.matches_pool_id_specification(a_id)):
if(sense_pool_type.type_hash.has_key(a_id)):
self.related_pools_list.append(a_id)
else:
on.common.log.warning("found an undefined sense pool '%s' as being related" % (a_id))
raise no_such_parent_sense_pool_error
# else assume it to be a concept (as there is no specific definition for it)
else:
if( concept_type.type_hash.has_key(a_id) ):
self.related_concepts_list.append(a_id)
else:
on.common.log.warning("found an undefined concept '%s' as being related" % (a_id))
raise no_such_parent_concept_error
for a_commentary_tree in a_sense_pool_tree.findall(".//COMMENTARY"):
self.commentary = a_commentary_tree.text
on.common.log.debug("""
--------------------------------------------------------------------------------------------------------
sense pool
--------------------------------------------------------------------------------------------------------
description : %s
commentary : %s
senses : %s
parent concepts : %s
parent pools : %s
related concepts: %s
related pools : %s
--------------------------------------------------------------------------------------------------------
""" % (self.description, self.commentary, str(self.sense_list), str(self.parent_concepts_list), str(self.parent_pools_list), str(self.related_concepts_list), str(self.related_pools_list)), on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
else:
pass
def __repr__(self):
return """
--------------------------------------------------------------------------------------------------------
sense pool
--------------------------------------------------------------------------------------------------------
description : %s
commentary : %s
senses : %s
parent concepts : %s
parent pools : %s
related concepts: %s
related pools : %s
--------------------------------------------------------------------------------------------------------
""" % (self.description, self.commentary, str(self.sense_list), str(self.parent_concepts_list), str(self.parent_pools_list), str(self.related_concepts_list), str(self.related_pools_list))
def to_dot(self):
dot_string = ""
a_sense_pool_id = self.id
a_sense_pool_id = on.common.util.format_for_dot(a_sense_pool_id)
for a_parent_concept in self.parent_concepts_list:
a_parent_concept = on.common.util.format_for_dot(a_parent_concept)
dot_string = dot_string + "\t\"" + a_parent_concept + "\" -> \"" + a_sense_pool_id + "\" [label=\"sub-concept\"];\n"
for a_parent_pool in self.parent_pools_list:
a_parent_pool = on.common.util.format_for_dot(a_parent_pool)
dot_string = dot_string + "\t\"" + a_parent_pool + "\" -> \"" + a_sense_pool_id + "\" [label=\"sub-pool\"];\n"
for a_related_concept in self.related_concepts_list:
a_related_concept = on.common.util.format_for_dot(a_related_concept)
dot_string = dot_string + "\t\"" + a_related_concept + "\" -> \"" + a_sense_pool_id + "\" [label=\"related-concept\"];\n"
for a_related_pool in self.related_pools_list:
a_related_pool = on.common.util.format_for_dot(a_related_pool)
dot_string = dot_string + "\t\"" + a_related_pool + "\" -> \"" + a_sense_pool_id + "\" [label=\"related-pool\"];\n"
for a_sense in self.sense_list:
a_sense = on.common.util.format_for_dot(a_sense)
dot_string = dot_string + "\t\"" + a_sense_pool_id + "\" -> \"" + a_sense + "\" [label=\"sense\"];\n"
return dot_string
sense_sql_table_name = "pool_sense"
sense_sql_create_statement = \
"""
create table pool_sense
(
id varchar(255),
sense_id varchar (255)
)
default character set utf8;
"""
sense_sql_insert_statement = \
"""insert into pool_sense
(
id,
sense_id
) values (%s, %s)
"""
def write_senses_to_db(self, cursor):
cursor.executemany("%s" % (self.sense_sql_insert_statement),
[ (self.id, a_sense_id) for a_sense_id in self.sense_list])
def write_parents_to_db(self, cursor):
data = []
for thinglist, thing in [[self.parent_concepts_list, "concept"],
[self.parent_pools_list, "pool"]]:
for a_parent_thing_id in thinglist:
# making an assumption (looking at the current snapshot of
# the data) that relations to concepts are always concepts
data.append((self.id, a_parent_thing_id, "pool", thing))
cursor.executemany("%s" % (on.corpora.ontology.concept.parent_sql_insert_statement), data)
def write_relations_to_db(self, cursor):
data = []
for a_related_concept_id in self.related_concepts_list:
a_tuple = (self.id, a_related_concept_id, "pool", "concept")
data.append(a_tuple)
for a_related_pool_id in self.related_pools_list:
a_tuple = (self.id, a_related_pool_id, "pool", "pool")
data.append(a_tuple)
cursor.executemany("%s" % (on.corpora.ontology.concept.relation_sql_insert_statement), data)
# this is the method that writes the concept to the database
def write_to_db(self, a_cursor):
insert_ignoring_dups(on.corpora.ontology.concept.sql_insert_statement, a_cursor,
self.id, self.spid, self.fid, self.name, self.commentary, "pool")
# write other features, relations and parents to the db
self.write_parents_to_db(a_cursor)
self.write_relations_to_db(a_cursor)
self.write_senses_to_db(a_cursor)
@staticmethod
def from_db(a_sense_pool_id, a_cursor):
a_sense_pool = on.corpora.ontology.sense_pool(a_sense_pool_id, None, a_cursor)
a_sense_pool.id = a_sense_pool_id
# lets fill the concept attributes first
a_cursor.execute("""select * from concept_pool_type where id = '%s'""" % (a_sense_pool_id))
sense_pool_type_rows = a_cursor.fetchall()
for a_sense_pool_type_row in sense_pool_type_rows:
a_sense_pool.spid = a_sense_pool_type_row["spid"]
            a_sense_pool.fid = a_sense_pool_type_row["fid"]
a_sense_pool.name = a_sense_pool_type_row["name"]
a_sense_pool.commentary = a_sense_pool_type_row["commentary"]
a_cursor.execute("""select * from concept_pool_parent where id = '%s' and type = 'concept'""" % (a_sense_pool_id))
parent_concept_id_rows = a_cursor.fetchall()
for a_parent_concept_id_row in parent_concept_id_rows:
status("adding %s as parent concept" % (a_parent_concept_id_row["parent_id"]))
a_sense_pool.parent_concepts_list.append(a_parent_concept_id_row["parent_id"])
a_cursor.execute("""select * from concept_pool_parent where id = '%s' and type = 'pool'""" % (a_sense_pool_id))
parent_pool_id_rows = a_cursor.fetchall()
for a_parent_pool_id_row in parent_pool_id_rows:
status("adding %s as parent pool" % (a_parent_pool_id_row["parent_id"]))
a_sense_pool.parent_pools_list.append(a_parent_pool_id_row["parent_id"])
a_cursor.execute("""select * from concept_pool_relation where id = '%s' and relation_type='concept'""" % (a_sense_pool_id))
relation_concept_id_rows = a_cursor.fetchall()
for a_relation_concept_id_row in relation_concept_id_rows:
status("adding %s as being related concept" % (a_relation_pool_id_row["relation_id"]))
a_sense_pool.related_concepts_list.append(a_relation_pool_id_row["relation_id"])
a_cursor.execute("""select * from concept_pool_relation where id = '%s' and relation_type='pool'""" % (a_sense_pool_id))
relation_pool_id_rows = a_cursor.fetchall()
for a_relation_pool_id_row in relation_pool_id_rows:
status("adding %s as being related pool" % (a_relation_pool_id_row["relation_id"]))
a_sense_pool.related_pools_list.append(a_relation_pool_id_row["relation_id"])
a_cursor.execute("""select * from pool_sense where id = '%s'""" % (a_sense_pool_id))
sense_id_rows = a_cursor.fetchall()
for a_sense_id_row in sense_id_rows:
a_sense_id = a_sense_id_row["sense_id"]
status("adding %s as a sense in this pool" % (a_sense_id))
a_sense_pool.sense_list.append(a_sense_id)
return a_sense_pool
class no_such_parent_concept_error(exceptions.Exception):
pass
class no_such_parent_sense_pool_error(exceptions.Exception):
pass
class upper_model:
def __init__(self, a_id, a_um_string, a_cursor=None):
self.id = a_id
self.concepts = []
if(a_cursor == None):
try:
a_um_tree = ElementTree.fromstring(a_um_string) # lower case all the data in the upper model
except Exception:
on.common.log.warning("there was some problem reading the XML file." + "\n" +
a_um_string)
raise
# it is important to note that in the upper model, each sensepool is a concept definition
# make one pass over the concepts to fill the concept hash
for a_sensepool_tree in a_um_tree.findall(".//SENSEPOOL"):
a_concept_id = on.common.util.get_attribute(a_sensepool_tree, "SPID")
concept_type(a_concept_id)
# now let's create the actual concepts, and verify the validity using the aforefilled hash
k=0
for a_sensepool_tree in a_um_tree.findall(".//SENSEPOOL"):
try:
a_concept = concept(ElementTree.tostring(a_sensepool_tree))
self.concepts.append(a_concept)
except no_such_parent_concept_error:
on.common.log.error("""
found reference to a undefined concept, please correct the upper model
definition file and reload the data. the reason for this action is
that this concept is not created, and therefore any successor concepts
would have a missing path to the root concept. just deleting this
concept from the list of concepts won't help either because no
particular sequence in which the concepts are loaded is assumed, and
therefore there might have been a descendant that got added earlier
which had this one as the parent, and since our assumption that the
presence of this concept in the hash means that it would be created
successfully does not hold, we will have to rectify the error and load
the concepts once again.
""")
else:
pass
def to_dot(self, complete=False):
dot_string = ""
for a_concept in self.concepts:
dot_string = dot_string + "\n\t" + a_concept.to_dot()
if(complete == True):
dot_string = "digraph UM {\n\t" + dot_string.strip() + "\n}"
return dot_string.strip()
def write_to_db(self, a_cursor):
for a_concept in self.concepts:
a_concept.write_to_db(a_cursor)
def from_db(self, a_id, a_cursor):
a_upper_model = on.corpora.ontology.upper_model(a_id, None, a_cursor)
a_cursor.execute("""select concept_pool_type.id from concept_pool_type where concept_pool_type.type = 'concept'""")
concept_rows = a_cursor.fetchall()
for a_concept_row in concept_rows:
a_concept_id = a_concept_row["id"]
a_concept = on.corpora.ontology.concept.from_db(a_concept_id, a_cursor)
a_upper_model.concepts.append(a_concept)
return a_upper_model
from_db = classmethod(from_db)
class feature_type(on.corpora.abstract_open_type_table):
type_hash = defaultdict(int)
sql_table_name = "ontology_feature_type"
sql_create_statement = \
"""
create table ontology_feature_type
(
id varchar(255) not null collate utf8_bin primary key
)
default character set utf8;
"""
sql_insert_statement = \
"""insert into ontology_feature_type
(
id
) values (%s)
"""
class feature:
def __init__(self, a_feature):
if(a_feature[0] != "+"
and
a_feature[0] != "-"):
on.common.log.warning("the feature string should have a + or - modifier along with it")
(a_modifier, a_type) = re.findall("^(\+|\-)?(.+)$", a_feature)[0]
self.modifier = a_modifier
self.type = feature_type(a_type)
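        # e.g. (hypothetical feature name) feature("+animate") stores modifier "+" and feature_type "animate";
        # a string without a leading +/- only triggers the warning above and leaves the modifier empty.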
def __repr__(self):
return "%s%s" % (self.modifier, self.type.id)
class concept_type(on.corpora.abstract_open_type_table):
type_hash = defaultdict(int)
@classmethod
def write_to_db(cls, a_cursor):
pass
class concept:
# global hash for consistency check
concept_hash = {}
def __init__(self, a_concept_string, a_cursor=None):
self.spid = "" #---- the SPID attribute
self.name = "" #---- the NAME attribute
self.fid = "" #---- the FID attribute
self.id = ""
self.features = []
self.parent_ids = []
self.relation_ids = []
self.commentaries = []
#self.sub_concept_names = []
if(a_cursor == None):
try:
on.common.log.debug("""
------------------------------ the concept string representation ---------------------------------------
%s
--------------------------------------------------------------------------------------------------------
""" % (a_concept_string), on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
a_concept_tree = ElementTree.fromstring(a_concept_string)
except Exception:
on.common.log.warning("there was some problem reading the XML file." + "\n" + a_concept_string)
raise
self.spid = on.common.util.get_attribute(a_concept_tree, "SPID")
self.name = on.common.util.get_attribute(a_concept_tree, "NAME")
self.fid = on.common.util.get_attribute(a_concept_tree, "FID")
self.id = self.spid
on.common.log.debug("came to create concept: %s" % (self.spid), on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
a_commentary_index = 0 # there can be multiple commentaries, so let's just tag them with an index
for a_commentary_tree in a_concept_tree.findall(".//COMMENTARY"):
a_commentary = a_commentary_tree.text
if(a_commentary == None):
a_commentary = ""
self.commentaries.append(a_commentary)
for a_feature_tree in a_concept_tree.findall(".//FEATURE"):
for a_featuretag_tree in a_feature_tree.findall(".//FEATURETAG"):
a_feature = feature(a_featuretag_tree.text.lower())
self.features.append(a_feature)
for a_relation_tree in a_concept_tree.findall(".//RELATION"):
for a_relationtag_tree in a_relation_tree.findall(".//RELATIONTAG"):
a_relation_id = a_relationtag_tree.text
# since relation is just another concept, we won't create a new relation class
if(not concept_type.type_hash.has_key(a_relation_id)):
on.common.log.warning("found an undefined concept '%s' as being related" % (a_relation_id))
raise no_such_parent_concept_error
self.relation_ids.append(a_relation_id)
for a_subto_tree in a_concept_tree.findall(".//SUBTO"):
for a_subtag_tree in a_subto_tree.findall(".//SUBTAG"):
a_parent_id = a_subtag_tree.text
# since parent is just another concept, we won't create a new class
if(not concept_type.type_hash.has_key(a_parent_id)):
on.common.log.warning("found an undefined concept '%s' as being a parent" % (a_parent_id))
raise no_such_parent_concept_error
self.parent_ids.append(a_parent_id)
on.common.log.debug("""
-------------------------------- the concept object contents ------------------------------------------
%s
--------------------------------------------------------------------------------------------------------
""" % (self), on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
else:
pass
def __repr__(self):
return """
--------------------------------------------------------------------------------------------------------
concept
--------------------------------------------------------------------------------------------------------
spid: %s
fid: %s
name: %s
features: %s
parent concept ids: %s
related concept ids: %s
    commentaries: %s
--------------------------------------------------------------------------------------------------------
""" % (self.spid, self.fid, self.name, str(self.features), str(self.parent_ids), str(self.relation_ids), " ".join(self.commentaries))
def to_dot(self):
dot_string = ""
a_concept_id = self.id
a_concept_id = on.common.util.format_for_dot(a_concept_id)
dot_string = dot_string + "\t\"" + a_concept_id + "\" [id=\"" + a_concept_id + "\", commentary=\"" + on.common.util.format_for_dot(" ".join(self.commentaries)) + "\"];\n"
for a_parent_concept_id in self.parent_ids:
a_parent_concept_id = on.common.util.format_for_dot(a_parent_concept_id)
dot_string = dot_string + "\t\"" + a_parent_concept_id + "\" -> \"" + a_concept_id + "\" [label=\"sub-concept\"];\n"
for a_relation_id in self.relation_ids:
a_relation_id = on.common.util.format_for_dot(a_relation_id)
dot_string = dot_string + "\t\"" + a_concept_id + "\" -> \"" + a_relation_id + "\" [label=\"related\"];\n"
return dot_string.strip()
sql_table_name = "concept_pool_type"
sql_create_statement = \
"""
create table concept_pool_type
(
id varchar(255) not null collate utf8_bin primary key,
spid varchar(255) not null,
fid varchar(255) not null,
name varchar(255) not null,
commentary varchar(5000),
type varchar(255)
)
default character set utf8;
"""
sql_insert_statement = \
"""insert into concept_pool_type
(
id,
spid,
fid,
name,
commentary,
type
) values (%s, %s, %s, %s, %s, %s)
"""
parent_sql_table_name = "concept_pool_parent"
parent_sql_create_statement = \
"""
create table concept_pool_parent
(
id varchar(255),
parent_id varchar(255),
type varchar(255),
parent_type varchar(255)
)
default character set utf8;
"""
parent_sql_insert_statement = \
"""insert into concept_pool_parent
(
id,
parent_id,
type,
parent_type
) values (%s, %s, %s, %s)
"""
relation_sql_table_name = "concept_pool_relation"
relation_sql_create_statement = \
"""
create table concept_pool_relation
(
id varchar(255),
relation_id varchar(255),
type varchar(255),
relation_type varchar(255)
)
default character set utf8;
"""
relation_sql_insert_statement = \
"""insert into concept_pool_relation
(
id,
relation_id,
type,
relation_type
) values (%s, %s, %s, %s)
"""
feature_sql_table_name = "concept_pool_feature"
feature_sql_create_statement = \
"""
create table concept_pool_feature
(
id varchar(255),
feature_type varchar (255),
feature_modifier varchar (255)
)
default character set utf8;
"""
feature_sql_insert_statement = \
"""insert into concept_pool_feature
(
id,
feature_type,
feature_modifier
) values (%s, %s, %s)
"""
def write_parents_to_db(self, cursor):
data = []
for a_parent_id in self.parent_ids:
# making an assumption (looking at the current snapshot of
# the data) that relations to concepts are always concepts
a_tuple = (self.id, a_parent_id, "concept", "concept")
data.append(a_tuple)
cursor.executemany("%s" % (self.parent_sql_insert_statement), data)
def write_relations_to_db(self, cursor):
data = []
for a_relation_id in self.relation_ids:
# making an assumption (looking at the current snapshot of
# the data) that relations to concepts are always concepts
a_tuple = (self.id, a_relation_id, "concept", "concept")
data.append(a_tuple)
cursor.executemany("%s" % (self.relation_sql_insert_statement), data)
def write_features_to_db(self, cursor):
data = []
for a_feature_id in self.features:
# making an assumption (looking at the current snapshot of
# the data) that relations to concepts are always concepts
a_tuple = (self.id, a_feature_id.type.id, a_feature_id.modifier)
data.append(a_tuple)
cursor.executemany("%s" % (self.feature_sql_insert_statement), data)
# this is the method that writes the concept to the database
def write_to_db(self, a_cursor):
insert_ignoring_dups(self, a_cursor, self.id, self.spid, self.fid, self.name, " ".join(self.commentaries), "concept")
# write other features, relations and parents to the db
self.write_parents_to_db(a_cursor)
self.write_relations_to_db(a_cursor)
self.write_features_to_db(a_cursor)
@staticmethod
def from_db(a_concept_id, a_cursor=None):
a_concept = on.corpora.ontology.concept(None, a_cursor)
a_concept.id = a_concept_id
# lets fill the concept attributes first
a_cursor.execute("""select * from concept_pool_type where id = '%s'""" % (a_concept_id))
concept_pool_type_rows = a_cursor.fetchall()
for a_concept_pool_type_row in concept_pool_type_rows:
a_concept.spid = a_concept_pool_type_row["spid"]
            a_concept.fid = a_concept_pool_type_row["fid"]
a_concept.name = a_concept_pool_type_row["name"]
a_concept.commentaries.append(a_concept_pool_type_row["commentary"])
a_cursor.execute("""select * from concept_pool_parent where id = '%s'""" % (a_concept_id))
parent_id_rows = a_cursor.fetchall()
for a_parent_id_row in parent_id_rows:
status("adding %s as parent" % (a_parent_id_row["parent_id"]))
a_concept.parent_ids.append(a_parent_id_row["parent_id"])
a_cursor.execute("""select * from concept_pool_relation where id = '%s'""" % (a_concept_id))
relation_id_rows = a_cursor.fetchall()
for a_relation_id_row in relation_id_rows:
status("adding %s as being related" % (a_relation_id_row["relation_id"]))
a_concept.relation_ids.append(a_relation_id_row["relation_id"])
a_cursor.execute("""select * from concept_pool_feature where id = '%s'""" % (a_concept_id))
feature_id_rows = a_cursor.fetchall()
for a_feature_id_row in feature_id_rows:
status("adding %s as a feature with %s modifier" % (a_feature_id_row["feature_type"], a_feature_id_row["feature_modifier"]))
a_feature = on.corpora.ontology.feature("%s%s" % (a_feature_id_row["feature_modifier"], a_feature_id_row["feature_type"]))
a_concept.features.append(a_feature)
return a_concept
|
the-stack_106_26412 | """
Break fastq sequences into smaller chunks. Eg. from nanopore reads we want smaller pieces
"""
import os
import sys
import argparse
from roblib import stream_fastq
__author__ = 'Rob Edwards'
def rewrite_fastq(inf, outf, sz, verbose):
"""
Rewrite a fastq file
:param inf: input fastq file
:param outf: output fastq file
:param sz: size of the DNA sequences to write
:param verbose: more output
:return:
"""
with open(outf, 'w') as out:
seqcounter = 0
for seqid, header, seq, qual in stream_fastq(inf):
posn = 0
while (posn < len(seq)-sz):
seqcounter += 1
out.write("@{} {}\n{}\n+\n{}\n".format(seqcounter, header, seq[posn:posn+sz], qual[posn:posn+sz]))
posn += sz
if posn < len(seq):
out.write("@{} {}\n{}\n+\n{}\n".format(seqcounter, header, seq[posn:], qual[posn:]))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Read a fastq file and write the sequences smaller. Note does not preserve IDs!')
parser.add_argument('-f', help='input file', required=True)
parser.add_argument('-s', help='DNA fragment size', required=True, type=int)
parser.add_argument('-o', help='output file name', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
rewrite_fastq(args.f, args.o, args.s, args.v) |
the-stack_106_26415 | # ------------------------------------------------------------
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------
import json
import time
from dapr.clients import DaprClient
with DaprClient() as d:
id=0
while True:
id+=1
req_data = {
'id': id,
'message': 'hello world'
}
# Create a typed message with content type and body
resp = d.publish_event(
pubsub_name='pubsub',
topic_name='TOPIC_A',
data=json.dumps(req_data),
data_content_type='application/json',
)
# Print the request
print(req_data, flush=True)
time.sleep(2)
|
the-stack_106_26416 | from organisations.boundaries.management.base import BaseOsniCommand
from organisations.boundaries.osni import OsniLayer
from organisations.models import OrganisationDivision
class Command(BaseOsniCommand):
def handle(self, *args, **options):
url = "http://osni-spatial-ni.opendata.arcgis.com/datasets/563dc2ec3d9943428e3fe68966d40deb_3.geojson"
self.layer = OsniLayer(url, "PC_ID", "PC_NAME")
for feature in self.layer.features:
if "gss" in feature:
record = OrganisationDivision.objects.all().get(
official_identifier="gss:{}".format(feature["gss"])
)
self.import_boundary(record, feature)
else:
raise Exception("Expected GSS code")
self.stdout.write("...done!")
|
the-stack_106_26417 | from django.http import Http404
from django.test import RequestFactory, TestCase
from django.urls import reverse
from agreements.models import Agreement, Issuer
from agreements.views import issuer_search
class TestIssuerSearch(TestCase):
def setUp(self):
self.request = RequestFactory().get("/")
def test_no_issuers_raises_404(self):
with self.assertRaises(Http404):
issuer_search(self.request, "none")
def test_missing_issuer_raises_404(self):
Issuer.objects.create(name="name", slug="slug")
with self.assertRaises(Http404):
issuer_search(self.request, "missing")
def test_issuer_no_agreements(self):
Issuer.objects.create(name="A & B Bank", slug="a-b-bank")
response = self.client.get(
reverse("issuer_search", kwargs={"issuer_slug": "a-b-bank"})
)
self.assertContains(response, "A & B Bank")
def test_issuer_has_agreements(self):
issuer = Issuer.objects.create(name="A & B Bank", slug="a-b-bank")
for i in range(2):
filename = "agreement{}.pdf".format(i + 1)
Agreement.objects.create(
issuer=issuer, description=filename, file_name=filename, size=0
)
response = self.client.get(
reverse("issuer_search", kwargs={"issuer_slug": "a-b-bank"})
)
self.assertContains(response, "agreement1.pdf")
self.assertContains(response, "agreement2.pdf")
def test_multiple_issuers_with_same_slug_no_agreements_uses_latest(self):
Issuer.objects.create(name="A & B Bank", slug="a-b-bank")
Issuer.objects.create(name="A - B Bank", slug="a-b-bank")
response = self.client.get(
reverse("issuer_search", kwargs={"issuer_slug": "a-b-bank"})
)
self.assertContains(response, "A - B Bank")
def test_multiple_issuers_with_same_slug_uses_latest_agreement(self):
issuer = Issuer.objects.create(name="A & B Bank", slug="a-b-bank")
Agreement.objects.create(
issuer=issuer,
description="description",
file_name="filename",
size=0,
)
Issuer.objects.create(name="A - B Bank", slug="a-b-bank")
response = self.client.get(
reverse("issuer_search", kwargs={"issuer_slug": "a-b-bank"})
)
self.assertContains(response, "A & B Bank")
|
the-stack_106_26419 | import time
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# tag_list (hashtags to look up), fb_id and fb_pw (Facebook credentials) are assumed to be defined
# earlier in the notebook/script.
t_search = pd.DataFrame(index = tag_list,
                        columns = ['search_num'])
# open the Facebook login page
driver = webdriver.Chrome('./chromedriver')
url = 'https://www.facebook.com/'
driver.get(url)
time.sleep(2)
# complete the login
driver.find_element_by_xpath('//*[@id="email"]').send_keys(fb_id)
time.sleep(2)
driver.find_element_by_xpath('//*[@id="pass"]').send_keys(fb_pw + '\n')
time.sleep(2)
# crawling
l_search = []
for key, i in zip(tag_list[100:], [x for x in range(1,112+1)]):
print(f"{i}번째 {key} 크롤링 중입니다.")
driver.find_element_by_xpath('//*[@id="mount_0_0_Rd"]/div/div[1]/div/div[2]/div[2]/div/div/div/div/div/label').send_keys(Keys.CONTROL, 'a')
time.sleep(2)
driver.find_element_by_xpath('//*[@id="mount_0_0_Rd"]/div/div[1]/div/div[2]/div[2]/div/div/div/div/div/label').send_keys(Keys.DELETE)
time.sleep(2)
driver.find_element_by_xpath('//*[@id="mount_0_0_Rd"]/div/div[1]/div/div[2]/div[2]/div/div/div/div/div/label').send_keys(f'#{key}' + '\n')
time.sleep(2)
    # scrape the search volume
html = BeautifulSoup(driver.page_source, 'html.parser')
time.sleep(2)
text = html.select('div.bi6gxh9e span.d2edcug0')[1].text[:2+1]
l_search.append(text)
|
the-stack_106_26420 | import random
import warnings
import time
import numpy as np
import compressors
from sklearn.preprocessing import MinMaxScaler
class Client:
def __init__(self, client_id, group=None, train_data={'x' : [],'y' : []}, eval_data={'x' : [],'y' : []}, model=None):
self._model = model
self.id = client_id
self.group = group
self.train_data = train_data
self.eval_data = eval_data
def train(self, num_epochs=1, batch_size=10, minibatch=None):
"""Trains on self.model using the client's train_data.
Args:
num_epochs: Number of epochs to train. Unsupported if minibatch is provided (minibatch has only 1 epoch)
batch_size: Size of training batches.
minibatch: fraction of client's data to apply minibatch sgd,
None to use FedAvg
        Return:
            comp: number of FLOPs executed in training process
            num_samples: number of samples used in training
            before_nonzeros: non-zero weight count of the compressed layers before compression
            after_nonzeros: non-zero weight count of the compressed layers after compression
            update: set of weights
            train_time_secs: wall-clock seconds spent on training plus compression
"""
# TODO: Swap this for a with statement and a timer
train_start = time.time()
if minibatch is None:
data = self.train_data
comp, update = self.model.train(data, num_epochs, batch_size)
else:
frac = min(1.0, minibatch)
num_data = max(1, int(frac*len(self.train_data["x"])))
xs, ys = zip(*random.sample(list(zip(self.train_data["x"], self.train_data["y"])), num_data))
data = {'x': xs, 'y': ys}
# Minibatch trains for only 1 epoch - multiple local epochs don't make sense!
num_epochs = 1
comp, update = self.model.train(data, num_epochs, num_data)
num_train_samples = len(data['y'])
train_stop = time.time()
train_time = int(round(train_stop - train_start))
before_nonzeros = 0
after_nonzeros = 0
### Start Compression
compress_start = time.time()
layers_to_compress = [6]
update = np.array(update, dtype=object)
for i in layers_to_compress:
actual_shape = update[i].shape
flattended = update[i].flatten()
before_nonzeros += np.count_nonzero(flattended)
compressed_flat = flattended
# For calculating sparsity constraints
flat_sz = flattended.size
bits = flattended.size * flattended.itemsize * 8
B_j = int(np.floor(0.80 * bits))
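            # Example budget (hypothetical layer size): 1000 float64 weights occupy 1000 * 8 * 8 = 64000 bits,
            # so the 80% budget works out to B_j = floor(0.80 * 64000) = 51200 bits.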
scaler = MinMaxScaler()
Xsc = flattended.reshape((flat_sz, 1))
Xsc = scaler.fit_transform(Xsc)
try:
Cg, _ = compressors.sparse_kmeans(
gradient=Xsc,
budget=B_j
)
compressed_flat = scaler.inverse_transform(Cg.reshape((flat_sz, 1)))
compressed_flat = compressed_flat.flatten()
except BaseException as err:
print("ERROR")
print(f"Unexpected err={err}, type(err)={type(err)}")
print(flattended)
exit
after_nonzeros += np.count_nonzero(compressed_flat)
update[i] = compressed_flat.reshape(actual_shape)
compress_end = time.time()
### End Compression
compress_time = int(round(compress_end - compress_start))
train_time_secs = train_time + compress_time
return comp, num_train_samples, before_nonzeros, after_nonzeros, update, train_time_secs
def test(self, set_to_use='test'):
"""Tests self.model on self.test_data.
Args:
set_to_use. Set to test on. Should be in ['train', 'test'].
Return:
dict of metrics returned by the model.
"""
assert set_to_use in ['train', 'test', 'val']
if set_to_use == 'train':
data = self.train_data
elif set_to_use == 'test' or set_to_use == 'val':
data = self.eval_data
return self.model.test(data)
@property
def num_test_samples(self):
"""Number of test samples for this client.
Return:
int: Number of test samples for this client
"""
if self.eval_data is None:
return 0
return len(self.eval_data['y'])
@property
def num_train_samples(self):
"""Number of train samples for this client.
Return:
int: Number of train samples for this client
"""
if self.train_data is None:
return 0
return len(self.train_data['y'])
@property
def num_samples(self):
"""Number samples for this client.
Return:
int: Number of samples for this client
"""
train_size = 0
if self.train_data is not None:
train_size = len(self.train_data['y'])
test_size = 0
if self.eval_data is not None:
test_size = len(self.eval_data['y'])
return train_size + test_size
@property
def model(self):
"""Returns this client reference to model being trained"""
return self._model
@model.setter
def model(self, model):
warnings.warn('The current implementation shares the model among all clients.'
'Setting it on one client will effectively modify all clients.')
self._model = model
|
the-stack_106_26423 | import os, datetime, zipfile
from datetime import date
from os import path
def export(config):
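    # Expected config shape (keys taken from the lookups below, values hypothetical):
    #   {"project": {"path": ..., "zip": ..., "folders": [...], "excluded_folders": [...], "root_files": [...]},
    #    "db": {"name": ..., "user": ..., "password": ..., "file": ...}}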
MODULE_PATH = os.path.join(os.path.dirname(__file__))
print("\n========================");
print("EXPORTING PROJECT INTO ARCHIVE")
print("\n")
project_folder = config["project"]["path"]
if (not (os.path.isdir(project_folder))):
print(" ERROR: Invalid Project folder.")
return False
db_name = config["db"]["name"]
db_user = config["db"]["user"]
db_pass = config["db"]["password"]
db_dump = config["db"]["file"]
try:
if (os.system(f"mysqldump --user={db_user} --password={db_pass} {db_name} > {db_dump}") > 0):
raise
except Exception:
print(" ERROR: Failed to connect to the database.");
print("\nABORTED.\n")
return False
print(" OK: Database exported.");
zip_name = config["project"]["zip"]
included_folders = config["project"]["folders"]
excluded_folders = config["project"]["excluded_folders"]
root_files = config["project"]["root_files"]
date_format = date.today().strftime('%d %m %Y')
zip_name = zip_name + " - " + date_format
zip_name_final = (zip_name + ".backup.zip")
try:
zf = zipfile.ZipFile(zip_name_final, "w")
except Exception as err:
print(" ERROR: Failed to create archive:", err)
print("\nABORTED.\n")
return False
try:
zf.write(db_dump)
os.remove(db_dump)
except Exception as err:
print(" ERROR:", err)
print("\nABORTED.\n")
return False
starting_dir = os.getcwd()
os.chdir(project_folder)
try:
for file in root_files:
zf.write(file)
for folder in included_folders:
for dirname, dirs, files in os.walk(folder, topdown = True):
dirs[:] = [d for d in dirs if d not in excluded_folders]
zf.write(dirname)
for filename in files:
zf.write(os.path.join(dirname, filename))
zf.close()
except Exception as err:
print(" ERROR:", err)
print("\nABORTED.\n")
return False
print(" SUCCESS: Project added to the archive (" + zip_name_final + ").");
os.chdir(starting_dir)
return zip_name_final |
the-stack_106_26424 | from __future__ import unicode_literals
import mock
import pytest
from hermes_python.hermes import Hermes
from hermes_python.ontology import MqttOptions
from hermes_python.ontology.dialogue import StartSessionMessage, SessionInitNotification, ContinueSessionMessage
from hermes_python.ontology.injection import InjectionRequestMessage
HOST = "localhost"
DUMMY_INTENT_NAME = "INTENT"
def test_initialization():
h = Hermes(HOST)
assert 0 == len(h.ffi.dialogue._c_callback_subscribe_intent)
def test_initialization_with_options():
mqtt_opts = MqttOptions()
h = Hermes(mqtt_options=mqtt_opts)
assert h.mqtt_options.broker_address == "localhost:1883"
def test_context_manager_enter_calls_ffi_api():
h = Hermes(HOST)
h.ffi = mock.MagicMock()
h.__enter__()
h.__exit__(None, None, None)
h.ffi.establish_connection.assert_called_once()
h.ffi.release_connection.assert_called_once()
@mock.patch("hermes_python.api.ffi.tts.hermes_drop_tts_facade")
@mock.patch("hermes_python.api.ffi.tts.hermes_protocol_handler_tts_facade")
@mock.patch("hermes_python.api.ffi.injection.hermes_drop_injection_facade")
@mock.patch("hermes_python.api.ffi.injection.hermes_protocol_handler_injection_facade")
@mock.patch("hermes_python.api.ffi.feedback.hermes_drop_sound_feedback_facade")
@mock.patch("hermes_python.api.ffi.feedback.hermes_protocol_handler_sound_feedback_facade")
@mock.patch("hermes_python.api.ffi.dialogue.hermes_drop_dialogue_facade")
@mock.patch("hermes_python.api.ffi.dialogue.hermes_protocol_handler_dialogue_facade")
@mock.patch("hermes_python.api.ffi.hermes_destroy_mqtt_protocol_handler")
@mock.patch("hermes_python.api.ffi.hermes_protocol_handler_new_mqtt_with_options")
def test_context_manager_enter_exit(hermes_protocol_handler_new_mqtt,
hermes_destroy_mqtt_protocol_handler,
hermes_protocol_handler_dialogue_facade, hermes_drop_dialogue_facade,
hermes_protocol_handler_sound_feedback_facade, hermes_drop_sound_feedback_facade,
hermes_protocol_handler_injection_facade, hermes_drop_injection_facade,
hermes_protocol_handler_tts_facade, hermes_drop_tts_facade):
with Hermes(HOST) as h:
pass
hermes_protocol_handler_new_mqtt.assert_called_once()
hermes_protocol_handler_dialogue_facade.assert_called_once()
hermes_drop_dialogue_facade.assert_called_once()
hermes_protocol_handler_sound_feedback_facade.assert_called_once()
hermes_drop_sound_feedback_facade.assert_called_once()
hermes_protocol_handler_injection_facade.assert_called_once()
hermes_drop_injection_facade.assert_called_once()
hermes_protocol_handler_tts_facade.assert_called_once()
hermes_drop_tts_facade.assert_called_once()
hermes_destroy_mqtt_protocol_handler.assert_called_once()
@mock.patch("hermes_python.api.ffi.feedback.hermes_protocol_handler_sound_feedback_facade")
@mock.patch("hermes_python.api.ffi.dialogue.hermes_protocol_handler_dialogue_facade")
@mock.patch("hermes_python.api.ffi.dialogue.hermes_drop_dialogue_facade")
@mock.patch("hermes_python.api.ffi.hermes_protocol_handler_new_mqtt_with_options")
def test_context_manager_catches_exceptions(hermes_protocol_handler_new_mqtt, mocked_hermes_drop_dialogue_facade,
hermes_protocol_handler_dialogue_facade,
hermes_protocol_handler_sound_feedback_facade):
hermes_protocol_handler_dialogue_facade.side_effect = Exception("An exception occured!")
with pytest.raises(Exception):
with Hermes(HOST) as h:
pass
def test_subscribe_intent_correctly_registers_callback():
def user_callback(hermes, intentMessage):
pass
h = Hermes(HOST)
h.ffi = mock.MagicMock()
h.__enter__()
h.subscribe_intent(DUMMY_INTENT_NAME, user_callback)
h.__exit__(None, None, None)
h.ffi.dialogue.register_subscribe_intent_handler.assert_called_once_with(DUMMY_INTENT_NAME, user_callback, h)
def test_subscribe_intents_correctly_registers_callback():
def user_callback(hermes, intentMessage):
pass
h = Hermes(HOST)
h.ffi = mock.MagicMock()
h.__enter__()
h.subscribe_intents(user_callback)
h.__exit__(None, None, None)
h.ffi.establish_connection.assert_called_once()
h.ffi.dialogue.register_subscribe_intents_handler.assert_called_once_with(user_callback, h)
def test_subscribe_session_started_correctly_registers_callback():
def user_callback(hermes, intentMessage):
pass
h = Hermes(HOST)
h.ffi = mock.MagicMock()
h.__enter__()
h.subscribe_session_started(user_callback)
h.__exit__(None, None, None)
h.ffi.establish_connection.assert_called_once()
h.ffi.dialogue.register_session_started_handler.assert_called_once_with(user_callback, h)
def test_subscribe_session_queued_correctly_registers_callback():
def user_callback(hermes, intentMessage):
pass
h = Hermes(HOST)
h.ffi = mock.MagicMock()
h.__enter__()
h.subscribe_session_queued(user_callback)
h.__exit__(None, None, None)
h.ffi.establish_connection.assert_called_once()
h.ffi.dialogue.register_session_queued_handler.assert_called_once_with(user_callback, h)
def test_subscribe_session_ended_correctly_registers_callback():
def user_callback(hermes, intentMessage):
pass
h = Hermes(HOST)
h.ffi = mock.MagicMock()
h.__enter__()
h.subscribe_session_ended(user_callback)
h.__exit__(None, None, None)
h.ffi.establish_connection.assert_called_once()
h.ffi.dialogue.register_session_ended_handler.assert_called_once_with(user_callback, h)
def test_subscribe_intent_not_recognized_correctly_registers_callback():
def user_callback(hermes, intentMessage):
pass
h = Hermes(HOST)
h.ffi = mock.MagicMock()
h.__enter__()
h.subscribe_intent_not_recognized(user_callback)
h.__exit__(None, None, None)
h.ffi.establish_connection.assert_called_once()
h.ffi.dialogue.register_intent_not_recognized_handler.assert_called_once_with(user_callback, h)
def test_start_session_notification_1():
h = Hermes(HOST)
h.ffi = mock.MagicMock()
with h:
h.publish_start_session_notification(None, "welcome !", "custom_data")
start_session_notification_message = StartSessionMessage(SessionInitNotification("welcome !"), "custom_data", None)
h.ffi.dialogue.publish_start_session.assert_called_once_with(start_session_notification_message)
def test_start_session_notification_2():
h = Hermes(HOST)
h.ffi = mock.MagicMock()
with h:
h.publish_start_session_notification(None, None, "custom_data", "yup!")
start_session_notification_message = StartSessionMessage(SessionInitNotification("yup!"), "custom_data", None)
h.ffi.dialogue.publish_start_session.assert_called_once_with(start_session_notification_message)
def test_start_session_notification_text_parameter_takes_precedence_over_session_initiation_text():
h = Hermes(HOST)
h.ffi = mock.MagicMock()
with h:
h.publish_start_session_notification(None, "test", "custom_data", "yup!")
start_session_notification_message = StartSessionMessage(SessionInitNotification("yup!"), "custom_data", None)
h.ffi.dialogue.publish_start_session.assert_called_once_with(start_session_notification_message)
class TestContinueSession(object):
def test_continue_session_slot_filler(self):
h = Hermes(HOST)
h.ffi = mock.MagicMock()
with h:
h.publish_continue_session("session_id", "Tell me what the missing slot is", ["intent1"], None, False,
"missing_slot")
continue_session_message = ContinueSessionMessage("session_id", "Tell me what the missing slot is", ["intent1"],
None, False, "missing_slot")
h.ffi.dialogue.publish_continue_session.assert_called_once_with(continue_session_message)
class TestInjection(object):
# These tests are disabled as long as the injection API is stabilized.
# def test_requesting_injection_status(self):
# h = Hermes(HOST)
# h.ffi = mock.MagicMock()
#
#
# with h:
# h.request_injection_status()
#
# h.ffi.injection.publish_injection_status_request.assert_called_once()
#
# def test_correctly_subscribing_to_injection_status(self):
# def injection_request_cb(callback, injection_status):
# pass
#
# h = Hermes(HOST)
# h.ffi = mock.MagicMock()
#
# with h:
# h.subscribe_injection_status(injection_request_cb)
#
# h.ffi.injection.register_subscribe_injection_status.assert_called_once_with(injection_request_cb, h)
def test_correctly_requesting_injection(self):
h = Hermes(HOST)
h.ffi = mock.MagicMock()
injection_request = InjectionRequestMessage([], dict())
with h:
h.request_injection(injection_request)
h.ffi.injection.publish_injection_request.assert_called_once_with(injection_request)
|
the-stack_106_26425 | # -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
from com.aliyun.api.gateway.sdk.common import constant
from com.aliyun.api.gateway.sdk.auth import sha_hmac256
from com.aliyun.api.gateway.sdk.util import DateUtil
import time
def build_sign_str(uri=None, method=None, headers=None, body=None):
lf = '\n'
string_to_sign = []
string_to_sign.append(method)
string_to_sign.append(lf)
if constant.HTTP_HEADER_ACCEPT in headers and headers[constant.HTTP_HEADER_ACCEPT]:
string_to_sign.append(headers[constant.HTTP_HEADER_ACCEPT])
string_to_sign.append(lf)
if constant.HTTP_HEADER_CONTENT_MD5 in headers and headers[constant.HTTP_HEADER_CONTENT_MD5]:
string_to_sign.append(headers[constant.HTTP_HEADER_CONTENT_MD5])
string_to_sign.append(lf)
if constant.HTTP_HEADER_CONTENT_TYPE in headers and headers[constant.HTTP_HEADER_CONTENT_TYPE]:
string_to_sign.append(headers[constant.HTTP_HEADER_CONTENT_TYPE])
string_to_sign.append(lf)
if constant.HTTP_HEADER_DATE in headers and headers[constant.HTTP_HEADER_DATE]:
string_to_sign.append(headers[constant.HTTP_HEADER_DATE])
string_to_sign.append(lf)
string_to_sign.append(_format_header(headers=headers))
string_to_sign.append(_build_resource(uri=uri, body=body))
for _index, _value in enumerate(string_to_sign):
if not isinstance(_value, str):
string_to_sign[_index] = _value.decode("utf-8")
return ''.join(string_to_sign)
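# Illustrative string-to-sign (values hypothetical; assumes the header constants resolve to the usual
# "Accept", "Date", etc. and that a single custom "X-Ca-Key" header is present):
#   GET\n
#   application/json\n
#   Mon, 01 Jan 2018 00:00:00 GMT\n
#   X-Ca-Key:12345\n
#   /api/item?limit=10&offset=0
# Absent Content-MD5/Content-Type headers are skipped entirely rather than emitted as blank lines.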
def _build_resource(uri="", body={}):
if uri.__contains__("?"):
uri_array = uri.split("?")
uri = uri_array[0]
query_str = uri_array[1]
if not body:
body = {}
if query_str:
query_str_array = query_str.split("&")
for query in query_str_array:
query_array = query.split("=")
if query_array[0] not in body:
body[query_array[0]] = query_array[1]
resource = []
resource.append(uri)
if body:
resource.append("?")
param_list = list(body.keys())
param_list.sort()
first = True
for key in param_list:
if not first:
resource.append("&")
first = False
if body[key]:
resource.append(key)
resource.append("=")
resource.append(body[key])
else:
resource.append(key)
if resource is None:
return ''
return "".join(str(x) for x in resource)
def convert_utf8(input_string):
if isinstance(input_string, unicode):
input_string = input_string.encode('utf-8')
return input_string
def _format_header(headers={}):
lf = '\n'
temp_headers = []
if len(headers) > 0:
header_list = list(headers.keys())
header_list.sort()
signature_headers = []
for k in header_list:
if k.startswith("X-Ca-"):
temp_headers.append(k)
temp_headers.append(":")
temp_headers.append(str(headers[k]))
temp_headers.append(lf)
signature_headers.append(k)
headers[constant.X_CA_SIGNATURE_HEADERS] = ','.join(signature_headers)
return ''.join(temp_headers)
|
the-stack_106_26426 | from tapiriik.settings import WEB_ROOT, SPORTTRACKS_OPENFIT_ENDPOINT, SPORTTRACKS_CLIENT_ID, SPORTTRACKS_CLIENT_SECRET
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit, Waypoint, WaypointType, Location, LapIntensity, Lap
from tapiriik.services.api import APIException, UserException, UserExceptionType, APIExcludeActivity
from tapiriik.services.sessioncache import SessionCache
from tapiriik.database import cachedb
from django.urls import reverse
import pytz
from datetime import timedelta
import dateutil.parser
from dateutil.tz import tzutc
import requests
import json
import re
import urllib.parse
import logging
logger = logging.getLogger(__name__)
class SportTracksService(ServiceBase):
ID = "sporttracks"
DisplayName = "SportTracks"
DisplayAbbreviation = "ST"
AuthenticationType = ServiceAuthenticationType.OAuth
OpenFitEndpoint = SPORTTRACKS_OPENFIT_ENDPOINT
SupportsHR = True
AuthenticationNoFrame = True
""" Other Basketball
Other Boxing
Other Climbing
Other Driving
Other Flying
Other Football
Other Gardening
Other Kitesurf
Other Sailing
Other Soccer
Other Tennis
Other Volleyball
Other Windsurf
Running Hashing
Running Hills
Running Intervals
Running Orienteering
Running Race
Running Road
Running Showshoe
Running Speed
Running Stair
Running Track
Running Trail
Running Treadmill
Cycling Hills
Cycling Indoor
Cycling Intervals
Cycling Mountain
Cycling Race
Cycling Road
Cycling Rollers
Cycling Spinning
Cycling Track
Cycling Trainer
Swimming Open Water
Swimming Pool
Swimming Race
Walking Geocaching
Walking Hiking
Walking Nordic
Walking Photography
Walking Snowshoe
Walking Treadmill
Skiing Alpine
Skiing Nordic
Skiing Roller
Skiing Snowboard
Rowing Canoe
Rowing Kayak
Rowing Kitesurf
Rowing Ocean Kayak
Rowing Rafting
Rowing Rowing Machine
Rowing Sailing
Rowing Standup Paddling
Rowing Windsurf
Skating Board
Skating Ice
Skating Inline
Skating Race
Skating Track
Gym Aerobics
Gym Elliptical
Gym Plyometrics
Gym Rowing Machine
Gym Spinning
Gym Stair Climber
Gym Stationary Bike
Gym Strength
Gym Stretching
Gym Treadmill
Gym Yoga
"""
_activityMappings = {
"running": ActivityType.Running,
"cycling": ActivityType.Cycling,
"mountain": ActivityType.MountainBiking,
"walking": ActivityType.Walking,
"hiking": ActivityType.Hiking,
"snowboarding": ActivityType.Snowboarding,
"skiing": ActivityType.DownhillSkiing,
"nordic": ActivityType.CrossCountrySkiing,
"skating": ActivityType.Skating,
"swimming": ActivityType.Swimming,
"rowing": ActivityType.Rowing,
"elliptical": ActivityType.Elliptical,
"gym": ActivityType.Gym,
"standup paddling": ActivityType.StandUpPaddling,
"other": ActivityType.Other
}
_reverseActivityMappings = {
ActivityType.Running: "running",
ActivityType.Cycling: "cycling",
ActivityType.Walking: "walking",
ActivityType.MountainBiking: "cycling: mountain",
ActivityType.Hiking: "walking: hiking",
ActivityType.CrossCountrySkiing: "skiing: nordic", # Equipment.Bindings.IsToeOnly ??
ActivityType.DownhillSkiing: "skiing",
ActivityType.Snowboarding: "skiing: snowboarding",
ActivityType.Skating: "skating",
ActivityType.Swimming: "swimming",
ActivityType.Rowing: "rowing",
ActivityType.Elliptical: "gym: elliptical",
ActivityType.Gym: "gym",
ActivityType.StandUpPaddling: "rowing: standup paddling",
ActivityType.Other: "other"
}
SupportedActivities = list(_reverseActivityMappings.keys())
_tokenCache = SessionCache("sporttracks", lifetime=timedelta(minutes=115), freshen_on_get=False)
def WebInit(self):
self.UserAuthorizationURL = "https://api.sporttracks.mobi/oauth2/authorize?response_type=code&client_id=%s&state=mobi_api" % SPORTTRACKS_CLIENT_ID
def _getAuthHeaders(self, serviceRecord=None):
token = self._tokenCache.Get(serviceRecord.ExternalID)
if not token:
if not serviceRecord.Authorization or "RefreshToken" not in serviceRecord.Authorization:
# When I convert the existing users, people who didn't check the remember-credentials box will be stuck in limbo
raise APIException("User not upgraded to OAuth", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
# Use refresh token to get access token
# Hardcoded return URI to get around the lack of URL reversing without loading up all the Django stuff
params = {"grant_type": "refresh_token", "refresh_token": serviceRecord.Authorization["RefreshToken"], "client_id": SPORTTRACKS_CLIENT_ID, "client_secret": SPORTTRACKS_CLIENT_SECRET, "redirect_uri": "https://tapiriik.com/auth/return/sporttracks"}
response = requests.post("https://api.sporttracks.mobi/oauth2/token", data=urllib.parse.urlencode(params), headers={"Content-Type": "application/x-www-form-urlencoded"})
if response.status_code != 200:
if response.status_code >= 400 and response.status_code < 500:
raise APIException("Could not retrieve refreshed token %s %s" % (response.status_code, response.text), block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
raise APIException("Could not retrieve refreshed token %s %s" % (response.status_code, response.text))
token = response.json()["access_token"]
self._tokenCache.Set(serviceRecord.ExternalID, token)
return {"Authorization": "Bearer %s" % token}
def RetrieveAuthorizationToken(self, req, level):
from tapiriik.services import Service
# might consider a real OAuth client
code = req.GET.get("code")
params = {"grant_type": "authorization_code", "code": code, "client_id": SPORTTRACKS_CLIENT_ID, "client_secret": SPORTTRACKS_CLIENT_SECRET, "redirect_uri": WEB_ROOT + reverse("oauth_return", kwargs={"service": "sporttracks"})}
response = requests.post("https://api.sporttracks.mobi/oauth2/token", data=urllib.parse.urlencode(params), headers={"Content-Type": "application/x-www-form-urlencoded"})
if response.status_code != 200:
print(response.text)
raise APIException("Invalid code")
access_token = response.json()["access_token"]
refresh_token = response.json()["refresh_token"]
uid_res = requests.post("https://api.sporttracks.mobi/api/v2/system/connect", headers={"Authorization": "Bearer %s" % access_token})
uid = uid_res.json()["user"]["uid"]
return (uid, {"RefreshToken": refresh_token})
def RevokeAuthorization(self, serviceRecord):
pass # Can't revoke these tokens :(
def DeleteCachedData(self, serviceRecord):
cachedb.sporttracks_meta_cache.remove({"ExternalID": serviceRecord.ExternalID})
def DownloadActivityList(self, serviceRecord, exhaustive=False):
headers = self._getAuthHeaders(serviceRecord)
activities = []
exclusions = []
pageUri = self.OpenFitEndpoint + "/fitnessActivities.json"
activity_tz_cache_raw = cachedb.sporttracks_meta_cache.find_one({"ExternalID": serviceRecord.ExternalID})
activity_tz_cache_raw = activity_tz_cache_raw if activity_tz_cache_raw else {"Activities":[]}
activity_tz_cache = dict([(x["ActivityURI"], x["TZ"]) for x in activity_tz_cache_raw["Activities"]])
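        # activity_tz_cache is a plain dict of activity URI -> UTC offset in minutes,
        # e.g. (hypothetical) {"<activity uri>": -300} for an activity recorded at UTC-5.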
while True:
logger.debug("Req against " + pageUri)
res = requests.get(pageUri, headers=headers)
try:
res = res.json()
except ValueError:
raise APIException("Could not decode activity list response %s %s" % (res.status_code, res.text))
for act in res["items"]:
activity = UploadedActivity()
activity.ServiceData = {"ActivityURI": act["uri"]}
if len(act["name"].strip()):
activity.Name = act["name"]
# Longstanding ST.mobi bug causes it to return negative partial-hour timezones as "-2:-30" instead of "-2:30"
fixed_start_time = re.sub(r":-(\d\d)", r":\1", act["start_time"])
activity.StartTime = dateutil.parser.parse(fixed_start_time)
if isinstance(activity.StartTime.tzinfo, tzutc):
activity.TZ = pytz.utc # The dateutil tzutc doesn't have an _offset value.
else:
activity.TZ = pytz.FixedOffset(activity.StartTime.tzinfo.utcoffset(activity.StartTime).total_seconds() / 60) # Convert the dateutil lame timezones into pytz awesome timezones.
activity.StartTime = activity.StartTime.replace(tzinfo=activity.TZ)
activity.EndTime = activity.StartTime + timedelta(seconds=float(act["duration"]))
activity.Stats.TimerTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=float(act["duration"])) # OpenFit says this excludes paused times.
# Sometimes activities get returned with a UTC timezone even when they are clearly not in UTC.
if activity.TZ == pytz.utc:
if act["uri"] in activity_tz_cache:
activity.TZ = pytz.FixedOffset(activity_tz_cache[act["uri"]])
else:
# So, we get the first location in the activity and calculate the TZ from that.
try:
firstLocation = self._downloadActivity(serviceRecord, activity, returnFirstLocation=True)
except APIExcludeActivity:
pass
else:
try:
activity.CalculateTZ(firstLocation, recalculate=True)
except:
# We tried!
pass
else:
activity.AdjustTZ()
finally:
activity_tz_cache[act["uri"]] = activity.StartTime.utcoffset().total_seconds() / 60
logger.debug("Activity s/t " + str(activity.StartTime))
activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=float(act["total_distance"]))
types = [x.strip().lower() for x in act["type"].split(":")]
types.reverse() # The incoming format is like "walking: hiking" and we want the most specific first
activity.Type = None
for type_key in types:
if type_key in self._activityMappings:
activity.Type = self._activityMappings[type_key]
break
if not activity.Type:
exclusions.append(APIExcludeActivity("Unknown activity type %s" % act["type"], activity_id=act["uri"], user_exception=UserException(UserExceptionType.Other)))
continue
activity.CalculateUID()
activities.append(activity)
if not exhaustive or "next" not in res or not len(res["next"]):
break
else:
pageUri = res["next"]
logger.debug("Writing back meta cache")
cachedb.sporttracks_meta_cache.update({"ExternalID": serviceRecord.ExternalID}, {"ExternalID": serviceRecord.ExternalID, "Activities": [{"ActivityURI": k, "TZ": v} for k, v in activity_tz_cache.items()]}, upsert=True)
return activities, exclusions
def _downloadActivity(self, serviceRecord, activity, returnFirstLocation=False):
activityURI = activity.ServiceData["ActivityURI"]
headers = self._getAuthHeaders(serviceRecord)
activityData = requests.get(activityURI, headers=headers)
activityData = activityData.json()
if "clock_duration" in activityData:
activity.EndTime = activity.StartTime + timedelta(seconds=float(activityData["clock_duration"]))
activity.Private = "sharing" in activityData and activityData["sharing"] != "public"
activity.GPS = False # Gets set back if there is GPS data
if "notes" in activityData:
activity.Notes = activityData["notes"]
activity.Stats.Energy = ActivityStatistic(ActivityStatisticUnit.Kilojoules, value=float(activityData["calories"]))
activity.Stats.Elevation = ActivityStatistic(ActivityStatisticUnit.Meters, gain=float(activityData["elevation_gain"]) if "elevation_gain" in activityData else None, loss=float(activityData["elevation_loss"]) if "elevation_loss" in activityData else None)
activity.Stats.HR = ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=activityData["avg_heartrate"] if "avg_heartrate" in activityData else None, max=activityData["max_heartrate"] if "max_heartrate" in activityData else None)
activity.Stats.Cadence = ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, avg=activityData["avg_cadence"] if "avg_cadence" in activityData else None, max=activityData["max_cadence"] if "max_cadence" in activityData else None)
activity.Stats.Power = ActivityStatistic(ActivityStatisticUnit.Watts, avg=activityData["avg_power"] if "avg_power" in activityData else None, max=activityData["max_power"] if "max_power" in activityData else None)
laps_info = []
laps_starts = []
if "laps" in activityData:
laps_info = activityData["laps"]
for lap in activityData["laps"]:
laps_starts.append(dateutil.parser.parse(lap["start_time"]))
lap = None
for lapinfo in laps_info:
lap = Lap()
activity.Laps.append(lap)
lap.StartTime = dateutil.parser.parse(lapinfo["start_time"])
lap.EndTime = lap.StartTime + timedelta(seconds=lapinfo["clock_duration"])
if "type" in lapinfo:
lap.Intensity = LapIntensity.Active if lapinfo["type"] == "ACTIVE" else LapIntensity.Rest
if "distance" in lapinfo:
lap.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=float(lapinfo["distance"]))
if "duration" in lapinfo:
lap.Stats.TimerTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=lapinfo["duration"])
if "calories" in lapinfo:
lap.Stats.Energy = ActivityStatistic(ActivityStatisticUnit.Kilojoules, value=lapinfo["calories"])
if "elevation_gain" in lapinfo:
lap.Stats.Elevation.update(ActivityStatistic(ActivityStatisticUnit.Meters, gain=float(lapinfo["elevation_gain"])))
if "elevation_loss" in lapinfo:
lap.Stats.Elevation.update(ActivityStatistic(ActivityStatisticUnit.Meters, loss=float(lapinfo["elevation_loss"])))
if "max_speed" in lapinfo:
lap.Stats.Speed.update(ActivityStatistic(ActivityStatisticUnit.MetersPerSecond, max=float(lapinfo["max_speed"])))
if "avg_speed" in lapinfo:
lap.Stats.Speed.update(ActivityStatistic(ActivityStatisticUnit.MetersPerSecond, avg=float(lapinfo["avg_speed"])))
if "max_heartrate" in lapinfo:
lap.Stats.HR.update(ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, max=float(lapinfo["max_heartrate"])))
if "avg_heartrate" in lapinfo:
lap.Stats.HR.update(ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=float(lapinfo["avg_heartrate"])))
if lap is None: # No explicit laps => make one that encompasses the entire activity
lap = Lap()
activity.Laps.append(lap)
lap.Stats = activity.Stats
lap.StartTime = activity.StartTime
lap.EndTime = activity.EndTime
elif len(activity.Laps) == 1:
activity.Stats.update(activity.Laps[0].Stats) # Lap stats have a bit more info generally.
activity.Laps[0].Stats = activity.Stats
timerStops = []
if "timer_stops" in activityData:
for stop in activityData["timer_stops"]:
timerStops.append([dateutil.parser.parse(stop[0]), dateutil.parser.parse(stop[1])])
def isInTimerStop(timestamp):
for stop in timerStops:
if timestamp >= stop[0] and timestamp < stop[1]:
return True
if timestamp >= stop[1]:
return False
return False
# Collate the individual streams into our waypoints.
# Global sample rate is variable - will pick the next nearest stream datapoint.
# Resampling happens on a lookbehind basis - new values will only appear once their timestamp has been reached/passed
wasInPause = False
currentLapIdx = 0
lap = activity.Laps[currentLapIdx]
streams = []
for stream in ["location", "elevation", "heartrate", "power", "cadence", "distance"]:
if stream in activityData:
streams.append(stream)
stream_indices = dict([(stream, -1) for stream in streams]) # -1 meaning the stream has yet to start
stream_lengths = dict([(stream, len(activityData[stream])/2) for stream in streams])
# Data comes as "stream":[timestamp,value,timestamp,value,...]
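# For example (values assumed): a raw heartrate stream [0, 120, 5, 125, 10, 130] is paired up by
# the loop below into [(0, 120), (5, 125), (10, 130)], i.e. (seconds-offset, value) tuples.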
stream_values = {}
for stream in streams:
values = []
for x in range(0,int(len(activityData[stream])/2)):
values.append((activityData[stream][x * 2], activityData[stream][x * 2 + 1]))
stream_values[stream] = values
currentOffset = 0
def streamVal(stream):
nonlocal stream_values, stream_indices
return stream_values[stream][stream_indices[stream]][1]
def hasStreamData(stream):
nonlocal stream_indices, streams
return stream in streams and stream_indices[stream] >= 0
while True:
advance_stream = None
advance_offset = None
for stream in streams:
if stream_indices[stream] + 1 == stream_lengths[stream]:
continue # We're at the end - can't advance
if advance_offset is None or stream_values[stream][stream_indices[stream] + 1][0] - currentOffset < advance_offset:
advance_offset = stream_values[stream][stream_indices[stream] + 1][0] - currentOffset
advance_stream = stream
if not advance_stream:
break # We've hit the end of every stream, stop
# Advance streams sharing the current timestamp
for stream in streams:
if stream == advance_stream:
continue # For clarity, we increment this later
if stream_indices[stream] + 1 == stream_lengths[stream]:
continue # We're at the end - can't advance
if stream_values[stream][stream_indices[stream] + 1][0] == stream_values[advance_stream][stream_indices[advance_stream] + 1][0]:
stream_indices[stream] += 1
stream_indices[advance_stream] += 1 # Advance the key stream for this waypoint
currentOffset = stream_values[advance_stream][stream_indices[advance_stream]][0] # Update the current time offset
waypoint = Waypoint(activity.StartTime + timedelta(seconds=currentOffset))
if hasStreamData("location"):
waypoint.Location = Location(streamVal("location")[0], streamVal("location")[1], None)
activity.GPS = True
if returnFirstLocation:
return waypoint.Location
if hasStreamData("elevation"):
if not waypoint.Location:
waypoint.Location = Location(None, None, None)
waypoint.Location.Altitude = streamVal("elevation")
if hasStreamData("heartrate"):
waypoint.HR = streamVal("heartrate")
if hasStreamData("power"):
waypoint.Power = streamVal("power")
if hasStreamData("cadence"):
waypoint.Cadence = streamVal("cadence")
if hasStreamData("distance"):
waypoint.Distance = streamVal("distance")
inPause = isInTimerStop(waypoint.Timestamp)
waypoint.Type = WaypointType.Regular if not inPause else WaypointType.Pause
if wasInPause and not inPause:
waypoint.Type = WaypointType.Resume
wasInPause = inPause
# We only care if it's possible to start a new lap, i.e. there are more left
if currentLapIdx + 1 < len(laps_starts):
if laps_starts[currentLapIdx + 1] < waypoint.Timestamp:
# A new lap has started
currentLapIdx += 1
lap = activity.Laps[currentLapIdx]
lap.Waypoints.append(waypoint)
if returnFirstLocation:
return None # I guess there were no waypoints?
if activity.CountTotalWaypoints():
activity.GetFlatWaypoints()[0].Type = WaypointType.Start
activity.GetFlatWaypoints()[-1].Type = WaypointType.End
activity.Stationary = False
else:
activity.Stationary = True
return activity
def DownloadActivity(self, serviceRecord, activity):
return self._downloadActivity(serviceRecord, activity)
def UploadActivity(self, serviceRecord, activity):
activityData = {}
# Props to the SportTracks API people for seamlessly supporting activities with or without TZ data.
activityData["start_time"] = activity.StartTime.isoformat()
if activity.Name:
activityData["name"] = activity.Name
if activity.Notes:
activityData["notes"] = activity.Notes
activityData["sharing"] = "public" if not activity.Private else "private"
activityData["type"] = self._reverseActivityMappings[activity.Type]
def _resolveDuration(obj):
if obj.Stats.TimerTime.Value is not None:
return obj.Stats.TimerTime.asUnits(ActivityStatisticUnit.Seconds).Value
if obj.Stats.MovingTime.Value is not None:
return obj.Stats.MovingTime.asUnits(ActivityStatisticUnit.Seconds).Value
return (obj.EndTime - obj.StartTime).total_seconds()
def _mapStat(dict, key, val, naturalValue=False):
if val is not None:
if naturalValue:
val = round(val)
dict[key] = val
_mapStat(activityData, "clock_duration", (activity.EndTime - activity.StartTime).total_seconds())
_mapStat(activityData, "duration", _resolveDuration(activity)) # This has to be set, otherwise all time shows up as "stopped" :(
_mapStat(activityData, "total_distance", activity.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value)
_mapStat(activityData, "calories", activity.Stats.Energy.asUnits(ActivityStatisticUnit.Kilojoules).Value, naturalValue=True)
_mapStat(activityData, "elevation_gain", activity.Stats.Elevation.Gain)
_mapStat(activityData, "elevation_loss", activity.Stats.Elevation.Loss)
_mapStat(activityData, "max_speed", activity.Stats.Speed.Max)
_mapStat(activityData, "avg_heartrate", activity.Stats.HR.Average)
_mapStat(activityData, "max_heartrate", activity.Stats.HR.Max)
_mapStat(activityData, "avg_cadence", activity.Stats.Cadence.Average)
_mapStat(activityData, "max_cadence", activity.Stats.Cadence.Max)
_mapStat(activityData, "avg_power", activity.Stats.Power.Average)
_mapStat(activityData, "max_power", activity.Stats.Power.Max)
activityData["laps"] = []
lapNum = 0
for lap in activity.Laps:
lapNum += 1
lapinfo = {
"number": lapNum,
"start_time": lap.StartTime.isoformat(),
"type": "REST" if lap.Intensity == LapIntensity.Rest else "ACTIVE"
}
_mapStat(lapinfo, "clock_duration", (lap.EndTime - lap.StartTime).total_seconds()) # Required too.
_mapStat(lapinfo, "duration", _resolveDuration(lap)) # This field is required for laps to be created.
_mapStat(lapinfo, "distance", lap.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value) # Probably required.
_mapStat(lapinfo, "calories", lap.Stats.Energy.asUnits(ActivityStatisticUnit.Kilojoules).Value, naturalValue=True)
_mapStat(lapinfo, "elevation_gain", lap.Stats.Elevation.Gain)
_mapStat(lapinfo, "elevation_loss", lap.Stats.Elevation.Loss)
_mapStat(lapinfo, "max_speed", lap.Stats.Speed.Max)
_mapStat(lapinfo, "avg_heartrate", lap.Stats.HR.Average)
_mapStat(lapinfo, "max_heartrate", lap.Stats.HR.Max)
activityData["laps"].append(lapinfo)
if not activity.Stationary:
timer_stops = []
timer_stopped_at = None
def stream_append(stream, wp, data):
stream += [round((wp.Timestamp - activity.StartTime).total_seconds()), data]
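# Sketch of the resulting layout (values assumed): a waypoint 12 s after the start carrying the
# value 130 extends the stream to [..., 12, 130], the same flat [offset, value, ...] format that
# the download code above parses.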
location_stream = []
distance_stream = []
elevation_stream = []
heartrate_stream = []
power_stream = []
cadence_stream = []
for lap in activity.Laps:
for wp in lap.Waypoints:
if wp.Location and wp.Location.Latitude and wp.Location.Longitude:
stream_append(location_stream, wp, [wp.Location.Latitude, wp.Location.Longitude])
if wp.HR:
stream_append(heartrate_stream, wp, round(wp.HR))
if wp.Distance:
stream_append(distance_stream, wp, wp.Distance)
if wp.Cadence or wp.RunCadence:
stream_append(cadence_stream, wp, round(wp.Cadence) if wp.Cadence else round(wp.RunCadence))
if wp.Power:
stream_append(power_stream, wp, wp.Power)
if wp.Location and wp.Location.Altitude:
stream_append(elevation_stream, wp, wp.Location.Altitude)
if wp.Type == WaypointType.Pause and not timer_stopped_at:
timer_stopped_at = wp.Timestamp
if wp.Type != WaypointType.Pause and timer_stopped_at:
timer_stops.append([timer_stopped_at, wp.Timestamp])
timer_stopped_at = None
activityData["elevation"] = elevation_stream
activityData["heartrate"] = heartrate_stream
activityData["power"] = power_stream
activityData["cadence"] = cadence_stream
activityData["distance"] = distance_stream
activityData["location"] = location_stream
activityData["timer_stops"] = [[y.isoformat() for y in x] for x in timer_stops]
headers = self._getAuthHeaders(serviceRecord)
headers.update({"Content-Type": "application/json"})
upload_resp = requests.post(self.OpenFitEndpoint + "/fitnessActivities.json", data=json.dumps(activityData), headers=headers)
if upload_resp.status_code != 200:
if upload_resp.status_code == 401:
raise APIException("ST.mobi trial expired", block=True, user_exception=UserException(UserExceptionType.AccountExpired, intervention_required=True))
raise APIException("Unable to upload activity %s" % upload_resp.text)
return upload_resp.json()["uris"][0]
|
the-stack_106_26428 | import sys
import gzip
import json
import pytest
import numpy as np
import yaml
from aizynthfinder.context.config import Configuration
from aizynthfinder.context.policy import ExpansionPolicy, FilterPolicy
from aizynthfinder.context.stock import Stock
from aizynthfinder.mcts.node import Node
from aizynthfinder.chem import Molecule, TreeMolecule, RetroReaction
from aizynthfinder.mcts.mcts import SearchTree
from aizynthfinder.analysis import TreeAnalysis
from aizynthfinder.utils.trees import (
AndOrSearchTreeBase,
TreeNodeMixin,
SplitAndOrTree,
)
from aizynthfinder.utils.serialization import MoleculeDeserializer
def pytest_addoption(parser):
parser.addoption(
"--finder_config",
help="the configuration file for the aizynthfinder",
)
parser.addoption(
"--stocks",
nargs="+",
help="the stocks to use in the aizynthfinder",
)
parser.addoption("--policy", help="the policy to use in the aizynthfinder")
parser.addoption(
"--run_integration",
action="store_true",
default=False,
help="run integration tests",
)
def pytest_collection_modifyitems(config, items):
if config.getoption("--run_integration"):
return
skip_integration = pytest.mark.skip(reason="need --run_integration option to run")
for item in items:
if "integration" in item.keywords:
item.add_marker(skip_integration)
def pytest_configure(config):
config.addinivalue_line(
"markers", "integration: this one is for integration tests."
)
@pytest.fixture
def add_cli_arguments():
saved_argv = list(sys.argv)
def wrapper(args):
sys.argv = [sys.argv[0]] + args.split(" ")
yield wrapper
sys.argv = saved_argv
@pytest.fixture
def default_config():
return Configuration()
@pytest.fixture
def filter_policy(default_config):
policy = FilterPolicy(default_config)
return policy
@pytest.fixture
def fresh_tree(default_config):
return SearchTree(config=default_config, root_smiles=None)
@pytest.fixture
def generate_root(default_config):
def wrapper(smiles):
return Node.create_root(smiles, tree=None, config=default_config)
return wrapper
@pytest.fixture
def mock_expansion_policy(mocker, simple_actions):
mocked_get_action = mocker.patch(
"aizynthfinder.context.policy.ExpansionPolicy.get_actions"
)
def wrapper(mol):
mocked_get_action.return_value = simple_actions(mol)
return mocked_get_action.return_value
return wrapper
@pytest.fixture
def load_reaction_tree(shared_datadir):
def wrapper(filename, index=0):
filename = str(shared_datadir / filename)
with open(filename, "r") as fileobj:
trees = json.load(fileobj)
if isinstance(trees, dict):
return trees
else:
return trees[index]
return wrapper
@pytest.fixture
def mocked_reaction(mocker):
"""
Fixture for creating a mocked Reaction object
Will return a function that should be called with the parent of the reaction
and the TreeMolecule objects that should be returned when calling ``apply``.
"""
def wrapper(parent, return_value):
class MockedReaction(mocker.MagicMock):
@property
def index(self):
return 0
@property
def mol(self):
return parent
@property
def reactants(self):
return [return_value] if return_value else []
def apply(self, *_):
return self.reactants
return MockedReaction()
return wrapper
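# Possible use inside a test (names are illustrative only):
#   rxn = mocked_reaction(parent_mol, [child_mol])
#   rxn.apply()  # -> [[child_mol]]; a falsy second argument would make it return []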
@pytest.fixture
def mock_get_actions(mocker, mocked_reaction):
"""
Fixture for mocking the call to the ``get_actions`` method of the Policy class,
used to return reactions and probabilities of these actions.
Will return a function that should be called with:
- the parent TreeMolecule object
- the SMILES of the State object of the Node that will be calling ``get_actions``
- a list of the SMILES of the molecules that will be returned by the Reaction class
- a list of probabilities for each action that should be returned by ``get_actions``
the function will create the TreeMolecule objects that should be returned by the Reaction classes,
and it will return them to the caller.
"""
actions = {}
def get_action(mols):
key = tuple(mol.smiles for mol in mols)
return actions[key]
mocked_get_actions = mocker.patch(
"aizynthfinder.context.policy.ExpansionPolicy.get_actions"
)
mocked_get_actions.side_effect = get_action
def wrapper(parent, key_smiles, child_smiles_list, probs):
rxn_objs = []
mol_objs_list = []
for child_smiles in child_smiles_list:
if not child_smiles:
rxn_objs.append(mocked_reaction(parent, None))
continue
mol_objs = [
TreeMolecule(parent=parent, smiles=smiles) for smiles in child_smiles
]
mol_objs_list.append(mol_objs)
rxn_objs.append(mocked_reaction(parent, mol_objs))
actions[key_smiles] = rxn_objs, probs
return mol_objs_list
return wrapper
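# Hedged usage sketch (the SMILES strings are made up): if the node's state holds a single
# molecule ``mol`` with SMILES "CCO", a test could register one mocked reaction via
#   mock_get_actions(mol, ("CCO",), [["CC", "O"]], [1.0])
# and get back the list of TreeMolecule lists that the mocked reaction will produce.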
@pytest.fixture
def mock_policy_model(mocker):
class MockedKerasModel(mocker.MagicMock):
@property
def input(self):
pass
@property
def output(self):
pass
def predict(self, *_):
pass
mocker.patch.object(
MockedKerasModel, "input", mocker.PropertyMock(return_value=np.zeros((3, 3)))
)
mocker.patch.object(
MockedKerasModel, "output", mocker.PropertyMock(return_value=np.zeros((3, 3)))
)
mocker.patch.object(
MockedKerasModel,
"predict",
mocker.MagicMock(return_value=np.array([[0.2, 0.7, 0.1]])),
)
return mocker.patch(
"aizynthfinder.utils.models.load_keras_model", return_value=MockedKerasModel
)
@pytest.fixture
def mock_stock(tmpdir):
"""
Fixture for setting up a stock of InChI keys in a text file.
Will return a function that should be called with any number of Molecule objects as arguments
"""
def wrapper(config, *molecules):
molecules = [
Molecule(smiles=mol) if isinstance(mol, str) else mol for mol in molecules
]
filename = str(tmpdir / "stock.txt")
with open(filename, "w") as fileobj:
fileobj.write("\n".join([mol.inchi_key for mol in molecules]))
config.stock.load(filename, "stock")
config.stock.select("stock")
return wrapper
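# Example call (molecules are placeholders): mock_stock(default_config, "CCO", Molecule(smiles="c1ccccc1"))
# writes the InChI keys to a temporary stock file, loads it under the name "stock" and selects it.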
@pytest.fixture
def expansion_policy(default_config):
policy = ExpansionPolicy(default_config)
return policy
@pytest.fixture
def setup_analysis(default_config, shared_datadir, tmpdir, mock_stock):
mock_stock(
default_config, "N#Cc1cccc(N)c1F", "O=C(Cl)c1ccc(F)cc1", "CN1CCC(Cl)CC1", "O"
)
with gzip.open(shared_datadir / "full_search_tree.json.gz", "rb") as gzip_obj:
with open(tmpdir / "full_search_tree.json", "wb") as fileobj:
fileobj.write(gzip_obj.read())
tree = SearchTree.from_json(tmpdir / "full_search_tree.json", default_config)
nodes = list(tree.graph())
def wrapper(scorer=None):
return TreeAnalysis(tree, scorer=scorer), nodes
return wrapper
@pytest.fixture
def setup_complete_tree(fresh_tree, mocker, mock_stock):
tree = fresh_tree
state1 = mocker.MagicMock()
state1.mols = [
TreeMolecule(
parent=None,
transform=0,
smiles="CN1CCC(C(=O)c2cccc(NC(=O)c3ccc(F)cc3)c2F)CC1",
)
]
state1.in_stock_list = [False]
state1.score = 0.049
state1.is_solved = False
state1.max_transforms = state1.mols[0].transform
node1 = mocker.MagicMock()
node1.state = state1
node1.parent = None
node1.tree = tree
node1.is_expanded = True
action1 = (
"([C:2]-[CH;D3;+0:1](-[C:3])-[C;H0;D3;+0:4](=[O;H0;D1;+0:6])-[c:5])"
">>(Cl-[CH;D3;+0:1](-[C:2])-[C:3]).(N#[C;H0;D2;+0:4]-[c:5]).([OH2;D0;+0:6])"
)
reaction1 = RetroReaction(state1.mols[0], action1)
reaction1.apply()
node1.__getitem__.return_value = {"action": reaction1}
tree.root = node1
state2 = mocker.MagicMock()
state2.mols = [
TreeMolecule(parent=state1.mols[0], smiles=smiles)
for smiles in ["CN1CCC(Cl)CC1", "N#Cc1cccc(NC(=O)c2ccc(F)cc2)c1F", "O"]
]
state2.max_transforms = state2.mols[0].transform
state2.in_stock_list = [True, False, True]
state2.score = 0.68
state2.is_solved = False
node2 = mocker.MagicMock()
node2.parent = node1
node2.is_expanded = True
node2.state = state2
node2.tree = tree
node1.promising_child.return_value = node2
node1.children.return_value = [node2]
action2 = (
"([O;D1;H0:2]=[C;H0;D3;+0:1](-[c:3])-[NH;D2;+0:4]-[c:5])"
">>(Cl-[C;H0;D3;+0:1](=[O;D1;H0:2])-[c:3]).([NH2;D1;+0:4]-[c:5])"
)
reaction2 = RetroReaction(state2.mols[1], action2)
reaction2.apply()
node2.__getitem__.return_value = {"action": reaction2}
state3 = mocker.MagicMock()
state3.mols = [
TreeMolecule(parent=state2.mols[1], smiles=smiles)
for smiles in ["N#Cc1cccc(N)c1F", "O=C(Cl)c1ccc(F)cc1"]
]
state3.max_transforms = state3.mols[0].transform
state3.in_stock_list = [True, True]
state3.stock = mocker.MagicMock()
state3.stock.__contains__.side_effect = [False, True, False, True, True, True]
state3.score = 0.99
state3.is_solved = True
node3 = mocker.MagicMock()
node3.parent = node2
node3.tree = tree
node3.state = state3
node2.promising_child.return_value = node3
node2.children.return_value = [node3]
node3.children.return_value = []
return tree, [node1, node2, node3]
@pytest.fixture
def set_default_prior(default_config):
default_config.use_prior = False
def wrapper(prior):
default_config.default_prior = prior
yield wrapper
default_config.use_prior = True
@pytest.fixture
def setup_search(fresh_tree, generate_root):
tree = fresh_tree
tree.initialize()
def wrapper(smiles):
root = generate_root(smiles)
tree.add_root(root)
return tree, root
return wrapper
@pytest.fixture
def simple_actions():
# These templated reactions are taken from the full USPTO data set
# action1 and action2 can be applied, action3 cannot
def wrapper(mol):
actions = {
"CCCCOc1ccc(CC(=O)N(C)O)cc1": [
"([#8:4]-[N;H0;D3;+0:5](-[C;D1;H3:6])-[C;H0;D3;+0:1](-[C:2])=[O;D1;H0:3])"
">>(Cl-[C;H0;D3;+0:1](-[C:2])=[O;D1;H0:3]).([#8:4]-[NH;D2;+0:5]-[C;D1;H3:6])",
"([C:2]-[CH2;D2;+0:1]-[O;H0;D2;+0:3]-[c:4])>>(Br-[CH2;D2;+0:1]-[C:2]).([OH;D1;+0:3]-[c:4])",
"([C:4]-[N;H0;D3;+0:5](-[C:6])-[C;H0;D3;+0:1](-[C:2])=[O;D1;H0:3])>>"
"(O-[C;H0;D3;+0:1](-[C:2])=[O;D1;H0:3]).([C:4]-[NH;D2;+0:5]-[C:6])",
]
}
action1, action2, action3 = actions[mol.smiles]
action_list = [
RetroReaction(mol, action1, metadata={"dummy": 1}),
RetroReaction(mol, action2, metadata={"dummy": 2}),
RetroReaction(mol, action3, metadata={"dummy": 3}),
]
prior_list = [0.7, 0.5, 0.3]
return action_list, prior_list
return wrapper
@pytest.fixture
def stock():
stock = Stock()
return stock
@pytest.fixture
def write_yaml(tmpdir):
filename = str(tmpdir / "test.yaml")
def wrapper(dict_):
with open(filename, "w") as fileobj:
yaml.dump(dict_, fileobj)
return filename
return wrapper
@pytest.fixture
def setup_analysis_andor_tree(default_config, shared_datadir, mock_stock): # noqa
mock_stock(
default_config,
"Nc1ccc(NC(=S)Nc2ccccc2)cc1",
"Cc1ccc2nc3ccccc3c(Cl)c2c1",
"Nc1ccccc1",
"Nc1ccc(N=C=S)cc1",
"Cc1ccc2nc3ccccc3c(Br)c2c1",
"Nc1ccc(Br)cc1",
)
class BasicAndOrTree(AndOrSearchTreeBase):
def __init__(self, filename, config):
super().__init__(config)
self._mol_nodes = []
with open(filename, "r") as fileobj:
dict_ = json.load(fileobj)
mol_deser = MoleculeDeserializer(dict_["molecules"])
self.root = AndOrNode(dict_["tree"], config, mol_deser, self)
@property
def mol_nodes(self):
return self._mol_nodes
def one_iteration(self):
return False
def routes(self):
return SplitAndOrTree(self.root, self.config.stock).routes
class AndOrNode(TreeNodeMixin):
def __init__(self, dict_, config, molecules, tree):
self.tree = tree
self.config = config
self._children = [
AndOrNode(child, config, molecules, tree) for child in dict_["children"]
]
if "reaction" in dict_:
self._obj = RetroReaction(
molecules[dict_["reaction"]["mol"]],
dict_["reaction"]["smarts"],
dict_["reaction"]["index"],
dict_["reaction"].get("metadata", {}),
)
self._solved = all(child.prop["solved"] for child in self._children)
else:
self.tree._mol_nodes.append(self)
self._obj = molecules[dict_["mol"]]
self._solved = self._obj in self.config.stock
@property
def prop(self):
obj_key = "reaction" if isinstance(self._obj, RetroReaction) else "mol"
return {obj_key: self._obj, "solved": self._solved}
@property
def children(self):
return self._children
tree = BasicAndOrTree(str(shared_datadir / "and_or_tree.json"), default_config)
def wrapper(scorer=None):
return TreeAnalysis(tree, scorer=scorer)
return wrapper
|
the-stack_106_26431 | """
Action class for Jaseci
Each action has an id, name, timestamp and it's set of edges.
"""
from .item import item
from jaseci.actions.live_actions import live_actions
# ACTION_PACKAGE = 'jaseci.actions.'
class action(item):
"""
Action class for Jaseci
preset_in_out holds a set of parameters in the form of lists of context
objects that are to be used from wherever those contexts are attached
e.g., (nodes, edges, walkers, etc). This is used by Jac's runtime
engine to support preset actions in nodes
access_list is used by walker to decide what to trigger
"""
def __init__(self, preset_in_out=None, access_list=None,
*args, **kwargs):
self.preset_in_out = preset_in_out # Not using _ids convention
self.access_list = access_list
super().__init__(*args, **kwargs)
def trigger(self, param_list, scope):
"""
param_list should be passed as a list of values to lib functions.
Also note that Jac stores preset_in_out as an input/output list of hex
ids since preset_in_out doesn't use the _ids convention.
"""
result = live_actions[
self.value](*param_list,
meta={'m_id': scope.parent._m_id,
'h': scope.parent._h, 'scope': scope})
return result
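# Rough usage sketch (object names assumed): my_action.trigger(["hello", 42], walker_scope)
# forwards the two values positionally to the registered live action and supplies the master id,
# hook and scope through the ``meta`` keyword; the live action's return value is passed back.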
|
the-stack_106_26433 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a defid node can load multiple wallet files
"""
import os
import shutil
import time
from test_framework.test_framework import DefiTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class MultiWalletTest(DefiTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.supports_cli = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, 'regtest', *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': '' }] })
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir('wallet.dat')), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
os.rename(wallet_dir("wallet.dat"), wallet_dir("w8"))
# create another dummy wallet for use in testing backups later
self.start_node(0, [])
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_dir("wallet.dat"), empty_wallet)
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly
# '' - to verify default wallet file is created correctly
wallet_names = ['w1', 'w2', 'w3', 'w', 'sub/w5', os.path.join(self.options.tmpdir, 'extern/w6'), 'w7_symlink', 'w8', '']
extra_args = ['-wallet={}'.format(n) for n in wallet_names]
self.start_node(0, extra_args)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8'])
assert_equal(set(node.listwallets()), set(wallet_names))
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
# should not initialize if wallet path can't be created
exp_stderr = "boost::filesystem::create_directory:"
self.nodes[0].assert_start_raises_init_error(['-wallet=wallet.dat/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
# should not initialize if there are duplicate wallets
self.nodes[0].assert_start_raises_init_error(['-wallet=w1', '-wallet=w1'], 'Error: Error loading wallet w1. Duplicate -wallet filename specified.')
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
exp_stderr = "BerkeleyBatch: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], 'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -zapwallettxes with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=1', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=2', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.log.info("Do not allow -salvagewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-salvagewallet', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-salvagewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0, ['-wallet=w4', '-wallet=w5'])
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5.importprivkey(privkey=node.get_genesis_keys().operatorPrivKey)
node.generate(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()])
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-walletdir=' + competing_wallet_dir])
exp_stderr = "Error: Error initializing wallet database environment \"\S+competing_walletdir\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0, extra_args)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy'])
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generate(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generate(nblocks=101, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generate(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], "regtest")
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(4.0)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 4.0)
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-32601, "Method not found", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
assert_raises_rpc_error(-18, 'Wallet wallets not found.', self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
assert_raises_rpc_error(-4, 'Wallet file verification failed: Error loading wallet w1. Duplicate -wallet filename specified.', self.nodes[0].loadwallet, wallet_names[0])
# Fail to load duplicate wallets by different ways (directory and filepath)
assert_raises_rpc_error(-4, "Wallet file verification failed: Error loading wallet wallet.dat. Duplicate -wallet filename specified.", self.nodes[0].loadwallet, 'wallet.dat')
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-1, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-1, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed: Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
assert_raises_rpc_error(-18, "Directory empty_wallet_dir does not contain a wallet.dat file", self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
assert_raises_rpc_error(-4, "Wallet w2 already exists.", self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "Cannot unload the requested wallet", w1.unloadwallet, "w2"),
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-32601, "Method not found (wallet method is disabled because no wallet is loaded)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy', 'w9'])
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
|
the-stack_106_26434 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a batch job for performing Tensorflow Model Analysis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tempfile
import apache_beam as beam
import tensorflow as tf
import tensorflow_model_analysis as tfma
try:
# Absolute import is preferred after 0.13 release, in which the path below
# will be available in TFX package and will be a dependency of chicago taxi
# example.
from tfx.examples.chicago_taxi.trainer import taxi # pylint: disable=g-import-not-at-top
except ImportError:
from trainer import taxi # pylint: disable=g-import-not-at-top
def process_tfma(eval_result_dir,
schema_file,
input_csv=None,
big_query_table=None,
eval_model_dir=None,
max_eval_rows=None,
pipeline_args=None):
"""Runs a batch job to evaluate the eval_model against the given input.
Args:
eval_result_dir: A directory where the evaluation result should be written
to.
schema_file: A file containing a text-serialized Schema that describes the
eval data.
input_csv: A path to a csv file which should be the input for evaluation.
This can only be set if big_query_table is None.
big_query_table: A BigQuery table name specified as DATASET.TABLE which
should be the input for evaluation. This can only be set if input_csv is
None.
eval_model_dir: A directory where the eval model is located.
max_eval_rows: Number of rows to query from BigQuery.
pipeline_args: additional DataflowRunner or DirectRunner args passed to the
beam pipeline.
Raises:
ValueError: if input_csv and big_query_table are not specified correctly.
"""
if input_csv == big_query_table and input_csv is None:
raise ValueError(
'one of --input_csv or --big_query_table should be provided.')
slice_spec = [
tfma.slicer.SingleSliceSpec(),
tfma.slicer.SingleSliceSpec(columns=['trip_start_hour'])
]
schema = taxi.read_schema(schema_file)
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=eval_model_dir,
add_metrics_callbacks=[
tfma.post_export_metrics.calibration_plot_and_prediction_histogram(),
tfma.post_export_metrics.auc_plots()
])
with beam.Pipeline(argv=pipeline_args) as pipeline:
if input_csv:
csv_coder = taxi.make_csv_coder(schema)
raw_data = (
pipeline
| 'ReadFromText' >> beam.io.ReadFromText(
input_csv, skip_header_lines=1)
| 'ParseCSV' >> beam.Map(csv_coder.decode))
else:
assert big_query_table
query = taxi.make_sql(big_query_table, max_eval_rows, for_eval=True)
raw_feature_spec = taxi.get_raw_feature_spec(schema)
raw_data = (
pipeline
| 'ReadBigQuery' >> beam.io.Read(
beam.io.BigQuerySource(query=query, use_standard_sql=True))
| 'CleanData' >>
beam.Map(lambda x: (taxi.clean_raw_data_dict(x, raw_feature_spec))))
# Examples must be in clean tf-example format.
coder = taxi.make_proto_coder(schema)
_ = (
raw_data
| 'ToSerializedTFExample' >> beam.Map(coder.encode)
|
'ExtractEvaluateAndWriteResults' >> tfma.ExtractEvaluateAndWriteResults(
eval_shared_model=eval_shared_model,
slice_spec=slice_spec,
output_path=eval_result_dir))
def main():
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--eval_model_dir',
help='Input path to the model which will be evaluated.')
parser.add_argument(
'--eval_result_dir',
help='Output directory in which the model analysis result is written.')
parser.add_argument(
'--big_query_table',
help='BigQuery path to input examples which will be evaluated.')
parser.add_argument(
'--input_csv',
help='CSV file containing raw data which will be evaluated.')
parser.add_argument(
'--max_eval_rows',
help='Maximum number of rows to evaluate on.',
default=None,
type=int)
parser.add_argument(
'--schema_file', help='File holding the schema for the input data')
known_args, pipeline_args = parser.parse_known_args()
if known_args.eval_result_dir:
eval_result_dir = known_args.eval_result_dir
else:
eval_result_dir = tempfile.mkdtemp()
process_tfma(
eval_result_dir,
input_csv=known_args.input_csv,
big_query_table=known_args.big_query_table,
eval_model_dir=known_args.eval_model_dir,
max_eval_rows=known_args.max_eval_rows,
schema_file=known_args.schema_file,
pipeline_args=pipeline_args)
if __name__ == '__main__':
main()
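# Hedged example invocation (script name and paths are placeholders):
#   python process_tfma.py --input_csv data/eval.csv --schema_file schema.pbtxt \
#       --eval_model_dir serving_model_dir/eval_model --eval_result_dir /tmp/tfma_result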
|
the-stack_106_26435 | import asyncio
import logging
import pathlib
import signal
import socket
import time
from typing import Dict, List
import pkg_resources
from fibo.util.chia_logging import initialize_logging
from fibo.util.config import load_config
from fibo.util.default_root import DEFAULT_ROOT_PATH
from fibo.util.setproctitle import setproctitle
active_processes: List = []
stopped = False
lock = asyncio.Lock()
log = logging.getLogger(__name__)
async def kill_processes():
global stopped
global active_processes
async with lock:
stopped = True
for process in active_processes:
try:
process.kill()
except ProcessLookupError:
pass
def find_vdf_client() -> pathlib.Path:
p = pathlib.Path(pkg_resources.get_distribution("chiavdf").location) / "vdf_client"
if p.is_file():
return p
raise FileNotFoundError("can't find vdf_client binary")
async def spawn_process(host: str, port: int, counter: int):
global stopped
global active_processes
path_to_vdf_client = find_vdf_client()
first_10_seconds = True
start_time = time.time()
while not stopped:
try:
dirname = path_to_vdf_client.parent
basename = path_to_vdf_client.name
resolved = socket.gethostbyname(host)
proc = await asyncio.create_subprocess_shell(
f"{basename} {resolved} {port} {counter}",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env={"PATH": dirname},
)
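# The shell line assembled above ends up looking roughly like "vdf_client 127.0.0.1 8000 3";
# the host, port and counter shown here are illustrative and depend on the local config.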
except Exception as e:
log.warning(f"Exception while spawning process {counter}: {(e)}")
continue
async with lock:
active_processes.append(proc)
stdout, stderr = await proc.communicate()
if stdout:
log.info(f"VDF client {counter}: {stdout.decode().rstrip()}")
if stderr:
if first_10_seconds:
if time.time() - start_time > 10:
first_10_seconds = False
else:
log.error(f"VDF client {counter}: {stderr.decode().rstrip()}")
log.info(f"Process number {counter} ended.")
async with lock:
if proc in active_processes:
active_processes.remove(proc)
await asyncio.sleep(0.1)
async def spawn_all_processes(config: Dict, net_config: Dict):
await asyncio.sleep(5)
port = config["port"]
process_count = config["process_count"]
awaitables = [spawn_process(net_config["self_hostname"], port, i) for i in range(process_count)]
await asyncio.gather(*awaitables)
def main():
root_path = DEFAULT_ROOT_PATH
setproctitle("chia_timelord_launcher")
net_config = load_config(root_path, "config.yaml")
config = net_config["timelord_launcher"]
initialize_logging("TLauncher", config["logging"], root_path)
def signal_received():
asyncio.create_task(kill_processes())
loop = asyncio.get_event_loop()
try:
loop.add_signal_handler(signal.SIGINT, signal_received)
loop.add_signal_handler(signal.SIGTERM, signal_received)
except NotImplementedError:
log.info("signal handlers unsupported")
try:
loop.run_until_complete(spawn_all_processes(config, net_config))
finally:
log.info("Launcher fully closed.")
loop.close()
if __name__ == "__main__":
main()
|
the-stack_106_26437 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
try:
import astropy_helpers
except ImportError:
# Building from inside the docs/ directory?
if os.path.basename(os.getcwd()) == 'docs':
a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
if os.path.isdir(a_h_path):
sys.path.insert(1, a_h_path)
import astropy
try:
from sphinx_astropy.conf.v1 import * # noqa
except ImportError:
print(
'ERROR: the documentation requires the sphinx-astropy' +
' package to be installed'
)
sys.exit(1)
plot_rcparams = {}
plot_rcparams['figure.figsize'] = (6, 8)
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_rcparams['axes.labelsize'] = 'large'
plot_rcparams['figure.subplot.hspace'] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# Get configuration information from setup.cfg
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
# By default, highlight as Python 3.
highlight_language = 'python3'
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# -- Options for HTML output --------------------------------------------------
html_static_path = ['_static']
html_style = 'pyoof.css'
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Please update these texts to match the name of your package.
html_theme_options = {
'logotext1': 'py', # white, semi-bold
'logotext2': 'oof', # orange, light
'logotext3': ':docs' # white, light
}
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ''
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# -- Options for the edit_on_github extension ---------------------------------
if eval(setup_cfg.get('edit_on_github')):
extensions += ['sphinx_astropy.ext.edit_on_github']
versionmod = __import__(setup_cfg['package_name'] + '.version')
edit_on_github_project = setup_cfg['github_project']
if versionmod.version.release:
edit_on_github_branch = "v" + versionmod.version.version
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
# -- Resolving issue number to links in changelog -----------------------------
github_issues_url = 'https://github.com/{0}/issues/'.format(setup_cfg['github_project'])
# -- Inline Plotting ----------------------------------------------------------
# extensions += [
# 'matplotlib.sphinxext.only_directives',
# 'matplotlib.sphinxext.plot_directive',
# ]
# -- Options for the Sphinx gallery -------------------------------------------
# try:
# import sphinx_gallery
# extensions += ["sphinx_gallery.gen_gallery"]
# sphinx_gallery_conf = {
# 'backreferences_dir': 'generated/modules', # path to store the module using example template
# 'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_"
# 'examples_dirs': '..{}examples'.format(os.sep), # path to the examples scripts
# 'gallery_dirs': 'generated/examples', # path to save gallery generated examples
# 'reference_url': {
# 'astropy': None,
# 'matplotlib': 'http://matplotlib.org/',
# 'numpy': 'http://docs.scipy.org/doc/numpy/',
# },
# 'abort_on_example_error': True
# }
# except ImportError:
# def setup(app):
# app.warn('The sphinx_gallery extension is not installed, so the '
# 'gallery will not be built. You will probably see '
# 'additional warnings about undefined references due '
# 'to this.')
the-stack_106_26439 | import socket
from Node import Node
#socket settings: port and default header size for length-prefixed messages
HEADER = 64
PORT = 5050
FORMAT = 'utf-8' #text encoding
DISCONNECT_MESSAGE = "!DISCONNECT" #message that tells the server to close the connection
SERVER = "10.9.25.109" # ip address to connect to
ADDR = (SERVER, PORT)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #create the socket
client.connect(ADDR) #connect to the server address
def send(msg): #helper that sends a message through the socket
    message = msg.encode(FORMAT) #encode the text
    msg_length = len(message) #length of the encoded message
    send_length = str(msg_length).encode(FORMAT) #encode the length
    send_length += b' ' * (HEADER - len(send_length)) #pad the header to a fixed size
    client.send(send_length) #send the packet carrying the message length
    client.send(message) #send the message itself
    print(client.recv(2048).decode(FORMAT)) #print the server's acknowledgement
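# A minimal sketch (an assumption, not part of this client) of how the matching server
# side could read one length-prefixed message produced by send() above; the real server
# is not included in this file.
def recv_framed(conn):
    header = conn.recv(HEADER).decode(FORMAT)  # fixed-size header carrying the payload length
    if not header:
        return None  # connection closed
    msg_length = int(header)  # the header is the length padded with spaces, int() strips them
    return conn.recv(msg_length).decode(FORMAT)  # read the payload itself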
nodes = []
initialNodes = []
def codeMessage(message): #build the coding tree
    #count character frequencies and sort them from least to most frequent
    dictionary = {}
    for char in message:
        if char not in dictionary:
            dictionary[char] = 1
        else:
            dictionary[char] = dictionary[char] + 1
    sortedDictionary = sorted(dictionary.items(), key=lambda x: x[1])
    print(sortedDictionary)
    for char, count in sortedDictionary:
        initialNodes.append(Node(count)) #create a leaf node for each character count
    nodes.append(initialNodes[0])
    for i in range(1, len(initialNodes)):
        if initialNodes[i].value > nodes[i-1].value:
            nodes.append(Node(nodes[-1].value + initialNodes[i].value, nodes[-2], nodes[-1]))
string = 'Algorytm Huffmana działa rewelacyjnie!'
class NodeTree(object):
    def __init__(self, left=None, right=None):
        self.left = left #nearest left branch
        self.right = right #nearest right branch
    def children(self): #return this node's children
        return (self.left, self.right)
    def nodes(self): #alias for the pair of child nodes
        return (self.left, self.right)
    def __str__(self): #string representation of the value stored in this node
        return '%s_%s' % (self.left, self.right)
def huffmanCodeTree(node, left=True, binString=''): #walk the tree and build the Huffman code
    if type(node) is str:
        return {node: binString} #leaf: map the character to a binary string suitable for sending over the socket
    (l, r) = node.children() #descend the tree
    d = dict() #dictionary of character -> code
    d.update(huffmanCodeTree(l, True, binString + '0')) # append 0 when going left
    d.update(huffmanCodeTree(r, False, binString + '1')) # append 1 when going right
    return d
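# Hedged illustration (not in the original script): the receiver could decode the bit
# string with the same dictionary by consuming bits until they match a known code.
def huffmanDecode(bits, code):
    reverse = {v: k for k, v in code.items()}  # bit pattern -> character
    decoded, buffer = [], ''
    for bit in bits:
        buffer += bit
        if buffer in reverse:  # prefix codes guarantee an unambiguous match
            decoded.append(reverse[buffer])
            buffer = ''
    return ''.join(decoded)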
#count how often each character occurs in the message to be encoded
freq = {}
for c in string:
    if c in freq:
        freq[c] += 1
    else:
        freq[c] = 1
freq = sorted(freq.items(), key=lambda x: x[1], reverse=True) #sort by frequency
nodes = freq #start from the frequency list
while len(nodes) > 1: #merge nodes until a single tree remains
    (key1, c1) = nodes[-1] #take the two least frequent entries
    (key2, c2) = nodes[-2]
    nodes = nodes[:-2]
    node = NodeTree(key1, key2)
    nodes.append((node, c1 + c2))
    nodes = sorted(nodes, key=lambda x: x[1], reverse=True)
huffmanCode = huffmanCodeTree(nodes[0][0]) #extract the code dictionary from the finished tree
print(' Char - Huffman code ') #display header
print(huffmanCode) #display the code dictionary
messageToSend = ''
dictionaryToSend = '/'
for char in string: #append the code of each character of the message
    messageToSend = messageToSend + huffmanCode[char]
#serialize the code dictionary into a string
for char in huffmanCode:
    dictionaryToSend = dictionaryToSend + char + ':' + huffmanCode[char] + ','
print(dictionaryToSend) #display the serialized dictionary
print(messageToSend) #display the encoded message
send(dictionaryToSend) #send the dictionary
send(messageToSend) #send the encoded message
send(DISCONNECT_MESSAGE) #end of connection
the-stack_106_26440 | #
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Serial logs don't contain mount error messages
The messages:
"You are in emergency mode" / "Failed to mount" / "Unrecognized mount option"
in serial output usually indicate that a Linux instance cannot mount the root
partition.
"""
from typing import Optional
from gcpdiag import lint, models
from gcpdiag.lint.gce.utils import LogEntryShort, SerialOutputSearch
from gcpdiag.queries import gce
MOUNT_ERROR_MESSAGES = [
'You are in emergency mode', #
'Failed to mount',
'Unrecognized mount option',
]
logs_by_project = {}
def prepare_rule(context: models.Context):
logs_by_project[context.project_id] = SerialOutputSearch(
context, search_strings=MOUNT_ERROR_MESSAGES)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
search = logs_by_project[context.project_id]
instances = gce.get_instances(context).values()
if len(instances) == 0:
report.add_skipped(None, 'No instances found')
else:
for instance in sorted(instances, key=lambda i: i.name):
match: Optional[LogEntryShort] = search.get_last_match(
instance_id=instance.id)
if match:
report.add_failed(
instance,
('There are messages indicating that '
'the instance is unable to mount disk {}\n{}: "{}"').format(
instance.name, match.timestamp_iso, match.text))
else:
report.add_ok(instance)
the-stack_106_26441 | import logging
from config_utils import *
from sqlalchemy import *
from postgres_query import sql_query_action
logger = logging.getLogger()
class Camera_Query(object):
def __init__(self, postgres_db_conn):
self.conn = postgres_db_conn
def add_camera(self, camera_id, location, coordinate, address, port, uri):
query = "INSERT INTO cameras VALUES ('{}','{}','{}','{}','{}','{}')".format(camera_id,
location, coordinate, address, port, uri)
return sql_query_action(self.conn, query, camera_id)
def get_camera(self, camera_id):
table = 'cameras'
query_result = self.conn.execute(
"SELECT * FROM {} WHERE camera_id='{}'".format(table, camera_id)
).fetchall()
return query_result[0]
def get_camera_list(self):
try:
query_result = self.conn.execute(
"SELECT * FROM cameras"
).fetchall()
except Exception as e:
return []
return query_result
def edit_camera(self, camera_id, location, coordinate, address, port, uri):
query = "UPDATE cameras \
SET location='{}',coordinate='{}',address='{}',port='{}',uri='{}'\
WHERE camera_id='{}'".format(camera_id, location, coordinate, address, port, uri)
return sql_query_action(self.conn, query, camera_id)
def delete_camera(self, camera_id):
query = "DELETE FROM {} \
WHERE camera_id='{}'".format("cameras", camera_id)
return sql_query_action(self.conn, query, camera_id)
the-stack_106_26442 | import pytest
from django.urls import reverse
from ..views import get_filtered_user_queryset
pytestmark = pytest.mark.django_db(transaction=True)
@pytest.mark.parametrize(
"filter_type, filter_mode, expected_result",
[
([], "any", 5), # Find everyone
(
["notification_digest", "feedback_volunteer"],
"any",
2,
), # Find users with either selected
(
["notification_digest", "feedback_volunteer"],
"all",
0,
), # Find users with both selected
(
["notification_digest", "feedback_volunteer"],
"none",
3,
), # Find users where neither is selected.
],
)
def test_filtering(admin_testdata, filter_type, filter_mode, expected_result):
assert (
get_filtered_user_queryset(filter_type, filter_mode).count() == expected_result
)
@pytest.mark.parametrize(
"view_name, gamer_to_use, expected_status_code, expected_location",
[
("adminutils:notification", None, 302, "/accounts/login/"), # Must be logged in
("adminutils:email", None, 302, "/accounts/login/"), # Must be logged in
("adminutils:notification", "gamer1", 403, None), # Must be an admin
("adminutils:email", "gamer1", 403, None), # Must be an admin
("adminutils:notification", "admin_gamer", 200, None), # Admin can access
("adminutils:email", "admin_gamer", 200, None), # Admin can access
],
)
def test_get_views(
client,
django_assert_max_num_queries,
admin_testdata,
view_name,
gamer_to_use,
expected_status_code,
expected_location,
):
if gamer_to_use:
client.force_login(user=getattr(admin_testdata, gamer_to_use).user)
with django_assert_max_num_queries(50):
response = client.get(reverse(view_name))
assert response.status_code == expected_status_code
if expected_location:
assert expected_location in response["Location"]
@pytest.mark.parametrize(
"view_name, data_to_send",
[
(
"adminutils:notification",
{
"message": "Hello, friend",
"filter_options": ["feedback_volunteer"],
"filter_mode": "any",
},
),
(
"adminutils:email",
{
"subject": "Greetings",
"body": "Do you have the time?",
"filter_options": ["feedback_volunteer"],
"filter_mode": "any",
},
),
],
)
def test_sending_messages(client, admin_testdata, view_name, data_to_send):
client.force_login(user=admin_testdata.admin_gamer.user)
response = client.post(reverse(view_name), data=data_to_send)
assert response.status_code == 302
the-stack_106_26443 | """
:codeauthor: Jayesh Kariya <[email protected]>
"""
import salt.states.layman as layman
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class LaymanTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.states.layman
"""
def setup_loader_modules(self):
return {layman: {}}
# 'present' function tests: 1
def test_present(self):
"""
Test to verify that the overlay is present.
"""
name = "sunrise"
ret = {"name": name, "result": True, "comment": "", "changes": {}}
mock = MagicMock(side_effect=[[name], []])
with patch.dict(layman.__salt__, {"layman.list_local": mock}):
comt = "Overlay {} already present".format(name)
ret.update({"comment": comt})
self.assertDictEqual(layman.present(name), ret)
with patch.dict(layman.__opts__, {"test": True}):
comt = "Overlay {} is set to be added".format(name)
ret.update({"comment": comt, "result": None})
self.assertDictEqual(layman.present(name), ret)
# 'absent' function tests: 1
def test_absent(self):
"""
Test to verify that the overlay is absent.
"""
name = "sunrise"
ret = {"name": name, "result": True, "comment": "", "changes": {}}
mock = MagicMock(side_effect=[[], [name]])
with patch.dict(layman.__salt__, {"layman.list_local": mock}):
comt = "Overlay {} already absent".format(name)
ret.update({"comment": comt})
self.assertDictEqual(layman.absent(name), ret)
with patch.dict(layman.__opts__, {"test": True}):
comt = "Overlay {} is set to be deleted".format(name)
ret.update({"comment": comt, "result": None})
self.assertDictEqual(layman.absent(name), ret)
the-stack_106_26445 | """
---------------------------------------------------------------------
-- Author: Jhosimar George Arias Figueroa
---------------------------------------------------------------------
Custom Layers
"""
import torch
from torch import nn
from torch.nn import functional as F
# Flatten layer
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
# Reshape layer
class Reshape(nn.Module):
def __init__(self, outer_shape):
super(Reshape, self).__init__()
self.outer_shape = outer_shape
def forward(self, x):
return x.view(x.size(0), *self.outer_shape)
# Sample from the Gumbel-Softmax distribution and optionally discretize.
class GumbelSoftmax(nn.Module):
def __init__(self, f_dim, c_dim):
super(GumbelSoftmax, self).__init__()
self.logits = nn.Linear(f_dim, c_dim)
self.f_dim = f_dim
self.c_dim = c_dim
def sample_gumbel(self, shape, is_cuda=False, eps=1e-20):
U = torch.rand(shape)
if is_cuda:
U = U.cuda()
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(self, logits, temperature):
y = logits + self.sample_gumbel(logits.size(), logits.is_cuda)
return F.softmax(y / temperature, dim=-1)
def gumbel_softmax(self, logits, temperature, hard=False):
"""
        ST-Gumbel-Softmax
input: [*, n_class]
return: flatten --> [*, n_class] an one-hot vector
"""
#categorical_dim = 10
y = self.gumbel_softmax_sample(logits, temperature)
if not hard:
return y
shape = y.size()
_, ind = y.max(dim=-1)
y_hard = torch.zeros_like(y).view(-1, shape[-1])
y_hard.scatter_(1, ind.view(-1, 1), 1)
y_hard = y_hard.view(*shape)
        # Set gradients of y_hard equal to gradients of y (straight-through estimator)
y_hard = (y_hard - y).detach() + y
return y_hard
def forward(self, x, temperature=1.0, hard=False):
logits = self.logits(x).view(-1, self.c_dim)
prob = F.softmax(logits, dim=-1)
# y = self.gumbel_softmax(logits, temperature, hard)
y = F.softmax(logits, dim=-1)
return logits, prob, y
# Sample from a Gaussian distribution
class Gaussian(nn.Module):
def __init__(self, in_dim, z_dim):
super(Gaussian, self).__init__()
self.mu = nn.Linear(in_dim, z_dim)
self.var = nn.Linear(in_dim, z_dim)
def reparameterize(self, mu, var):
std = torch.sqrt(var + 1e-10)
noise = torch.randn_like(std)
z = mu + noise * std
return z
def forward(self, x):
mu = self.mu(x)
var = F.softplus(self.var(x))
z = self.reparameterize(mu, var)
return mu, var, z
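# Minimal usage sketch (illustrative only; the dimensions below are assumptions, not
# taken from the original module):
if __name__ == '__main__':
    x = torch.randn(4, 64)                      # batch of 4 feature vectors
    logits, prob, y = GumbelSoftmax(64, 10)(x)  # categorical branch: (4, 10) tensors
    mu, var, z = Gaussian(64, 16)(x)            # Gaussian branch: (4, 16) latent samples
    print(logits.shape, y.shape, z.shape)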
the-stack_106_26447 | #!/usr/bin/env python
# coding=utf-8
"""
test_neoepiscope.py
Tests functions in neoepiscope.py.
The MIT License (MIT)
Copyright (c) 2018 Mary A. Wood, Austin Nguyen,
Abhinav Nellore, and Reid Thompson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division, print_function
from inspect import getsourcefile
import os.path as path, sys
from neoepiscope import *
import unittest
import filecmp
import os
neoepiscope_dir = os.path.dirname(
os.path.dirname((os.path.abspath(getsourcefile(lambda: 0))))
)
def predicate(line):
    ''' Return False for the neoepiscope version header line so it is skipped when comparing output files '''
if "Neoepiscope version" in line:
return False
return True
class TestGTFprocessing(unittest.TestCase):
"""Tests proper creation of dictionaries store GTF data"""
def setUp(self):
"""Sets up gtf file and creates dictionaries for tests"""
self.base_dir = os.path.join(neoepiscope_dir, "tests")
self.gtf = os.path.join(self.base_dir, "Ychrom.gtf")
self.gtf2 = os.path.join(self.base_dir, "Chr11.gtf")
self.Ycds, self.Ytx = gtf_to_cds(self.gtf, "NA", pickle_it=False)
self.Ytree = cds_to_tree(self.Ycds, "NA", pickle_it=False)
self.cds11, self.tx11 = gtf_to_cds(self.gtf2, "NA", pickle_it=False)
self.tree11 = cds_to_tree(self.cds11, "NA", pickle_it=False)
def test_transcript_to_cds(self):
"""Fails if dictionary was built incorrectly"""
self.assertEqual(len(self.Ycds.keys()), 220)
start_test = [x for x in self.cds11['ENST00000429923.5_1'] if x[1] == "start_codon"]
self.assertEqual(len(start_test), 1)
self.assertEqual(start_test[0][2], 1891437)
def test_cds_tree(self):
"""Fails if dictionary was built incorrectly"""
self.assertEqual(len(self.Ytree.keys()), 1)
self.assertEqual(len(self.Ytree["chrY"]), 2585)
def test_transcript_extraction(self):
"""Fails if incorrect transcripts are pulled"""
self.assertEqual(
len(get_transcripts_from_tree("chrY", 150860, 150861, self.Ytree)), 10
)
self.coordinate_search = list(self.Ytree["chrY"].overlap(150860, 150861))
self.transcripts = []
for interval in self.coordinate_search:
self.transcripts.append(interval[2])
self.transcripts.sort()
self.assertEqual(
self.transcripts,
[
"ENST00000381657.7_3_PAR_Y",
"ENST00000381663.8_3_PAR_Y",
"ENST00000399012.6_3_PAR_Y",
"ENST00000415337.6_3_PAR_Y",
"ENST00000429181.6_2_PAR_Y",
"ENST00000430923.7_3_PAR_Y",
"ENST00000443019.6_2_PAR_Y",
"ENST00000445062.6_2_PAR_Y",
"ENST00000447472.6_3_PAR_Y",
"ENST00000448477.6_2_PAR_Y",
],
)
class TestVCFmerging(unittest.TestCase):
"""Tests proper merging of somatic and germline VCFS"""
def setUp(self):
"""Sets up files to use for tests"""
self.base_dir = os.path.join(neoepiscope_dir, "tests")
self.varscan = os.path.join(self.base_dir, "Ychrom.varscan.vcf")
self.germline = os.path.join(self.base_dir, "Ychrom.germline.vcf")
self.precombined = os.path.join(self.base_dir, "Ychrom.combined.vcf")
self.outvcf = os.path.join(self.base_dir, "Ychrom.testcombine.vcf")
combine_vcf(self.germline, self.varscan, self.outvcf)
def test_merge(self):
"""Fails if VCFs were merged improperly"""
self.assertTrue(filecmp.cmp(self.outvcf, self.precombined))
def tearDown(self):
"""Removes test file"""
os.remove(self.outvcf)
class TestPrepHapCUT(unittest.TestCase):
"""Tests addition of unphased mutations to HapCUT2 output"""
def setUp(self):
"""Sets up hapcut and vcf files to use for tests"""
self.base_dir = os.path.join(neoepiscope_dir, "tests")
self.hapcut = os.path.join(self.base_dir, "test.hapcut.out")
self.vcf = os.path.join(self.base_dir, "test.vcf")
self.phased_vcf = os.path.join(self.base_dir, "phased.vcf")
self.germline_vcf = os.path.join(self.base_dir, "germline.vcf")
self.complete_hapcut = os.path.join(self.base_dir, "complete_hapcut.out")
self.test_hapcut = os.path.join(self.base_dir, "test_complete_hapcut.out")
self.rbp_haplotypes = os.path.join(self.base_dir, "rbp.haplotypes")
self.test_rbp = os.path.join(self.base_dir, "test.rbp.haplotypes")
def test_haplotype_prep(self):
"""Tests that output of haplotype prep is correct for either regular
hapcut output or phased VCFs
"""
prep_hapcut_output(self.test_hapcut, self.hapcut, self.vcf)
self.assertTrue(filecmp.cmp(self.test_hapcut, self.complete_hapcut))
prep_hapcut_output(self.test_rbp, None, self.phased_vcf, phased_vcf=True)
self.assertTrue(filecmp.cmp(self.rbp_haplotypes, self.test_rbp))
def tearDown(self):
"""Removes test file"""
os.remove(self.test_hapcut)
os.remove(self.test_rbp)
class TestVAFpos(unittest.TestCase):
"""Tests fetching of VAF position from VCF file"""
def setUp(self):
"""Sets up vcf files to use for tests"""
self.base_dir = os.path.join(neoepiscope_dir, "tests")
self.varscan = os.path.join(self.base_dir, "Ychrom.varscan.vcf")
self.mutect = os.path.join(self.base_dir, "Ychrom.mutect.vcf")
def test_position(self):
"""Fails if incorrect positions are returned"""
self.assertEqual(get_vaf_pos(self.varscan), (5, 'FREQ'))
self.assertEqual(get_vaf_pos(self.mutect), (4, 'FA'))
class TestHaplotypeProcessing(unittest.TestCase):
"""Tests proper processing of HAPCUT2 files"""
def setUp(self):
"""Sets up input files and dictionaries to use for tests"""
self.base_dir = os.path.join(neoepiscope_dir, "tests")
self.ref_prefix = os.path.join(self.base_dir, "Chr11.ref")
self.reference_index = bowtie_index.BowtieIndexReference(self.ref_prefix)
self.Chr11gtf = os.path.join(self.base_dir, "Chr11.gtf")
self.Chr11cds, self.Chr11tx = gtf_to_cds(self.Chr11gtf, "NA", pickle_it=False)
for transcript in self.Chr11cds:
for cds_block in self.Chr11cds[transcript]:
cds_block[0] = cds_block[0].replace("chr", "")
self.Chr11tree = cds_to_tree(self.Chr11cds, "NA", pickle_it=False)
self.Chr11hapcut = os.path.join(self.base_dir, "Chr11.hapcut.out")
self.rbp_ref_prefix = os.path.join(self.base_dir, "chr14_index")
self.rbp_reference_index = bowtie_index.BowtieIndexReference(self.rbp_ref_prefix)
self.Chr14gtf = os.path.join(self.base_dir, "Chr14.gtf")
self.Chr14cds, self.Chr14tx = gtf_to_cds(self.Chr14gtf, "NA", pickle_it=False)
for transcript in self.Chr14cds:
for cds_block in self.Chr14cds[transcript]:
cds_block[0] = cds_block[0].replace("chr", "")
self.Chr14tree = cds_to_tree(self.Chr14cds, "NA", pickle_it=False)
self.phased_hapcut = os.path.join(self.base_dir, "rbp.haplotypes")
def test_hap_processing(self):
"""Fails if file is processed incorrectly"""
Chr11_txs, homozygous_vars = process_haplotypes(self.Chr11hapcut,
self.Chr11tree,
phasing=True)
phased_txs, phased_homozygous = process_haplotypes(self.phased_hapcut,
self.Chr14tree,
phasing=True)
self.assertEqual(sorted(Chr11_txs.keys()), ['ENST00000299106.8_2',
'ENST00000398531.2_2',
'ENST00000441717.3_2'])
self.assertEqual(homozygous_vars["ENST00000299106.8_2"],
[
[
"11",
134018663,
"A",
"G",
"1",
"1",
"1/1:.:17:17:0:0%:17,0,0,0:.:2",
"V",
]
]
)
self.assertEqual(Chr11_txs["ENST00000299106.8_2"],
[
[
[
"11",
134015873,
"GCAG",
4,
"1",
"0",
"0/1:.:53:52:0:0%:22,30,0,0:.:2",
"D"
],
[
"11",
134015876,
"",
"TT",
"1",
"0",
"0/1:.:53:52:0:0%:22,30,0,0:.:2",
"I"
]
],
[
[
"11",
134019062,
"T",
"C",
"0",
"1",
"0/1:.:38:38:0:0%:31,7,0,0:.:2",
"V"
]
]
]
)
self.assertEqual(
Chr11_txs["ENST00000398531.2_2"],
[
[
[
"11",
71276862,
"GT",
2,
"0",
"1",
"0/1:.:53:52:0:0%:22,30,0,0:.:2",
"D",
],
[
"11",
71276900,
"C",
"G",
"0",
"1",
"0/1:.:35:34:0:0%:19,15,0,0:.:2",
"V",
],
[
"11",
71277000,
"",
"AA",
"0",
"1",
"0/1:.:35:34:0:0%:19,15,0,0:.:2",
"I",
],
]
],
)
self.assertEqual(list(phased_txs.keys()), ['ENST00000409832.3'])
self.assertEqual(phased_homozygous, {})
self.assertEqual(
phased_txs["ENST00000409832.3"],
[
[
[
"14",
19553372,
"G",
"A",
"0",
"1",
"0/1:647,136:783:99:19553372-1,19553372-2:1684,0,17385:GERMLINE*",
"V",
],
[
"14",
19553436,
"C",
"T",
"1",
"0",
"0/1:740,103:843:99:19553372-2,19553372-1:1930,0,30239:3965.49:GERMLINE*",
"V",
],
[
"14",
19553443,
"G",
"A",
"0",
"1",
"0/1:726,98:824:99:19553372-1,19553372-2:1889,0,29565:17731.95:GERMLINE*",
"V",
],
[
"14",
19553764,
"A",
"G",
"0",
"1",
"0/1:1344,721:2065:99:19553372-1,19553372-2:15846,0,36781:726.04:GERMLINE*",
"V",
],
[
"14",
19553795,
"G",
"A",
"0",
"1",
"0/1:16.22%:19553372-1,19553372-2:8761.31:SOMATIC",
"V",
],
]
],
)
def test_maximum_clique(self):
ht = [ ['11', 5246952, 'A', 'T', '0', '1',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2', 'V'],
['11', 5246956, 'G', 'A', '0', '1',
'0/1:.:53:52:0:3.0%:22,30,0,0:.:2', 'V'],
['11', 5246956, 'G', 'T', '0', '1',
'0/1:.:53:52:0:3.0%:22,30,0,0:.:2', 'V'],
['11', 5247812, 'A', 'T', '1', '0',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2', 'V'],
['11', 5247832, 'AGCT', 4, '1', '0',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2*', 'D'],
['11', 5247834, 'CTT', 3, '1', '0',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2*', 'D'],
['11', 5248161, '', 'A', '0', '1',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2*', 'I'],
['11', 5248161, '', 'T', '0', '1',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2', 'I'],
]
cliques = list(transcript.get_haplotype_cliques(ht))
for x in cliques:
x.sort(key=itemgetter(1))
sorted_cliques = sorted(cliques)
self.assertEqual(
sorted_cliques,
[
[
('11', 5246952, 'A', 'T', '0', '1',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2', 'V'),
('11', 5246956, 'G', 'A', '0', '1',
'0/1:.:53:52:0:3.0%:22,30,0,0:.:2', 'V'),
('11', 5248161, '', 'A', '0', '1',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2*', 'I')
],
[
('11', 5246952, 'A', 'T', '0', '1',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2', 'V'),
('11', 5246956, 'G', 'A', '0', '1',
'0/1:.:53:52:0:3.0%:22,30,0,0:.:2', 'V'),
('11', 5248161, '', 'T', '0', '1',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2', 'I')
],
[
('11', 5246952, 'A', 'T', '0', '1',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2', 'V'),
('11', 5246956, 'G', 'T', '0', '1',
'0/1:.:53:52:0:3.0%:22,30,0,0:.:2', 'V'),
('11', 5248161, '', 'A', '0', '1',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2*', 'I')
],
[
('11', 5246952, 'A', 'T', '0', '1',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2', 'V'),
('11', 5246956, 'G', 'T', '0', '1',
'0/1:.:53:52:0:3.0%:22,30,0,0:.:2', 'V'),
('11', 5248161, '', 'T', '0', '1',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2', 'I')
],
[
('11', 5247812, 'A', 'T', '1', '0',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2', 'V'),
('11', 5247832, 'AGCT', 4, '1', '0',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2*', 'D')
],
[
('11', 5247812, 'A', 'T', '1', '0',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2', 'V'),
('11', 5247834, 'CTT', 3, '1', '0',
'0/1:.:35:34:0:0.1%:19,15,0,0:.:2*', 'D')
],
]
)
def test_peptide_gathering(self):
Chr11_txs = {
"ENST00000398531.2_2": [
[
[
"11",
71276651,
"CTC",
3,
"0",
"1",
"0/1:.:53:52:0:3.0%:22,30,0,0:.:2",
"D",
],
[
"11",
71277229,
"A",
"C",
"0",
"1",
"0/1:.:35:34:0:15.7%:19,15,0,0:.:2",
"V",
],
[
"11",
71277056,
"",
"AAA",
"0",
"1",
"0/1:.:35:34:0:0.1%:19,15,0,0:.:2",
"I",
],
]
]
}
homozygous_vars = {'ENST00000299106.8_2': [
[
"11",
134018663,
"A",
"G",
"1",
"1",
"1/1:.:17:17:0:0%:17,0,0,0:.:2",
"V",
]
]
}
transcript_blocks = self.Chr11cds["ENST00000398531.2_2"]
neoepitopes, fasta = get_peptides_from_transcripts(
Chr11_txs,
homozygous_vars,
(5, 'FREQ'),
self.Chr11cds,
True,
False,
False,
self.reference_index,
[8, 9, 10, 11],
False,
False,
False,
False,
False,
False,
2,
1,
protein_fasta=True,
)
self.assertEqual(len(neoepitopes.keys()), 108)
self.assertEqual(
neoepitopes["CGCSQKCN"],
[("11", 71277056, "", "AAA", "I", 0.001, "NA", "NA", "ENST00000398531.2_2")],
)
self.assertEqual(
neoepitopes["PVCCPCKI"],
[
(
"11",
71277229,
"A",
"C",
"V",
0.157,
"PVCCQCKI",
"NA",
"ENST00000398531.2_2",
)
],
)
self.assertEqual(
neoepitopes["NKQDGESYE"],
[
(
"11",
134018663,
"A",
"G",
"V",
0,
"NKQDGESYK",
"NA",
"ENST00000299106.8_2",
)
]
)
self.assertEqual(sorted(neoepitopes.keys())[0], "CCGCGGCG")
self.assertEqual(sorted(neoepitopes.keys())[-1], "YENPGKPDGVN")
self.assertEqual(
sorted(fasta["ENST00000398531.2_2"]),
[
"MGCCGCGGCGSGCGGCGSGCGGCGSGCGGYGSGCGGCGSSCCVPVCCCKPVCCCVPACSCSSCG"
"SCGGSKGDCGSCGGSKGGCGSCGGSKGGCGSCGGSKGGCGSCGGSKGGCGSCGGSKGGCGS"
"CGGSKGGCGSCGCSQKCNCCKPCCCSSGCGSCCQSSCCNPCCCQSSCCVPVCCQSSCCKPC"
"CCQSSCCVPVCCPCKIX"
],
)
class TestBindingPrediction(unittest.TestCase):
"""Tests binding prediction functions"""
def setUp(self):
""""""
self.neoepitopes = {
"CGCSQKCN": [("11", 71277056, "", "AAA", "I", 0.001, "ENST00000398531.2_2")],
"PVCCPCKI": [("11", 71277229, "A", "C", "V", 0.157, "ENST00000398531.2_2")],
}
self.tools = {
"mhcflurry1": ["mhcflurry-predict", ["affinity", "rank"]],
"mhcnuggets2": ["NA", ["affinity"]],
}
self.alleles = ["HLA-A*02:01", "HLA-B*07:02"]
self.size_list = [8]
def test_binding_scores(self):
new_neoepitopes = gather_binding_scores(
self.neoepitopes, self.tools, self.alleles, self.size_list
)
self.assertEqual(
len(new_neoepitopes["CGCSQKCN"][0]), 13)
self.assertEqual(
len(new_neoepitopes["PVCCPCKI"][0]), 13)
for score in new_neoepitopes["CGCSQKCN"][0][7:]:
self.assertEqual(type(score), str)
for score in new_neoepitopes["PVCCPCKI"][0][7:]:
self.assertEqual(type(score), str)
class TestOutput(unittest.TestCase):
"""Tests function to write output"""
def setUp(self):
"""Sets up paths and dictionaries"""
self.base_dir = os.path.join(neoepiscope_dir, "tests")
self.out_file = os.path.join(self.base_dir, "neoepiscope.out")
self.correct_out = os.path.join(self.base_dir, "expected.neoepiscope.out")
self.gtf = os.path.join(self.base_dir, "Chr11.gtf")
self.cds, self.tx = gtf_to_cds(self.gtf, "NA", pickle_it=False)
self.tools = {
"netMHCpan4": ["netMHCpan", ["rank", "affinity"]],
"netMHCIIpan3": ["netMHCIIpan", ["rank"]],
}
self.HLA_alleles = ["HLA*A01:01", "HLA*A02:01"]
self.neoepitopes = {
"CGCSQKCN": [
(
"11",
71277056,
"",
"AAA",
"I",
0.001,
"NA",
"NA",
"ENST00000398531.2_2",
5,
10000.0,
1,
5,
150,
4,
),
(
"11",
167789,
"A",
"T",
"V",
0.102,
"CGCSQCNN",
"NA",
"ENST00000410108.5_1",
0.5,
100,
0.5,
3,
150,
4,
),
],
"PVCCPCKI": [
(
"11",
71277229,
"A",
"C",
"V",
0.157,
"PVCCQCKI",
"NA",
"ENST00000398531.2_2",
10,
50.57,
1.2,
3,
10.1,
7,
),
(
"11",
71277229,
"A",
"C",
"V",
0.203,
"PVCCQCKI",
"NA",
"ENST00000325113.8_1",
10,
50.57,
1.2,
3,
10.1,
7,
),
],
}
def testwrite(self):
"""Tests that output file is written correctly"""
from sys import version_info
write_results(self.out_file, self.HLA_alleles, self.neoepitopes, self.tools, self.tx)
if version_info[0] < 3:
from itertools import izip, ifilter
with open(self.out_file) as fh1, open(self.correct_out) as fh2:
f1 = ifilter(predicate, fh1)
f2 = ifilter(predicate, fh2)
test = all(x == y for x, y in izip(f1, f2))
else:
with open(self.out_file) as fh1, open(self.correct_out) as fh2:
f1 = filter(predicate, fh1)
f2 = filter(predicate, fh2)
test = all(x == y for x, y in zip(f1, f2))
self.assertTrue(test)
def tearDown(self):
"""Removes test file"""
os.remove(self.out_file)
if __name__ == "__main__":
unittest.main()
the-stack_106_26448 | import mmdet2trt.ops.util_ops as mm2trt_util
import torch
from mmdet2trt.core.post_processing.batched_nms import BatchedNMS
from mmdet2trt.models.builder import build_wraper, register_wraper
from torch import nn
@register_wraper('mmdet.models.GARetinaHead')
class GuidedAnchorHeadWraper(nn.Module):
def __init__(self, module):
super(GuidedAnchorHeadWraper, self).__init__()
self.module = module
self.loc_filter_thr = module.loc_filter_thr
self.num_anchors = module.num_anchors
self.square_anchor_generator = build_wraper(
self.module.square_anchor_generator)
self.anchor_coder = build_wraper(self.module.anchor_coder)
self.bbox_coder = build_wraper(self.module.bbox_coder)
self.test_cfg = module.test_cfg
self.num_classes = self.module.num_classes
self.use_sigmoid_cls = self.module.use_sigmoid_cls
if ('score_thr' in module.test_cfg) and (
'nms' in module.test_cfg) and ('iou_threshold'
in module.test_cfg.nms):
self.rcnn_nms = BatchedNMS(
module.test_cfg.score_thr,
module.test_cfg.nms.iou_threshold,
backgroundLabelId=self.num_classes)
def get_anchors(self,
cls_scores,
shape_preds,
loc_preds,
use_loc_filter=False):
multi_level_squares = self.square_anchor_generator(
cls_scores, device=cls_scores[0].device)
num_levels = len(cls_scores)
guided_anchors_list = []
loc_mask_list = []
for i in range(num_levels):
squares = multi_level_squares[i]
shape_pred = shape_preds[i]
loc_pred = loc_preds[i]
guided_anchors, loc_mask = self._get_guided_anchors(
squares, shape_pred, loc_pred, use_loc_filter)
guided_anchors_list.append(guided_anchors)
loc_mask_list.append(loc_mask)
return multi_level_squares, guided_anchors_list, loc_mask_list
def _get_guided_anchors(self,
squares,
shape_pred,
loc_pred,
use_loc_filter=False):
loc_pred = loc_pred.sigmoid()
if use_loc_filter:
loc_mask = loc_pred >= self.loc_filter_thr
else:
loc_mask = loc_pred >= 0.0
mask = loc_mask.permute(0, 2, 3,
1).float().expand(-1, -1, -1, self.num_anchors)
mask = mask.view(mask.shape[0], -1)
# calculate guided anchors
squares = squares.unsqueeze(0)
anchor_deltas = shape_pred.permute(0, 2, 3, 1).contiguous().view(
shape_pred.shape[0], -1, 2)
zeros = anchor_deltas[:, :, :2] * 0.
bbox_deltas = torch.cat([zeros, anchor_deltas], dim=2)
guided_anchors = self.anchor_coder.decode(
squares, bbox_deltas, wh_ratio_clip=1e-6)
return guided_anchors, mask
def forward(self, feat, x):
img_shape = x.shape[2:]
module = self.module
cfg = self.test_cfg
cls_scores, bbox_preds, shape_preds, loc_preds = module(feat)
_, mlvl_anchors, mlvl_masks = self.get_anchors(
cls_scores, shape_preds, loc_preds, use_loc_filter=True)
mlvl_scores = []
mlvl_proposals = []
nms_pre = cfg.get('nms_pre', -1)
for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds,
mlvl_anchors,
mlvl_masks):
scores = cls_score.permute(0, 2, 3, 1).reshape(
cls_score.shape[0], -1, module.cls_out_channels).sigmoid()
if module.use_sigmoid_cls:
scores = scores.sigmoid()
else:
scores = scores.softmax(-1)
scores = scores * mask.unsqueeze(2)
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(bbox_pred.shape[0], -1, 4)
if nms_pre > 0:
                # concatenate zeros to enable topk;
                # dirty way, will find a better way in future
scores = mm2trt_util.pad_with_value(scores, 1, nms_pre, 0.)
bbox_pred = mm2trt_util.pad_with_value(bbox_pred, 1, nms_pre)
anchors = mm2trt_util.pad_with_value(anchors, 1, nms_pre)
# do topk
max_scores, _ = (scores).max(dim=2)
_, topk_inds = max_scores.topk(nms_pre, dim=1)
bbox_pred = mm2trt_util.gather_topk(bbox_pred, 1, topk_inds)
scores = mm2trt_util.gather_topk(scores, 1, topk_inds)
anchors = mm2trt_util.gather_topk(anchors, 1, topk_inds)
proposals = self.bbox_coder.decode(
anchors, bbox_pred, max_shape=img_shape)
mlvl_scores.append(scores)
mlvl_proposals.append(proposals)
mlvl_scores = torch.cat(mlvl_scores, dim=1)
mlvl_proposals = torch.cat(mlvl_proposals, dim=1)
mlvl_proposals = mlvl_proposals.unsqueeze(2)
max_scores, _ = mlvl_scores.max(dim=2)
topk_pre = max(1000, nms_pre)
_, topk_inds = max_scores.topk(
min(topk_pre, mlvl_scores.shape[1]), dim=1)
mlvl_scores = mm2trt_util.gather_topk(mlvl_scores, 1, topk_inds)
mlvl_proposals = mm2trt_util.gather_topk(mlvl_proposals, 1, topk_inds)
num_bboxes = mlvl_proposals.shape[1]
num_detected, proposals, scores, cls_id = self.rcnn_nms(
mlvl_scores, mlvl_proposals, num_bboxes, self.test_cfg.max_per_img)
return num_detected, proposals, scores, cls_id
|
the-stack_106_26450 | from krogon.k8s.k8s_env_vars import add_environment_secret
from krogon.nullable import nlist, nmap
from typing import List
import krogon.maybe as M
def cron_job(name: str, image: str):
return K8sJobTemplate(name, image)
class K8sJobTemplate:
def __init__(self, name: str, image: str):
super().__init__()
self.name = name
self.image = image
self.command = M.nothing()
self.environment_vars = []
self.schedule = '* * * * *'
self.suspend = True
def with_environment_variable(self, name: str, value: str):
self.environment_vars = self.environment_vars + [{'name': name, 'value': value}]
return self
def with_schedule(self, schedule: str):
self.schedule = schedule
self.suspend = False
return self
def with_command(self, command: str):
self.command = M.just(command)
return self
def with_environment_secret(self, secret_name: str, data: map):
self.environment_vars = add_environment_secret(self.environment_vars, secret_name, data)
return self
def run(self) -> List[dict]:
return _get_templates(self.name, self.image,
self.schedule, self.suspend,
self.environment_vars,
self.command)
def _get_templates(name: str, image: str,
schedule: str, suspend: bool, env_vars: List[str],
command: M.Maybe[List[str]]) -> List[dict]:
return nlist([
{
'kind': 'CronJob',
'apiVersion': 'batch/v1beta1',
'metadata': {'name': name},
'spec': {
'suspend': suspend,
'concurrencyPolicy': 'Forbid',
'schedule': schedule,
'jobTemplate': {'spec': {
'template': {
'metadata': {
'annotations': {
'traffic.sidecar.istio.io/excludeOutboundIPRanges': "0.0.0.0/0",
"sidecar.istio.io/inject": "false"}
},
'spec': {
'containers': nlist([
nmap({
'name': name,
'image': image,
'env': env_vars
}).append_if_value(
'command', command).to_map()
]).to_list(),
'volumes': nlist([]).to_list(),
'restartPolicy': 'Never'
}
},
'backoffLimit': 0
}},
}
}
]).to_list()
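# Illustrative usage (a sketch; the job name, image, and schedule below are assumptions):
#
#   manifests = (cron_job('nightly-cleanup', 'gcr.io/example/cleanup:latest')
#                .with_schedule('0 3 * * *')
#                .with_environment_variable('LOG_LEVEL', 'info')
#                .with_command('python cleanup.py')
#                .run())   # -> list containing a single CronJob manifest dict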
the-stack_106_26451 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import logging
import shlex
import subprocess
from collections import defaultdict
from typing import Dict, List, Optional
import detectron2.utils.comm as comm
from detectron2.data import MetadataCatalog
from detectron2.structures import BoxMode
from pycocotools.coco import COCO
from .cache_util import _cache_json_file
try:
# virtual_fs is used to support both local and manifold paths
# with syntax that is identical to the default python APIs
from virtual_fs import virtual_os as os
from virtual_fs.virtual_io import open
except ImportError:
import os
logger = logging.getLogger(__name__)
class InMemoryCOCO(COCO):
def __init__(self, loaded_json):
"""
In this in-memory version of COCO we don't load json from the file,
        but directly use a loaded_json instead. This approach improves
both robustness and efficiency, as when we convert from other formats
to COCO format, we don't need to save and re-load the json again.
"""
# load dataset
self.dataset = loaded_json
self.anns = {}
self.cats = {}
self.imgs = {}
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
self.createIndex()
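# Illustrative note (not from the original module): an InMemoryCOCO is built straight from
# an already-parsed dict, e.g. `coco_api = InMemoryCOCO(json.load(open(json_file)))`, which
# is what extended_coco_load() below does whenever a loaded_json is supplied.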
def extract_archive_file(archive_fn: str, im_dir: str):
if not os.path.exists(im_dir) or not os.listdir(im_dir):
# Dataset is not deployed. Deploy it.
archive_fns = archive_fn
# A dataset may be composed of several tgz files, or only one.
# If one, make it into a list to make the code later more general
if not isinstance(archive_fns, list):
archive_fns = [archive_fns]
logger.info(
"Extracting datasets {} to local machine at {}".format(archive_fns, im_dir)
)
if not os.path.exists(im_dir):
os.makedirs(im_dir, exist_ok=True)
for archive_fn in archive_fns:
# Extract the tgz file directly into the target directory,
# without precopy.
# Note that the tgz file contains a root directory that
# we do not want, hence the strip-components=1
commandUnpack = (
"tar -mxzf {src_file} -C {tgt_dir} " "--strip-components=1"
).format(src_file=archive_fn, tgt_dir=im_dir)
assert not subprocess.call(shlex.split(commandUnpack)), "Failed to unpack"
logger.info("Extracted {}".format(archive_fn))
def convert_coco_text_to_coco_detection_json(
source_json: str,
target_json: str,
set_type: Optional[str] = None,
min_img_size: int = 100,
text_cat_id: int = 1,
) -> Dict:
"""
This function converts a COCOText style JSON to a COCODetection style
JSON.
For COCOText see: https://vision.cornell.edu/se3/coco-text-2/
For COCODetection see: http://cocodataset.org/#overview
"""
with open(source_json, "r") as f:
coco_text_json = json.load(f)
coco_text_json["annotations"] = list(coco_text_json["anns"].values())
coco_text_json["images"] = list(coco_text_json["imgs"].values())
if set_type is not None:
# COCO Text style JSONs often mix test, train, and val sets.
# We need to make sure we only use the data type we want.
coco_text_json["images"] = [
x for x in coco_text_json["images"] if x["set"] == set_type
]
coco_text_json["categories"] = [{"name": "text", "id": text_cat_id}]
del coco_text_json["cats"]
del coco_text_json["imgs"]
del coco_text_json["anns"]
for ann in coco_text_json["annotations"]:
ann["category_id"] = text_cat_id
ann["iscrowd"] = 0
# Don't evaluate the model on illegible words
if set_type == "val" and ann["legibility"] != "legible":
ann["ignore"] = True
# Some datasets seem to have extremely small images which break downstream
# operations. If min_img_size is set, we can remove these.
coco_text_json["images"] = [
x
for x in coco_text_json["images"]
if x["height"] >= min_img_size and x["width"] >= min_img_size
]
# Remap image_ids if necessary
if isinstance(coco_text_json["images"][0]["id"], str):
image_id_remap = {
x["id"]: id_no for (id_no, x) in enumerate(coco_text_json["images"])
}
for x in coco_text_json["images"]:
x["id"] = image_id_remap[x["id"]]
for x in coco_text_json["annotations"]:
if x["image_id"] in image_id_remap:
x["image_id"] = image_id_remap[x["image_id"]]
os.makedirs(os.path.dirname(target_json), exist_ok=True)
with open(target_json, "w") as f:
json.dump(coco_text_json, f)
return coco_text_json
def valid_bbox(bbox_xywh: List[int], img_w: int, img_h: int) -> bool:
if (
bbox_xywh is None
or (bbox_xywh[3] == 0 or bbox_xywh[2] == 0)
or not (0 <= bbox_xywh[0] <= img_w - bbox_xywh[2])
or not (0 <= bbox_xywh[1] <= img_h - bbox_xywh[3])
):
return False
return True
def convert_coco_annotations(
anno_dict_list: List[Dict], record: Dict, remapped_id: Dict, error_report: Dict
):
"""
Converts annotations format of coco to internal format while applying
some filtering
"""
converted_annotations = []
for anno in anno_dict_list:
# Check that the image_id in this annotation is the same. This fails
# only when the data parsing logic or the annotation file is buggy.
assert anno["image_id"] == record["image_id"]
assert anno.get("ignore", 0) == 0
# Copy fields that do not need additional conversion
fields_to_copy = [
"iscrowd",
"bbox",
"bbox_mode",
"keypoints",
"category_id",
"extras",
"point_coords",
"point_labels",
]
# NOTE: maybe use MetadataCatalog for this
obj = {field: anno[field] for field in fields_to_copy if field in anno}
# Filter out bad annotations where category do not match
if obj.get("category_id", None) not in remapped_id:
continue
# Bounding boxes: convert and filter out bad bounding box annotations
bbox_object = obj.get("bbox", None)
if bbox_object:
if "bbox_mode" in obj:
bbox_object = BoxMode.convert(
bbox_object, obj["bbox_mode"], BoxMode.XYWH_ABS
)
else:
# Assume default box mode is always (x, y, w h)
error_report["without_bbox_mode"].cnt += 1
obj["bbox_mode"] = (
BoxMode.XYWHA_ABS if len(obj["bbox"]) == 5 else BoxMode.XYWH_ABS
)
if (
record.get("width")
and record.get("height")
and not valid_bbox(bbox_object, record["width"], record["height"])
):
error_report["without_valid_bounding_box"].cnt += 1
continue
# Segmentation: filter and add segmentation
segm = anno.get("segmentation", None)
if segm: # either list[list[float]] or dict(RLE)
if not isinstance(segm, dict):
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
error_report["without_valid_segmentation"].cnt += 1
continue # ignore this instance
obj["segmentation"] = segm
# Remap ids
obj["category_id"] = remapped_id[obj["category_id"]]
converted_annotations.append(obj)
return converted_annotations
# Error entry class for reporting coco conversion issues
class ErrorEntry:
def __init__(self, error_name, msg, cnt=0):
self.error_name = error_name
self.cnt = cnt
self.msg = msg
def __repr__(self):
return f"{self.msg} for {self.error_name}, count = {self.cnt}"
def print_conversion_report(ann_error_report, image_error_report, ex_warning_fn):
# Report image errors
report_str = ""
for error_key in image_error_report:
if image_error_report[error_key].cnt > 0:
report_str += f"\t{image_error_report[error_key]}\n"
if error_key == "ignore_image_root" and ex_warning_fn:
report_str += f"\texample file name {ex_warning_fn}\n"
# Report annotation errors
for error_key in ann_error_report:
if ann_error_report[error_key].cnt > 0:
report_str += f"\t{ann_error_report[error_key]}\n"
if len(report_str):
logger.warning(f"Conversion issues:\n{report_str}")
def convert_to_dict_list(
image_root: str,
remapped_id: Dict,
imgs: List[Dict],
anns: List[Dict],
dataset_name: Optional[str] = None,
image_direct_copy_keys: Optional[List[str]] = None,
filter_empty_annotations: Optional[bool] = True,
) -> List[Dict]:
ann_error_report = {
name: ErrorEntry(name, msg, 0)
for name, msg in [
("without_valid_segmentation", "Instance filtered"),
("without_valid_bounding_box", "Instance filtered"),
("without_bbox_mode", "Warning"),
]
}
image_error_report = {
name: ErrorEntry(name, msg, 0)
for name, msg in [
("ignore_image_root", f"Image root ignored {image_root}"),
(
"no_annotations",
"Image filtered" if filter_empty_annotations else "Warning",
),
]
}
ex_warning_fn = None
default_record = {"dataset_name": dataset_name} if dataset_name else {}
converted_dict_list = []
for (img_dict, anno_dict_list) in zip(imgs, anns):
record = copy.deepcopy(default_record)
# NOTE: besides using (relative path) in the "file_name" filed to represent
# the image resource, "extended coco" also supports using uri which
# represents an image using a single string, eg. "everstore_handle://xxx",
if "://" not in img_dict["file_name"]:
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
else:
if image_root is not None:
image_error_report["ignore_image_root"].cnt += 1
ex_warning_fn = (
ex_warning_fn if ex_warning_fn else img_dict["file_name"]
)
record["file_name"] = img_dict["file_name"]
# Setup image info and id
if "height" in img_dict or "width" in img_dict:
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["image_id"] = img_dict["id"]
# Convert annotation for dataset_dict
converted_anns = convert_coco_annotations(
anno_dict_list, record, remapped_id, ann_error_report
)
if len(converted_anns) == 0:
image_error_report["no_annotations"].cnt += 1
if filter_empty_annotations:
continue
record["annotations"] = converted_anns
# Copy keys if additionally asked
if image_direct_copy_keys:
for c_key in image_direct_copy_keys:
assert c_key in img_dict, f"{c_key} not in coco image entry annotation"
record[c_key] = img_dict[c_key]
converted_dict_list.append(record)
print_conversion_report(ann_error_report, image_error_report, ex_warning_fn)
assert len(converted_dict_list) != 0, (
f"Loaded zero entries from {dataset_name}. \n"
f" Size of inputs (imgs={len(imgs)}, anns={len(anns)})\n"
f" Image issues ({image_error_report})\n"
f" Instance issues ({ann_error_report})\n"
)
return converted_dict_list
def coco_text_load(
coco_json_file: str,
image_root: str,
source_json_file: Optional[str] = None,
dataset_name: Optional[str] = None,
archive_file: Optional[str] = None,
) -> List[Dict]:
if archive_file is not None:
if comm.get_rank() == 0:
extract_archive_file(archive_file, image_root)
comm.synchronize()
if source_json_file is not None:
# Need to convert to coco detection format
loaded_json = convert_coco_text_to_coco_detection_json(
source_json_file, coco_json_file
)
return extended_coco_load(coco_json_file, image_root, dataset_name, loaded_json)
return extended_coco_load(
coco_json_file, image_root, dataset_name, loaded_json=None
)
def extended_coco_load(
json_file: str,
image_root: str,
dataset_name: Optional[str] = None,
loaded_json: Optional[str] = None,
image_direct_copy_keys: List[str] = None,
filter_empty_annotations: Optional[bool] = True,
) -> List[Dict]:
"""
Load a json file with COCO's annotation format.
Currently only supports instance segmentation annotations.
Args:
json_file (str): full path to the json file in COCO annotation format.
image_root (str): the directory where the images in this json file exists.
dataset_name (str): the name of the dataset (e.g., "coco", "cityscapes").
If provided, this function will also put "thing_classes" into
the metadata associated with this dataset.
loaded_json (str): optional loaded json content, used in InMemoryCOCO to
avoid loading from json_file again.
Returns:
list[dict]: a list of dicts in "Detectron2 Dataset" format. (See DATASETS.md)
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
2. When `dataset_name=='coco'`,
this function will translate COCO's
incontiguous category ids to contiguous ids in [0, 80).
"""
json_file = _cache_json_file(json_file)
if loaded_json is None:
coco_api = COCO(json_file)
else:
coco_api = InMemoryCOCO(loaded_json)
# Collect classes and remap them starting from 0
all_cat_ids = coco_api.getCatIds()
all_cats = coco_api.loadCats(all_cat_ids)
all_cat_names = [c["name"] for c in sorted(all_cats, key=lambda x: x["id"])]
# Setup id remapping
remapped_id = {}
for cat_id, cat in zip(all_cat_ids, all_cats):
remapped_id[cat_id] = all_cat_names.index(cat["name"])
# Register dataset in metadata catalog
if dataset_name is not None:
# overwrite attrs
meta_dict = MetadataCatalog.get(dataset_name).as_dict()
meta_dict["thing_classes"] = all_cat_names
meta_dict["thing_dataset_id_to_contiguous_id"] = remapped_id
# update MetadataCatalog (cannot change inplace, have to remove)
MetadataCatalog.remove(dataset_name)
MetadataCatalog.get(dataset_name).set(**meta_dict)
# assert the change
assert MetadataCatalog.get(dataset_name).thing_classes == all_cat_names
# Sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
imgs = coco_api.loadImgs(img_ids)
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
logger.info("Loaded {} images from {}".format(len(imgs), json_file))
# Return the coco converted to record list
return convert_to_dict_list(
image_root,
remapped_id,
imgs,
anns,
dataset_name,
image_direct_copy_keys=image_direct_copy_keys,
filter_empty_annotations=filter_empty_annotations,
)
if __name__ == "__main__":
"""
Test the COCO json dataset loader.
Usage:
python -m detectron2.data.datasets.coco \
path/to/json path/to/image_root dataset_name
"""
import sys
import cv2
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
logger = setup_logger(name=__name__)
meta = MetadataCatalog.get(sys.argv[3])
dicts = extended_coco_load(sys.argv[1], sys.argv[2], sys.argv[3], ["cat", "dog"])
logger.info("Done loading {} samples.".format(len(dicts)))
for d in dicts:
img = cv2.imread(d["file_name"])[:, :, ::-1]
visualizer = Visualizer(img, metadata=meta)
vis = visualizer.draw_dataset_dict(d)
fpath = os.path.join("coco-data-vis", os.path.basename(d["file_name"]))
vis.save(fpath)
|
the-stack_106_26452 | from six import string_types
import numpy as np
from landlab.components.erosion_deposition.generalized_erosion_deposition import (_GeneralizedErosionDeposition,
DEFAULT_MINIMUM_TIME_STEP)
from landlab.utils.return_array import return_array_at_node
from .cfuncs import calculate_qs_in
ROOT2 = np.sqrt(2.0) # syntactic sugar for precalculated square root of 2
TIME_STEP_FACTOR = 0.5 # factor used in simple subdivision solver
class Space(_GeneralizedErosionDeposition):
"""Stream Power with Alluvium Conservation and Entrainment (SPACE)
See the publication:
Shobe, C. M., Tucker, G. E., and Barnhart, K. R.: The SPACE 1.0 model: a
Landlab component for 2-D calculation of sediment transport, bedrock
erosion, and landscape evolution, Geosci. Model Dev., 10, 4577-4604,
https://doi.org/10.5194/gmd-10-4577-2017, 2017.
Note: If timesteps are large enough that Es*dt (sediment erosion)
exceeds sediment thickness H, the 'adaptive' solver is necessary to
subdivide timesteps. Compare Es and H arrays to determine whether
timesteps are appropriate or too large for the 'basic' solver.
Parameters
----------
grid : ModelGrid
Landlab ModelGrid object
K_sed : float, field name, or array
Erodibility for sediment (units vary).
K_br : float, field name, or array
Erodibility for bedrock (units vary).
F_f : float
Fraction of permanently suspendable fines in bedrock [-].
phi : float
Sediment porosity [-].
H_star : float
Sediment thickness required for full entrainment [L].
v_s : float
Effective settling velocity for chosen grain size metric [L/T].
m_sp : float
Drainage area exponent (units vary)
n_sp : float
Slope exponent (units vary)
sp_crit_sed : float, field name, or array
Critical stream power to erode sediment [E/(TL^2)]
sp_crit_br : float, field name, or array
Critical stream power to erode rock [E/(TL^2)]
discharge_field : float, field name, or array
Discharge [L^2/T]. The default is to use the grid field
'surface_water__discharge', which is simply drainage area
multiplied by the default rainfall rate (1 m/yr). To use custom
spatially/temporally varying flow, use 'water__unit_flux_in'
as the discharge field.
solver : string
Solver to use. Options at present include:
(1) 'basic' (default): explicit forward-time extrapolation.
Simple but will become unstable if time step is too large.
(2) 'adaptive': subdivides global time step as needed to
prevent slopes from reversing and alluvium from going
negative.
Examples
---------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components.flow_routing import FlowRouter
>>> from landlab.components import DepressionFinderAndRouter
>>> from landlab.components import Space
>>> from landlab.components import FastscapeEroder
>>> np.random.seed(seed = 5000)
Define grid and initial topography:
* 5x5 grid with baselevel in the lower left corner
* All other boundary nodes closed
* Initial topography is plane tilted up to the upper right with
noise
>>> mg = RasterModelGrid((5, 5), spacing=10.0)
>>> _ = mg.add_zeros('topographic__elevation', at='node')
>>> mg.at_node['topographic__elevation'] += (mg.node_y / 10. +
... mg.node_x / 10. + np.random.rand(len(mg.node_y)) / 10.)
>>> mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True)
>>> mg.set_watershed_boundary_condition_outlet_id(
... 0, mg.at_node['topographic__elevation'], -9999.)
>>> fsc_dt = 100.
>>> space_dt = 100.
Instantiate Fastscape eroder, flow router, and depression finder
>>> fsc = FastscapeEroder(mg, K_sp=.001, m_sp=.5, n_sp=1)
>>> fr = FlowRouter(mg)
>>> df = DepressionFinderAndRouter(mg)
Burn in an initial drainage network using the Fastscape eroder:
>>> for x in range(100):
... fr.run_one_step()
... df.map_depressions()
... flooded = np.where(df.flood_status == 3)[0]
... fsc.run_one_step(dt=fsc_dt, flooded_nodes=flooded)
... mg.at_node['topographic__elevation'][0] -= 0.001 # Uplift
Add some soil to the drainage network:
>>> _ = mg.add_zeros('soil__depth', at='node', dtype=float)
>>> mg.at_node['soil__depth'] += 0.5
>>> mg.at_node['topographic__elevation'] += mg.at_node['soil__depth']
Instantiate the Space component:
>>> ha = Space(mg, K_sed=0.00001, K_br=0.00000000001,
... F_f=0.5, phi=0.1, H_star=1., v_s=0.001,
... m_sp=0.5, n_sp = 1.0, sp_crit_sed=0,
... sp_crit_br=0)
Now run the Space component for 2000 short timesteps:
>>> for x in range(2000): #Space component loop
... fr.run_one_step()
... df.map_depressions()
... flooded = np.where(df.flood_status == 3)[0]
... ha.run_one_step(dt=space_dt, flooded_nodes=flooded)
... mg.at_node['bedrock__elevation'][0] -= 2e-6 * space_dt
Now we test to see if soil depth and topography are right:
>>> np.around(mg.at_node['soil__depth'], decimals=3) # doctest: +NORMALIZE_WHITESPACE
array([ 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.495, 0.493,
0.492, 0.5 , 0.5 , 0.493, 0.493, 0.491, 0.5 , 0.5 ,
0.492, 0.491, 0.486, 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ])
>>> np.around(mg.at_node['topographic__elevation'], decimals=3) # doctest: +NORMALIZE_WHITESPACE
array([ 0.423, 1.536, 2.573, 3.511, 4.561, 1.582, 0.424, 0.429,
0.438, 5.51 , 2.54 , 0.429, 0.429, 0.439, 6.526, 3.559,
0.438, 0.439, 0.451, 7.553, 4.559, 5.541, 6.57 , 7.504,
8.51 ])
"""
_name= 'Space'
_input_var_names = (
'flow__receiver_node',
'flow__upstream_node_order',
'topographic__steepest_slope',
'drainage_area',
'soil__depth'
)
_output_var_names = (
'topographic__elevation'
'soil__depth'
)
_var_units = {
'flow__receiver_node': '-',
'flow__upstream_node_order': '-',
'topographic__steepest_slope': '-',
'drainage_area': 'm**2',
'soil__depth': 'm',
'topographic__elevation': 'm',
}
_var_mapping = {
'flow__receiver_node': 'node',
'flow__upstream_node_order': 'node',
'topographic__steepest_slope': 'node',
'drainage_area': 'node',
'soil__depth': 'node',
'topographic__elevation': 'node',
}
_var_doc = {
'flow__receiver_node':
'Node array of receivers (node that receives flow from current '
'node)',
'flow__upstream_node_order':
'Node array containing downstream-to-upstream ordered list of '
'node IDs',
'topographic__steepest_slope':
'Topographic slope at each node',
'drainage_area':
"Upstream accumulated surface area contributing to the node's "
"discharge",
'soil__depth':
'Depth of sediment above bedrock',
'topographic__elevation':
'Land surface topographic elevation',
}
_cite_as = """@Article{gmd-10-4577-2017,
AUTHOR = {Shobe, C. M. and Tucker, G. E. and Barnhart, K. R.},
TITLE = {The SPACE~1.0 model: a~Landlab component for 2-D calculation of sediment transport, bedrock erosion, and landscape evolution},
JOURNAL = {Geoscientific Model Development},
VOLUME = {10},
YEAR = {2017},
NUMBER = {12},
PAGES = {4577--4604},
URL = {https://www.geosci-model-dev.net/10/4577/2017/},
DOI = {10.5194/gmd-10-4577-2017}
}"""
def __init__(self, grid, K_sed=None, K_br=None, F_f=None,
phi=None, H_star=None, v_s=None,
m_sp=None, n_sp=None, sp_crit_sed=None,
sp_crit_br=None, discharge_field='surface_water__discharge',
solver='basic',
dt_min=DEFAULT_MINIMUM_TIME_STEP,
**kwds):
"""Initialize the Space model.
"""
super(Space, self).__init__(grid, m_sp=m_sp, n_sp=n_sp,
phi=phi, F_f=F_f, v_s=v_s,
dt_min=dt_min,
discharge_field=discharge_field)
self._grid = grid #store grid
# space specific inits
self.H_star = H_star
if 'soil__depth' in grid.at_node:
self.soil__depth = grid.at_node['soil__depth']
else:
self.soil__depth = grid.add_zeros(
'soil__depth', at='node', dtype=float)
if 'bedrock__elevation' in grid.at_node:
self.bedrock__elevation = grid.at_node['bedrock__elevation']
else:
self.bedrock__elevation = grid.add_zeros(
'bedrock__elevation', at='node', dtype=float)
self.bedrock__elevation[:] = self.topographic__elevation -\
self.soil__depth
self.Es = np.zeros(grid.number_of_nodes)
self.Er = np.zeros(grid.number_of_nodes)
#K's and critical values can be floats, grid fields, or arrays
self.K_sed = return_array_at_node(grid, K_sed)
self.K_br = return_array_at_node(grid, K_br)
self.sp_crit_sed = return_array_at_node(grid, sp_crit_sed)
self.sp_crit_br = return_array_at_node(grid, sp_crit_br)
# Handle option for solver
if solver == 'basic':
self.run_one_step = self.run_one_step_basic
elif solver == 'adaptive':
self.run_one_step = self.run_with_adaptive_time_step_solver
self.time_to_flat = np.zeros(grid.number_of_nodes)
self.porosity_factor = 1.0 / (1.0 - self.phi)
else:
raise ValueError("Parameter 'solver' must be one of: "
+ "'basic', 'adaptive'")
def _calc_erosion_rates(self):
"""Calculate erosion rates."""
        # if the sp_crit values are zero, these expressions collapse to the correct (threshold-free) forms.
omega_sed = self.K_sed * self.Q_to_the_m * np.power(self.slope, self.n_sp)
omega_br = self.K_br * self.Q_to_the_m * np.power(self.slope, self.n_sp)
omega_sed_over_sp_crit = np.divide(omega_sed, self.sp_crit_sed,
out=np.zeros_like(omega_sed),
where=self.sp_crit_sed!=0)
omega_br_over_sp_crit = np.divide(omega_br, self.sp_crit_br,
out=np.zeros_like(omega_br),
where=self.sp_crit_br!=0)
self.sed_erosion_term = omega_sed - self.sp_crit_sed * (1.0 - np.exp(-omega_sed_over_sp_crit))
self.br_erosion_term = omega_br - self.sp_crit_br * (1.0 - np.exp(-omega_br_over_sp_crit))
self.Es = self.sed_erosion_term * (1.0 - np.exp(-self.soil__depth / self.H_star))
self.Er = self.br_erosion_term * np.exp(-self.soil__depth / self.H_star)
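    # Hedged summary of the rates computed above (following the standard SPACE
    # formulation; not a new derivation): with zero thresholds the expressions
    # reduce to
    #     Es = K_sed * q**m * S**n * (1 - exp(-H / H_star))
    #     Er = K_br  * q**m * S**n * exp(-H / H_star)
    # where H is soil__depth, S the steepest slope, and q**m enters through
    # Q_to_the_m. Non-zero sp_crit_sed / sp_crit_br are applied as smoothed
    # thresholds, omega - omega_c * (1 - exp(-omega / omega_c)), rather than a
    # hard cutoff.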
def run_one_step_basic(self, dt=1.0, flooded_nodes=None, **kwds):
"""Calculate change in rock and alluvium thickness for
a time period 'dt'.
Parameters
----------
dt : float
Model timestep [T]
flooded_nodes : array
Indices of flooded nodes, passed from flow router
"""
#Choose a method for calculating erosion:
self._calc_hydrology()
self._calc_erosion_rates()
self.qs_in[:] = 0
#iterate top to bottom through the stack, calculate qs
# cythonized version of calculating qs_in
calculate_qs_in(np.flipud(self.stack),
self.flow_receivers,
self.cell_area_at_node,
self.q,
self.qs,
self.qs_in,
self.Es,
self.Er,
self.v_s,
self.F_f,
self.phi)
self.depo_rate[self.q > 0] = (self.qs[self.q > 0]
* (self.v_s / self.q[self.q > 0]))
#now, the analytical solution to soil thickness in time:
#need to distinguish D=kqS from all other cases to save from blowup!
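        # Hedged sketch of the update applied below: with the deposition rate D
        # and the sediment entrainment term (sed_erosion_term) held constant
        # over the step, soil depth follows approximately
        #     dH/dt = D / (1 - phi) - sed_erosion_term * (1 - exp(-H / H_star))
        # and the branches below apply its closed-form solution. The "blowup"
        # mask isolates the degenerate case D == K_sed * q**m * S**n, where the
        # general log-form solution would divide by zero.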
flooded = np.full(self._grid.number_of_nodes, False, dtype=bool)
flooded[flooded_nodes] = True
#distinguish cases:
blowup = self.depo_rate == self.K_sed * self.Q_to_the_m * self.slope
##first, potential blowup case:
#positive slopes, not flooded
pos_not_flood = ((self.q > 0) &
(blowup==True) &
(self.slope > 0) &
(flooded==False))
self.soil__depth[pos_not_flood] = (self.H_star *
np.log((self.sed_erosion_term[pos_not_flood] /
self.H_star) *
dt +
np.exp(self.soil__depth[pos_not_flood] /
self.H_star)))
#positive slopes, flooded
pos_flood = ((self.q > 0) &
(blowup==True) &
(self.slope > 0) &
(flooded==True))
self.soil__depth[pos_flood] = (self.depo_rate[pos_flood] / (1 - self.phi)) * dt
#non-positive slopes, not flooded
non_pos_not_flood = ((self.q > 0) &
(blowup==True) &
(self.slope <= 0) &
(flooded==False))
self.soil__depth[non_pos_not_flood] += (self.depo_rate[non_pos_not_flood] /
(1 - self.phi) *
dt)
##more general case:
pos_not_flood = ((self.q > 0) &
(blowup==False) &
(self.slope > 0) &
(flooded==False))
self.soil__depth[pos_not_flood] = (self.H_star *
np.log((1 / ((self.depo_rate[pos_not_flood] / (1 - self.phi)) /
(self.sed_erosion_term[pos_not_flood]) - 1)) *
(np.exp((self.depo_rate[pos_not_flood] / (1 - self.phi) -
(self.sed_erosion_term[pos_not_flood]))*(dt / self.H_star)) *
(((self.depo_rate[pos_not_flood] / (1 - self.phi) /
(self.sed_erosion_term[pos_not_flood])) - 1) *
np.exp(self.soil__depth[pos_not_flood] / self.H_star) + 1) - 1)))
#places where slope <= 0 but not flooded:
neg_slope_not_flooded = ((self.q > 0) &
(blowup==False) &
(self.slope <= 0) &
(flooded==False))
self.soil__depth[neg_slope_not_flooded] += (self.depo_rate[neg_slope_not_flooded] /
(1 - self.phi) *
dt)
#flooded nodes:
flooded_nodes = (self.q > 0) & (blowup==False) & (flooded==True)
self.soil__depth[flooded_nodes] += (self.depo_rate[flooded_nodes] /
(1 - self.phi) *
dt)
# where discharge exists
discharge_exists = self.q > 0
self.bedrock__elevation[discharge_exists] += dt * \
(-self.br_erosion_term[discharge_exists] * \
(np.exp(-self.soil__depth[discharge_exists] / self.H_star)))
#finally, determine topography by summing bedrock and soil
cores = self._grid.core_nodes
self.topographic__elevation[cores] = self.bedrock__elevation[cores] + \
self.soil__depth[cores]
def run_with_adaptive_time_step_solver(self, dt=1.0, flooded_nodes=[],
**kwds):
"""Run step with CHILD-like solver that adjusts time steps to prevent
slope flattening.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator
>>> import numpy as np
>>> rg = RasterModelGrid((3, 4))
>>> z = rg.add_zeros('topographic__elevation', at='node')
>>> z[:] = 0.1 * rg.x_of_node
>>> H = rg.add_zeros('soil__depth', at='node')
>>> H += 0.1
>>> br = rg.add_zeros('bedrock__elevation', at='node')
>>> br[:] = z - H
>>> fa = FlowAccumulator(rg, flow_director='FlowDirectorSteepest')
>>> fa.run_one_step()
>>> sp = Space(rg, K_sed=1.0, K_br=0.1,
... F_f=0.5, phi=0.0, H_star=1., v_s=1.0,
... m_sp=0.5, n_sp = 1.0, sp_crit_sed=0,
... sp_crit_br=0, solver='adaptive')
>>> sp.run_one_step(dt=10.0)
>>> np.round(sp.Es[5:7], 4)
array([ 0.0029, 0.0074])
>>> np.round(sp.Er[5:7], 4)
array([ 0.0032, 0.0085])
>>> np.round(H[5:7], 3)
array([ 0.088, 0.078])
"""
# Initialize remaining_time, which records how much of the global time
# step we have yet to use up.
remaining_time = dt
z = self._grid.at_node['topographic__elevation']
br = self._grid.at_node['bedrock__elevation']
H = self._grid.at_node['soil__depth']
r = self.flow_receivers
time_to_flat = np.zeros(len(z))
time_to_zero_alluv = np.zeros(len(z))
dzdt = np.zeros(len(z))
cores = self._grid.core_nodes
first_iteration = True
# Outer WHILE loop: keep going until time is used up
while remaining_time > 0.0:
# Update all the flow-link slopes.
#
# For the first iteration, we assume this has already been done
# outside the component (e.g., by flow router), but we need to do
# it ourselves on subsequent iterations.
if not first_iteration:
# update the link slopes
self._update_flow_link_slopes()
                # update where nodes are flooded. This shouldn't happen because
# of the dynamic timestepper, but just in case, we update here.
new_flooded_nodes = np.where(self.slope<0)[0]
flooded_nodes = np.asarray(np.unique(np.concatenate((
flooded_nodes, new_flooded_nodes))), dtype=np.int64)
else:
first_iteration = False
# Calculate rates of entrainment
self._calc_hydrology()
self._calc_erosion_rates()
# CORRECTION HERE?
self.Es[flooded_nodes] = 0.0
self.Er[flooded_nodes] = 0.0
# Zero out sediment influx for new iteration
self.qs_in[:] = 0
calculate_qs_in(np.flipud(self.stack),
self.flow_receivers,
self.cell_area_at_node,
self.q,
self.qs,
self.qs_in,
self.Es,
self.Er,
self.v_s,
self.F_f,
self.phi)
self.depo_rate[self.q > 0] = (self.qs[self.q > 0]
* (self.v_s / self.q[self.q > 0]))
# TODO handle flooded nodes in the above fn
            # Now look at upstream-downstream node pairs and record the
# time it would take for each pair to flatten. Take the minimum.
dzdt[cores] = self.depo_rate[cores] - (self.Es[cores] + self.Er[cores])
rocdif = dzdt - dzdt[r]
zdif = z - z[r]
time_to_flat[:] = remaining_time
converging = np.where(rocdif < 0.0)[0]
time_to_flat[converging] = -(TIME_STEP_FACTOR * zdif[converging]
/ rocdif[converging])
time_to_flat[np.where(zdif <= 0.0)[0]] = remaining_time
# From this, find the maximum stable time step with regard to slope
# evolution.
dt_max1 = np.amin(time_to_flat)
# Next we consider time to exhaust regolith
time_to_zero_alluv[:] = remaining_time
dHdt = self.porosity_factor * (self.depo_rate) - self.Es
decreasing_H = np.where(dHdt < 0.0)[0]
time_to_zero_alluv[decreasing_H] = - (TIME_STEP_FACTOR
* H[decreasing_H]
/ dHdt[decreasing_H])
# Now find the smallest time that would lead to near-empty alluv
dt_max2 = np.amin(time_to_zero_alluv)
# Take the smaller of the limits
dt_max = min(dt_max1, dt_max2)
if dt_max < self.dt_min:
dt_max = self.dt_min
# Now a vector operation: apply dzdt and dhdt to all nodes
br[cores] -= self.Er[cores] * dt_max
H[cores] += dHdt[cores] * dt_max
z[cores] = br[cores] + H[cores]
# Update remaining time and continue
remaining_time -= dt_max
|
the-stack_106_26453 | """
clint.textui.progress
~~~~~~~~~~~~~~~~~
This module provides the progressbar functionality.
"""
import os
import sys
import time
import crayons
from pipenv.environments import PIPENV_COLORBLIND, PIPENV_HIDE_EMOJIS
STREAM = sys.stderr
MILL_TEMPLATE = "%s %s %i/%i\r"
DOTS_CHAR = "."
if PIPENV_HIDE_EMOJIS:
if PIPENV_COLORBLIND:
BAR_FILLED_CHAR = "="
BAR_EMPTY_CHAR = "-"
else:
BAR_FILLED_CHAR = str(crayons.green("=", bold=True))
BAR_EMPTY_CHAR = str(crayons.black("-"))
else:
if PIPENV_COLORBLIND:
BAR_FILLED_CHAR = "▉"
BAR_EMPTY_CHAR = " "
else:
BAR_FILLED_CHAR = str(crayons.green("▉", bold=True))
BAR_EMPTY_CHAR = str(crayons.black("▉"))
if (sys.version_info[0] >= 3) and (os.name != "nt"):
BAR_TEMPLATE = " %s%s%s %i/%i — {}\r".format(crayons.black("%s"))
else:
if os.name == "nt":
BAR_TEMPLATE = " %s%s%s %i/%i - %s\r"
else:
BAR_TEMPLATE = " %s%s%s %i/%i — %s\r"
MILL_CHARS = ["|", "/", "-", "\\"]
# How long to wait before recalculating the ETA
ETA_INTERVAL = 1
# How many intervals (excluding the current one) to calculate the simple moving
# average
ETA_SMA_WINDOW = 9
class Bar:
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.done()
return False # we're not suppressing exceptions
def __init__(
self,
label="",
width=32,
hide=None,
empty_char=BAR_EMPTY_CHAR,
filled_char=BAR_FILLED_CHAR,
expected_size=None,
every=1,
):
self.label = label
self.width = width
self.hide = hide
# Only show bar in terminals by default (better for piping, logging etc.)
if hide is None:
try:
self.hide = not STREAM.isatty()
except AttributeError: # output does not support isatty()
self.hide = True
self.empty_char = empty_char
self.filled_char = filled_char
self.expected_size = expected_size
self.every = every
self.start = time.time()
self.ittimes = []
self.eta = 0
self.etadelta = time.time()
self.etadisp = self.format_time(self.eta)
self.last_progress = 0
if self.expected_size:
self.show(0)
def show(self, progress, count=None):
if count is not None:
self.expected_size = count
if self.expected_size is None:
raise Exception("expected_size not initialized")
self.last_progress = progress
if (time.time() - self.etadelta) > ETA_INTERVAL:
self.etadelta = time.time()
self.ittimes = self.ittimes[-ETA_SMA_WINDOW:] + [
-(self.start - time.time()) / (progress + 1)
]
self.eta = (
sum(self.ittimes)
/ float(len(self.ittimes))
* (self.expected_size - progress)
)
self.etadisp = self.format_time(self.eta)
x = int(self.width * progress / self.expected_size)
if not self.hide:
if (
progress % self.every == 0 # True every "every" updates
or progress == self.expected_size # And when we're done
):
STREAM.write(
BAR_TEMPLATE
% (
self.label,
self.filled_char * x,
self.empty_char * (self.width - x),
progress,
self.expected_size,
self.etadisp,
)
)
STREAM.flush()
def done(self):
self.elapsed = time.time() - self.start
elapsed_disp = self.format_time(self.elapsed)
if not self.hide:
# Print completed bar with elapsed time
STREAM.write(
BAR_TEMPLATE
% (
self.label,
self.filled_char * self.width,
self.empty_char * 0,
self.last_progress,
self.expected_size,
elapsed_disp,
)
)
STREAM.write("\n")
STREAM.flush()
def format_time(self, seconds):
return time.strftime("%H:%M:%S", time.gmtime(seconds))
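# Illustrative sketch (not part of the original module): driving a Bar by hand.
# `total_bytes` and `response_chunks` are hypothetical names.
#
#     with Bar(label="download ", expected_size=total_bytes) as progress:
#         received = 0
#         for chunk in response_chunks:
#             received += len(chunk)
#             progress.show(received)
#
# Exiting the `with` block calls done(), which redraws the full bar together
# with the elapsed time.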
def bar(
it,
label="",
width=32,
hide=None,
empty_char=BAR_EMPTY_CHAR,
filled_char=BAR_FILLED_CHAR,
expected_size=None,
every=1,
):
"""Progress iterator. Wrap your iterables with it."""
count = len(it) if expected_size is None else expected_size
with Bar(
label=label,
width=width,
hide=hide,
        empty_char=empty_char,
        filled_char=filled_char,
expected_size=count,
every=every,
) as bar:
for i, item in enumerate(it):
yield item
bar.show(i + 1)
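# Illustrative sketch: wrapping a sized iterable with bar(). `items` and
# `handle` are hypothetical; pass expected_size for generators without len().
#
#     for item in bar(items, label="processing "):
#         handle(item)
#
# Output goes to STREAM (stderr) and is hidden automatically when the stream is
# not a TTY.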
def dots(it, label="", hide=None, every=1):
"""Progress iterator. Prints a dot for each item being iterated"""
count = 0
if not hide:
STREAM.write(label)
for i, item in enumerate(it):
if not hide:
if i % every == 0: # True every "every" updates
STREAM.write(DOTS_CHAR)
sys.stderr.flush()
count += 1
yield item
STREAM.write("\n")
STREAM.flush()
def mill(it, label="", hide=None, expected_size=None, every=1):
"""Progress iterator. Prints a mill while iterating over the items."""
def _mill_char(_i):
if _i >= count:
return " "
else:
return MILL_CHARS[(_i // every) % len(MILL_CHARS)]
def _show(_i):
if not hide:
if (
_i % every == 0 # True every "every" updates
or _i == count # And when we're done
):
STREAM.write(MILL_TEMPLATE % (label, _mill_char(_i), _i, count))
STREAM.flush()
count = len(it) if expected_size is None else expected_size
if count:
_show(0)
for i, item in enumerate(it):
yield item
_show(i + 1)
if not hide:
STREAM.write("\n")
STREAM.flush()
|
the-stack_106_26454 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Evangelos A. Dimopoulos, Evan K. Irving-Pease"
__copyright__ = "Copyright 2020, University of Oxford"
__email__ = "[email protected]"
__license__ = "MIT"
import argparse
import hashlib
import os
import re
import sys
import pandas as pd
import yaml
REGEX_WHITELIST = r"[\w.-]+"
REGEX_BLACKLIST = r"[^\w.-]+"
PE = "PE"
COLLAPSED = "COLLAPSED"
SE = "SE"
WARNING = "\x1b[33m"
FAIL = "\x1b[31m"
END = "\033[0m"
WARNING_DB = 0
WARNING_USER = 0
is_tty = sys.stdout.isatty()
class ValidationError(Exception):
pass
class ArgumentCustomFormatter(argparse.HelpFormatter):
"""
Custom formatter for argparse
"""
def _get_help_string(self, action):
message = action.help
if "%(default)" not in action.help:
if action.default is not argparse.SUPPRESS and action.default is not None:
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
message += " (default: %(default)s)"
return message
class WritablePathType(object):
"""
Is this a writable path.
"""
def __call__(self, value):
from pathlib import Path
try:
path = Path(value).expanduser()
path.mkdir(parents=True, exist_ok=True)
return value
except Exception:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid writable path")
class PositiveIntType(object):
"""
Is this a positive integer
"""
def __call__(self, value):
try:
if not int(value) > 0:
raise ValueError()
except ValueError:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid positive integer")
return int(value)
class RangeType(object):
"""
Is this a valid instance of `_type` and within the range [lower, upper]
"""
def __init__(self, _type, lower, upper):
self.type = _type
self.lower = lower
self.upper = upper
def __call__(self, value):
try:
if not (self.lower <= self.type(value) <= self.upper):
raise ValueError()
except ValueError:
raise argparse.ArgumentTypeError(
f"'{value}' is not a valid {self.type.__name__} in the range ({self.lower}, {self.upper})"
)
return self.type(value)
class FloatRangeType(RangeType):
"""
Is this a float() within the given range
"""
def __init__(self, lower, upper):
super().__init__(float, lower, upper)
class IntRangeType(RangeType):
"""
Is this an int() within the given range
"""
def __init__(self, lower, upper):
super().__init__(int, lower, upper)
class BoolType(object):
"""
Is this a valid boolean
"""
def __call__(self, value):
if isinstance(value, bool):
return value
if value.lower() in ("yes", "true", "t", "y", "1"):
return True
elif value.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid boolean")
class JsonType(object):
"""
Is this a valid JSON string
"""
def __call__(self, value):
import json
try:
return json.loads(value)
except json.decoder.JSONDecodeError as error:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid JSON string\n {error}")
class SpreadsheetFileType(object):
"""
Is it a valid user input file
"""
cols = None
data = None
def __call__(self, value):
if not os.path.exists(value):
            raise argparse.ArgumentTypeError(f"'{value}' does not exist")
if os.stat(value).st_size == 0:
raise argparse.ArgumentTypeError(f"'{value}' is empty")
try:
self.data = pd.read_table(
value,
sep="\t",
header=None,
index_col=False,
)
except Exception:
raise argparse.ArgumentTypeError(f"'{value}' unknown error parsing file")
if len(self.data.columns) != len(self.cols):
raise argparse.ArgumentTypeError(
f"'{value}' must have {len(self.cols)} columns and be tab delimited (cols={len(self.data.columns)})"
)
# find the row number of any empty cells
bad_rows = ", ".join([str(i + 1) for i in self.data.index[self.data.isnull().any(axis=1)].tolist()])
if bad_rows:
raise argparse.ArgumentTypeError(f"'{value}' contains missing data in line(s): {bad_rows}")
return value
class AccessionFileType(SpreadsheetFileType):
"""
Is this a valid accession input file.
"""
cols = ["species", "accession"]
def __call__(self, value):
super().__call__(value)
# check all accessions pass the regex pattern
idx = self.cols.index("accession")
species = self.cols.index("species")
bad_list_acc = "\n".join(
[
f"line {i + 1}: '{acc}'"
for i, acc in enumerate(self.data[idx].tolist())
if re.match(REGEX_BLACKLIST, acc) is not None
]
)
bad_list_species = "\n".join(
[
f"line {i + 1}: '{tax}'"
for i, tax in enumerate(self.data[species].tolist())
if re.match(REGEX_BLACKLIST, tax) is not None
]
)
if bad_list_acc or bad_list_species:
bad_list = bad_list_acc + "\n" + bad_list_species
raise argparse.ArgumentTypeError(
f"'{value}' these accession codes or taxon names contain invalid characters:\n{bad_list}"
)
return value
class SequenceFileType(AccessionFileType):
"""
Is this a valid sequence input file.
"""
cols = ["species", "accession", "path"]
def __call__(self, value):
super().__call__(value)
# find any files that don't exist or are empty
idx = self.cols.index("path")
bad_files = "\n".join(
[
f"line {i + 1}: '{file}'"
for i, file in enumerate(self.data[idx].tolist())
if not os.path.exists(file) or os.stat(file).st_size == 0
]
)
if bad_files:
raise argparse.ArgumentTypeError(f"'{value}' these sequence files do not exist or are empty:\n{bad_files}")
return value
class SraAccessionType(object):
"""
Is this a valid SRA accession
"""
def __call__(self, value):
# import these locally to avoid cyclic import issues
from haystac.workflow.scripts.entrez_utils import entrez_esearch, entrez_efetch
try:
# query the SRA to see if this a valid accession
_, _, id_list = entrez_esearch("sra", f"{value}[Accession]")
etree = entrez_efetch("sra", id_list)
except Exception:
raise argparse.ArgumentTypeError(f"Invalid SRA accession '{value}'")
run_code = etree.find('.//RUN').attrib['accession']
if len(id_list) > 1 or value != run_code:
            raise argparse.ArgumentTypeError(
                f"The SRA accession you have provided {value} does not refer to a sequencing run. "
                f"Please visit https://www.ncbi.nlm.nih.gov/sra/ and choose a valid "
                f"sequencing run accession for the SRA accession {value}."
            )
try:
# now get the library layout
layout = etree.find(".//LIBRARY_LAYOUT/*").tag.lower()
except Exception:
raise argparse.ArgumentTypeError(f"Unable to resolve the library layout for SRA accession '{value}'")
return value, layout
class NuccoreQueryType(object):
"""
Is this a valid nuccore query
"""
def __call__(self, value):
# import these locally to avoid cyclic import issues
from haystac.workflow.scripts.entrez_utils import entrez_esearch
# check if the user has given us a file instead of a string
if os.path.isfile(value):
query = open(value).read().strip()
if not query:
raise argparse.ArgumentTypeError(f"The query file '{value}' is empty.")
else:
query = value
try:
# query nuccore to see if this a valid query
_, _, id_list = entrez_esearch("nuccore", f"{query}")
except Exception:
raise argparse.ArgumentTypeError(f"Invalid NCBI query '{query}'")
# if the query returns no result set raise error
if len(id_list) == 0:
raise argparse.ArgumentTypeError(f"No results in NCBI nucleotide for query '{query}'")
return value
class CheckExistingConfig(object):
"""
Checks the details of an existing yaml file against cli params or another yaml file
"""
def __init__(self, filename, params):
# check if second argument is a dict or a yaml file
if isinstance(params, dict):
params_config = params
elif os.path.isfile(params):
with open(params, "r") as fin_params:
params_config = yaml.safe_load(fin_params)
# check if a config already exists
if not os.path.isfile(filename):
pass
else:
# open the config file
with open(filename, "r") as fin:
existing_config = yaml.safe_load(fin)
if not isinstance(params, dict):
important_args = ["cache"]
else:
important_args = [
"cache",
"api_key",
"mismatch_probability",
"bowtie2_scaling",
"query",
"query_file",
"accessions_file",
"sequences_file",
"refseq_rep",
# "force_accessions",
# "exclude_accessions",
# "resolve_accessions",
"rank",
"mtDNA",
"aDNA",
"seed",
"fastq",
"fastq_r1",
"fastq_r2",
"sra",
"collapse",
"trim_adapters",
"sample",
"min_prob",
"query_file_md5",
"accessions_md5",
"sequences_md5",
]
for arg in important_args:
# check if all the important params match
if arg in existing_config.keys() and arg in params_config.keys():
if existing_config[arg] != params_config[arg]:
print_error(
f"You are trying to set a value for parameter {arg} on top of an already existing one "
f"(old: {existing_config[arg]}, new: {params_config[arg]}). "
f"Please either revert to the original parameter you used or create a "
f"new output directory."
)
class FastqFile(object):
"""
Is it a valid user input fastq file
"""
def __call__(self, value):
if not os.path.exists(value):
            raise argparse.ArgumentTypeError(f"'{value}' does not exist")
if os.stat(value).st_size == 0:
raise argparse.ArgumentTypeError(f"'{value}' is empty")
if ".gz" not in value:
with open(value, "r") as fin:
first_line = fin.readline()
else:
import gzip
with gzip.open(value, "rt") as fin:
first_line = fin.readline()
# if first_line[0] != "@" or first_line != '>':
# raise argparse.ArgumentTypeError(f"'{value}' is not a valid fastq file.")
return value
class BatchType(object):
"""
Is this a valid smk batch string
"""
def __call__(self, value):
try:
rulename, batch, batches = (
value.split("=")[0],
int(value.split("=")[1].split("/")[0]),
int(value.split("=")[1].split("/")[1]),
)
return rulename, batch, batches
except IndexError as error:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid snakemake batch string\n {error}")
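# Hedged example of the batch string accepted by BatchType (it mirrors
# snakemake's --batch syntax); the rule name used here is made up:
#
#     BatchType()("bowtie_index_db=2/10")  # -> ("bowtie_index_db", 2, 10)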
def get_total_paths(
checkpoints,
config,
):
"""
Get all the individual fasta file paths for the taxa in our database.
"""
sequences_df = pd.DataFrame()
if config["query"]:
pick_sequences = checkpoints.entrez_pick_sequences.get()
sequences_df = pd.read_csv(pick_sequences.output[0], sep="\t")
assert len(sequences_df) > 0, f"The entrez pick sequences file is empty {pick_sequences.output[0]}"
if config["refseq_rep"]:
sources = []
# refseq rep prok
if config["refseq_rep"] == "prokaryote_rep":
refseq_rep_prok = checkpoints.entrez_refseq_rep_prok_accessions.get()
refseq_genomes = pd.read_csv(refseq_rep_prok.output.refseq_genomes, sep="\t")
genbank_genomes = pd.read_csv(refseq_rep_prok.output.genbank_genomes, sep="\t")
assemblies = pd.read_csv(refseq_rep_prok.output.assemblies, sep="\t")
refseq_plasmids = pd.read_csv(refseq_rep_prok.output.refseq_plasmids, sep="\t")
genbank_plasmids = pd.read_csv(refseq_rep_prok.output.genbank_plasmids, sep="\t")
if not config["force_accessions"]:
invalid_assemblies = checkpoints.entrez_invalid_assemblies.get()
invalid_assembly_sequences = pd.read_csv(invalid_assemblies.output[0], sep="\t")
assemblies = assemblies[
~assemblies["AccessionVersion"].isin(invalid_assembly_sequences["AccessionVersion"])
]
sources = [
refseq_genomes,
genbank_genomes,
assemblies,
refseq_plasmids,
genbank_plasmids,
]
# refseq viruses
elif config["refseq_rep"] == "viruses":
refseq_viruses = checkpoints.entrez_refseq_viruses_accessions.get()
refseq_viral_genomes = pd.read_csv(refseq_viruses.output.refseq_viruses, sep="\t")
sources = [refseq_viral_genomes]
# refseq eukaryotes
elif config["refseq_rep"] == "eukaryotes":
refseq_eukaryotes = checkpoints.entrez_refseq_eukaryotes_accessions.get()
refseq_euk_genomes = pd.read_csv(refseq_eukaryotes.output.refseq_euk, sep="\t")
sources = [refseq_euk_genomes]
if config["query"]:
sources.append(sequences_df)
sequences_df = pd.concat(sources)
if config["sequences"] or config["accessions"]:
check_unique_taxa_in_custom_inputs(config["accessions"], config["sequences"])
if config["sequences"]:
custom_fasta_paths = pd.read_csv(
config["sequences"],
sep="\t",
header=None,
names=["species", "AccessionVersion", "path"],
)
custom_fasta_paths = check_unique_taxa_accs(custom_fasta_paths, config, config["sequences"], "user_file")
custom_seqs = custom_fasta_paths[["species", "AccessionVersion"]].copy()
custom_seqs["AccessionVersion"] = "custom_seq-" + custom_seqs["AccessionVersion"].astype(str)
sequences_df = sequences_df.append(custom_seqs)
if config["accessions"]:
custom_accessions = pd.read_csv(
config["accessions"],
sep="\t",
header=None,
names=["species", "AccessionVersion"],
)
custom_accessions = check_unique_taxa_accs(custom_accessions, config, config["accessions"], "user_file")
sequences_df = sequences_df.append(custom_accessions)
if config["genera"]:
sequences_df = sequences_df[sequences_df["species"].str.contains("|".join(config["genera"]))]
if config["exclude_accessions"]:
sequences_df = sequences_df[~sequences_df["AccessionVersion"].isin(config["exclude_accessions"])]
# check that db accessions are unique
sequences_df = check_unique_taxa_accs(sequences_df, config, "", "db")
inputs = []
for key, seq in sequences_df.iterrows():
orgname, accession = (
normalise_name(seq["species"]),
seq["AccessionVersion"],
)
inputs.append((orgname, accession))
return inputs
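# Hedged note: the returned `inputs` is a list of (normalised_taxon, accession)
# tuples, e.g. [("Yersinia_pestis", "NC_003143.1"), ...] (example values made up).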
def normalise_name(taxon):
"""remove unnecessary characters from a taxon name string."""
return re.sub(REGEX_BLACKLIST, "_", taxon)
def check_unique_taxa_in_custom_inputs(accessions, sequences):
"""Checks that custom input files have only one entry per taxon"""
if accessions != "" and sequences != "":
custom_fasta_paths = pd.read_csv(sequences, sep="\t", header=None, names=["species", "accession", "path"])
custom_accessions = pd.read_csv(accessions, sep="\t", header=None, names=["species", "accession"])
# check if any taxa in common
taxon_acc = custom_accessions["species"].tolist()
taxon_seq = custom_fasta_paths["species"].tolist()
if bool(set(taxon_acc) & set(taxon_seq)):
print_error(
"You have provided the same taxon both in your custom sequences "
"file and your custom accessions file. Please pick and keep ONLY "
"one entry from both of these files. You can only have 1 sequence "
"per chosen taxon in your database."
)
# check if any accessions in common
accession_acc = custom_accessions["accession"].tolist()
accession_seq = custom_fasta_paths["accession"].tolist()
if bool(set(accession_acc) & set(accession_seq)):
print_error(
"You have provided the same accession both in your custom sequences "
"file and your custom accessions file. Please pick and keep ONLY "
"one entry from both of these files, or change the accession entry "
"appropriately in your custom sequences file. You can only have 1 accession name "
"per chosen taxon in your database."
)
def check_unique_taxa_accs(df, config, user_input, to_check):
"""Checks that there are only unique inputs for taxa and accessions"""
# if we are checking the user files
if to_check == "user_file":
# if duplicate accession in user file raise error
if df["AccessionVersion"].duplicated().any():
dup_acc = [i for i in df[df["AccessionVersion"].duplicated()]["AccessionVersion"].to_list()]
message = (
f"{user_input} contains multiple taxa for {', '.join(dup_acc)}. "
f"Please remove/fix all duplicates. Picking automatically a taxon/accession pair in "
f"this case is not possible."
)
print_error(message)
# if duplicate species in user file either raise error, or --resolve-accessions
elif df["species"].duplicated().any():
dup_taxa = [i for i in df[df["species"].duplicated()]["species"].to_list()]
message = f"{user_input} contains multiple sequences for {', '.join(dup_taxa)}. "
if not config["resolve_accessions"]:
message += (
"Either remove all duplicates, or set the `--resolve-accessions` flag to automatically choose one. "
"It is the first accession that will be chosen."
)
print_error(message)
else:
# global WARNING_USER
for idx, val in df[df["species"].duplicated(keep="first")].iterrows():
message += f"Accession {val['AccessionVersion']} for {val['species']} was omitted."
# if WARNING_USER == 0:
print_warning(message)
# WARNING_USER += 1
df = df[~df["species"].duplicated(keep="first")]
return df
# if all good return df as is
else:
return df
# if we are checking the database in total
elif to_check == "db":
# if duplicate accessions in db either raise error, or --resolve-accessions
if df["AccessionVersion"].duplicated().any():
dup_acc = [i for i in df[df["AccessionVersion"].duplicated()]["AccessionVersion"].to_list()]
dup_tax = [i for i in df[df["AccessionVersion"].duplicated(keep=False)]["species"].to_list()]
message = (
f"Accession {', '.join(dup_acc)} appears multiple times in the database "
f"with different taxa names ({', '.join(dup_tax)}). "
)
if not config["resolve_accessions"]:
message += (
f"Please remove/fix all duplicate accessions if possible. "
f"If multiple taxa have the same accession, "
f"that is possibly due to a recent change in NCBI's taxonomy, and it is strongly "
f"advised you check the latest information for these accessions. "
f"Either specify unique pairs of taxa and accessions using the `--accessions-file` or "
f"`--sequences-file` flags, or set the `--resolve-accessions` flag to automatically "
f"choose the first one. "
)
print_error(message)
else:
# global WARNING_DB
for idx, val in df[df["AccessionVersion"].duplicated(keep="first")].iterrows():
message += (
f"{val['species']} has been excluded. It is strongly advised to "
f"check the latest taxonomy info on NCBI."
)
# if WARNING_DB == 0:
print_warning(message)
# WARNING_DB += 1
df = df[~df["AccessionVersion"].duplicated(keep="first")]
return df
# if all good return df as id
else:
return df
def get_final_db_paths(checkpoints):
"""Get all the taxon/acc pairs for the taxa in our database."""
db_sequences = checkpoints.entrez_db_list.get()
sequences_df = pd.read_csv(db_sequences.output[0], sep="\t", names=["species", "AccessionVersion"])
assert len(sequences_df) > 0, (
f"The db file containing the taxon/accession pairs is empty {db_sequences.output[0]}. "
f"Please rebuild the database."
)
inputs = []
for key, seq in sequences_df.iterrows():
orgname, accession = (
normalise_name(seq["species"]),
seq["AccessionVersion"],
)
inputs.append((orgname, accession))
return inputs
def chunker(seq, size):
return (seq[pos : pos + size] for pos in range(0, len(seq), size))
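# Hedged usage note: chunker() yields successive slices of length `size`, with a
# shorter final slice if needed, e.g.
#
#     list(chunker([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]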
def md5(filename):
hash_md5 = hashlib.md5()
# open file and get the checksum
with open(filename, "rb") as f:
# read it in chunks in case the file is big
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def print_error(message):
"""Function to print errors and exit"""
message = f"haystac: error: {message}"
print(f"{FAIL}{message}{END}" if is_tty else message, file=sys.stderr)
exit(1)
def print_warning(message):
"""Function to print warnings"""
message = f"WARNING: {message}"
print(f"{WARNING}{message}{END}" if is_tty else message, file=sys.stderr)
def get_smk_config():
"""Function to read the smk config and return a dictionary"""
try:
with open(".snakemake/config.yaml") as fin:
config = yaml.safe_load(fin)
except FileNotFoundError:
config = {}
return config
|
the-stack_106_26456 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import sys
import inspect
from py4j.protocol import Py4JJavaError
__all__ = []
def _exception_message(excp):
"""Return the message from an exception as either a str or unicode object. Supports both
Python 2 and Python 3.
>>> msg = "Exception message"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
>>> msg = u"unicöde"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
"""
if isinstance(excp, Py4JJavaError):
# 'Py4JJavaError' doesn't contain the stack trace available on the Java side in 'message'
# attribute in Python 2. We should call 'str' function on this exception in general but
# 'Py4JJavaError' has an issue about addressing non-ascii strings. So, here we work
# around by the direct call, '__str__()'. Please see SPARK-23517.
return excp.__str__()
if hasattr(excp, "message"):
return excp.message
return str(excp)
def _get_argspec(f):
"""
Get argspec of a function. Supports both Python 2 and Python 3.
"""
# `getargspec` is deprecated since python3.0 (incompatible with function annotations).
# See SPARK-23569.
if sys.version_info[0] < 3:
argspec = inspect.getargspec(f)
else:
argspec = inspect.getfullargspec(f)
return argspec
def majorMinorVersion(version):
"""
Get major and minor version numbers for given Spark version string.
>>> version = "2.4.0"
>>> majorMinorVersion(version)
(2, 4)
>>> version = "abc"
>>> majorMinorVersion(version) is None
True
"""
    m = re.search(r'^(\d+)\.(\d+)(\..*)?$', version)
if m is None:
return None
else:
return (int(m.group(1)), int(m.group(2)))
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
sys.exit(-1)
|
the-stack_106_26460 | import unittest
import numpy as np
from cpprb import LaBERmean, LaBERlazy, LaBERmax
class TestLaBER:
def test_init(self):
laber = self.cls(12)
self.assertEqual(laber.batch_size, 12)
np.testing.assert_array_equal(laber.idx, [i for i in range(12*4)])
self.assertEqual(laber.eps, 1e-6)
with self.assertRaises(ValueError):
self.cls(-12)
laber = self.cls(12, 5)
self.assertEqual(laber.batch_size, 12)
np.testing.assert_array_equal(laber.idx, [i for i in range(12*5)])
self.assertEqual(laber.eps, 1e-6)
with self.assertRaises(ValueError):
self.cls(12, -4)
laber = self.cls(12, 5, eps=1e-4)
self.assertEqual(laber.batch_size, 12)
np.testing.assert_array_equal(laber.idx, [i for i in range(12*5)])
self.assertEqual(laber.eps, 1e-4)
with self.assertRaises(ValueError):
self.cls(12, 4, eps=-2)
def test_call(self):
batch_size = 32
m = 4
m_batch = batch_size * m
laber = self.cls(batch_size, m)
with self.assertRaises(ValueError):
laber(priorities=[])
sample = laber(priorities=[1.0]*m_batch)
self.assertEqual(sample["indexes"].shape, (batch_size, ))
self.assertEqual(sample["weights"].shape, (batch_size, ))
def test_uniform(self):
laber = self.cls(2, 2)
sample = laber(priorities=[1,1,1,1])
np.testing.assert_array_equal(sample["weights"], self.uniform)
def test_onehot(self):
laber = self.cls(2, 2, eps=0)
sample = laber(priorities=[1, 0, 0, 0])
np.testing.assert_array_equal(sample["indexes"], [0, 0])
np.testing.assert_array_equal(sample["weights"], self.onehot)
class TestLaBERmean(TestLaBER, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cls = LaBERmean
cls.uniform = (1, 1)
cls.onehot = (0.25, 0.25)
class TestLaBERlazy(TestLaBER, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cls = LaBERlazy
cls.uniform = (4, 4)
cls.onehot = (1, 1)
class TestLaBERmax(TestLaBER, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cls = LaBERmax
cls.uniform = (1, 1)
cls.onehot = (1, 1)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_26461 | """
MIT License
Copyright (c) 2021 GamingGeek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from fire.converters import Member, TextChannel
from discord.ext import commands
import datetime
import discord
import asyncio
import random
import uuid
import io
class Tickets(commands.Cog, name="Tickets"):
def __init__(self, bot):
self.bot = bot
self.words = open('./words.txt').read().split(' ')
@commands.group(name='tickets', description='View all the ticket configuration commands', aliases=['ticket'])
@commands.has_permissions(manage_channels=True)
async def tickets_group(self, ctx):
if ctx.invoked_subcommand:
return
embed = discord.Embed(color=ctx.author.color, timestamp=datetime.datetime.now(
datetime.timezone.utc), description='Here are all the ticket configuration commands')
embed.add_field(
name=f'{ctx.prefix}ticket category [<category>]',
value='Set the category were tickets are made. **Setting this enables tickets**'
'\nRunning this command without providing a category resets it, therefore disabling tickets',
inline=False
)
embed.add_field(
name=f'{ctx.prefix}ticket limit <number>',
value='Limit the number of tickets a user can make, 0 = No Limit',
inline=False
)
embed.add_field(
name=f'{ctx.prefix}ticket name <name>',
value='Set the name for tickets. There are many variables available for use in the name',
inline=False
)
embed.set_author(name=str(ctx.author), icon_url=str(
ctx.author.avatar_url_as(static_format='png')))
return await ctx.send(embed=embed)
@tickets_group.command(name='category', description='Set the category where tickets are made')
@commands.has_permissions(manage_channels=True)
async def tickets_category(self, ctx, category: discord.CategoryChannel = None):
await ctx.config.set('tickets.parent', category)
if not category:
return await ctx.success(f'Successfully disabled tickets.')
return await ctx.success(f'Successfully enabled tickets and set the category to {category}.')
@tickets_group.command(name='limit', description='Set the limit for how many tickets a user can make')
@commands.has_permissions(manage_channels=True)
async def tickets_limit(self, ctx, limit: int = 0):
if limit < 0 or limit > 20:
return await ctx.error('Invalid limit')
await ctx.config.set('tickets.limit', limit)
return await ctx.success(f'Successfully set the ticket limit to {limit}')
@tickets_group.command(name='name', description='Set the name for tickets')
@commands.has_permissions(manage_channels=True)
async def tickets_name(self, ctx, name: str = None):
variables = {
'{increment}': ctx.config.get('tickets.increment'),
'{name}': ctx.author.name,
'{id}': ctx.author.id,
'{word}': random.choice(self.words),
'{uuid}': str(uuid.uuid4())[:4]
}
if not name:
variables = '\n'.join([f'{k}: {v}' for k, v in variables.items()])
current = ctx.config.get('tickets.name')
embed = discord.Embed(
color=ctx.author.color, timestamp=datetime.datetime.now(datetime.timezone.utc))
embed.add_field(name='Variables', value=variables, inline=False)
return await ctx.send(embed=embed)
if len(name) > 50:
return await ctx.error('Name is too long, it must be 50 chars or less')
await ctx.config.set('tickets.name', name)
fname = name
for k, v in variables.items():
fname = fname.replace(k, str(v))
        return await ctx.success(f'Successfully set the ticket name to {name}\nExample: {fname}')
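    # Hedged illustration of a name template (the actual template is whatever the
    # server configures): "ticket-{word}-{uuid}" might render as
    # "ticket-apple-3f2c", and "{name}-{increment}" as "geek-42".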
@commands.command(name='new', description='Makes a new ticket')
@commands.bot_has_permissions(manage_channels=True, manage_roles=True)
async def tickets_new(self, ctx, *, subject: str = "No subject given"):
creating = None
if not ctx.silent:
creating = await ctx.send('Creating your ticket...')
config = ctx.config
parent = ctx.ticket_override if config.get(
"tickets.allow_override") and ctx.ticket_override else config.get('tickets.parent')
limit = config.get('tickets.limit')
if not parent and not ctx.silent:
return await ctx.error('Tickets are not enabled here')
if limit and len([c for c in config.get("tickets.channels") if isinstance(c, discord.TextChannel) and str(ctx.author.id) in str(c.topic)]) >= limit:
if not ctx.silent:
return await ctx.error('You have too many tickets open!')
return
variables = {
'{increment}': config.get('tickets.increment'),
'{name}': ctx.author.name,
'{id}': ctx.author.id,
'{word}': random.choice(self.words),
'{uuid}': str(uuid.uuid4())[:4],
'{crab}': '🦀' # crab in the code? nah, crab in the ticket name
}
name = config.get('tickets.name')
for k, v in variables.items():
# asbyth has me putting crabs everywhere
name = name.replace(k, str(v)).replace('crab', '🦀')
overwrites = {
ctx.author: discord.PermissionOverwrite(read_messages=True, send_messages=True),
ctx.guild.me: discord.PermissionOverwrite(read_messages=True, send_messages=True, manage_channels=True, manage_roles=True),
ctx.guild.default_role: discord.PermissionOverwrite(
read_messages=False)
}
overwrites.update(parent.overwrites)
ticket = await parent.create_text_channel(
name=name[:50],
overwrites=overwrites,
topic=f'Ticket created by {ctx.author} ({ctx.author.id}) with subject "{subject}"',
reason=f'Ticket created by {ctx.author} ({ctx.author.id})'
)
embed = discord.Embed(
title=f'Ticket opened by {ctx.author}',
timestamp=datetime.datetime.now(datetime.timezone.utc),
color=ctx.author.color
)
embed.add_field(name='Subject', value=subject)
open_msg = await ticket.send(embed=embed)
# Removes any channels that no longer exist.
tchannels = [c for c in config.get('tickets.channels') if c]
tchannels.append(ticket)
await config.set('tickets.channels', tchannels)
await config.set('tickets.increment', config.get('tickets.increment') + 1)
self.bot.dispatch('ticket_create', ctx, ticket, open_msg)
if creating:
return await creating.edit(
content=f'<:yes:534174796888408074> Successfully made your ticket, {ticket.mention}'
)
@commands.command(name='add', description='Add a user to the current ticket')
@commands.bot_has_permissions(manage_roles=True)
async def tickets_add(self, ctx, *, user: Member):
tchannels = ctx.config.get('tickets.channels')
if ctx.channel not in tchannels:
return await ctx.error('This command can only be ran in ticket channels!')
if str(ctx.author.id) not in ctx.channel.topic and not ctx.author.permissions_in(ctx.channel).manage_channels:
return await ctx.error('You must own this ticket or have `Manage Channels` permission to add users')
overwrites = ctx.channel.overwrites
overwrites.update({user: discord.PermissionOverwrite(
read_messages=True, send_messages=True)})
await ctx.channel.edit(overwrites=overwrites)
return await ctx.success(f'Successfully added {user.mention} to the ticket')
@commands.command(name='remove', description='Remove a user from the current ticket')
@commands.bot_has_permissions(manage_roles=True)
async def tickets_remove(self, ctx, *, user: Member):
tchannels = ctx.config.get('tickets.channels')
if ctx.channel not in tchannels:
return await ctx.error('This command can only be ran in ticket channels!')
if str(ctx.author.id) not in ctx.channel.topic and not ctx.author.permissions_in(ctx.channel).manage_channels:
return await ctx.error('You must own this ticket or have `Manage Channels` permission to remove users')
if str(user.id) in ctx.channel.topic:
return await ctx.error('You cannot remove the ticket author')
if not user.permissions_in(ctx.channel).read_messages:
return await ctx.error(f'{user} is not here, so how are you gonna remove them? 🤔')
if user.permissions_in(ctx.channel).manage_channels:
return await ctx.error(f'You cannot remove this user')
overwrites = ctx.channel.overwrites
overwrites.update({user: discord.PermissionOverwrite(
read_messages=False, send_messages=False)})
await ctx.channel.edit(overwrites=overwrites)
return await ctx.success(f'Successfully removed {user} from the ticket')
@commands.command(name='close', description='Closes a ticket, uploads the transcript to action logs channel and sends to the ticket author')
@commands.bot_has_permissions(manage_roles=True)
@commands.max_concurrency(1, commands.BucketType.channel)
async def tickets_close(self, ctx, *, reason: str = "No Reason Provided"):
config = ctx.config
tchannels = [c for c in config.get('tickets.channels') if c]
if ctx.channel not in tchannels:
return await ctx.error('This command can only be ran in ticket channels!')
if not ctx.author.permissions_in(ctx.channel).manage_channels and not str(ctx.author.id) in str(ctx.channel.topic):
return await ctx.error('You must own this ticket or have `Manage Channels` permission to close')
await ctx.error(f'Are you sure you want to close this ticket? Type `close` to confirm')
try:
await self.bot.wait_for('message', check=lambda m: m.author == ctx.author and m.channel == ctx.channel and m.content.lower() == 'close', timeout=10)
except asyncio.TimeoutError:
return await ctx.error('No response, aborting close.')
        closing = await ctx.send('Closing ticket, this may take a bit...')
tchannels.remove(ctx.channel)
await config.set('tickets.channels', tchannels)
transcript = []
async for m in ctx.channel.history(limit=None):
transcript.append(
f'{m.author} ({m.author.id}) at {m.created_at.strftime("%d/%m/%Y @ %I:%M:%S %p")} UTC\n{m.content}')
transcript.reverse()
string = io.StringIO('\n\n'.join(transcript))
        # If the ticket author is not found for some odd reason, fall back to the message author for the log embed color
author = ctx.author
for m in ctx.channel.members:
if str(m.id) in ctx.channel.topic: # they do be the ticket author doe
author = m
try:
await m.send(f'Your ticket in {ctx.guild} was closed for the reason "{reason}". The transcript is below',
file=discord.File(string, filename=f'{ctx.channel}-transcript.txt'))
except Exception:
pass # no transcript for you, boo hoo :(
actionlogs = config.get(
'tickets.transcript_logs') or config.get('log.action')
if actionlogs:
transcript.append(
f'{len(transcript)} total messages, closed by {ctx.author}')
string = io.StringIO('\n\n'.join(transcript))
embed = discord.Embed(
title=f'Ticket {ctx.channel} was closed',
timestamp=datetime.datetime.now(datetime.timezone.utc),
color=author.color
)
embed.add_field(
name='Closed by', value=f'{ctx.author} ({ctx.author.id})', inline=False)
embed.add_field(name='Reason', value=reason, inline=False)
await actionlogs.send(
embed=embed,
# Will make this better soon
file=discord.File(
string, filename=f'transcript.txt') if ctx.channel.category.id != 755796036198596688 else None
)
await ctx.channel.delete(reason=f'Ticket closed by {ctx.author} for "{reason}"')
self.bot.dispatch('ticket_close', ctx, author)
def setup(bot):
bot.add_cog(Tickets(bot))
bot.logger.info(f'$GREENLoaded $CYANTickets $GREENmodule!')
|
the-stack_106_26462 | """
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.decorators import tags
from cloudcafe.common.tools.datagen import rand_name
from cloudcafe.compute.common.exceptions import BadRequest, ItemNotFound
from cloudroast.compute.fixtures import ComputeAdminFixture
class PrivateFlavorTest(ComputeAdminFixture):
@classmethod
def setUpClass(cls):
super(PrivateFlavorTest, cls).setUpClass()
cls.flavor_name = rand_name('flavor')
cls.flavor = cls.admin_flavors_client.create_flavor(
name=cls.flavor_name, ram='64', vcpus='1', disk='10',
is_public=False).entity
cls.admin_flavors_client.add_tenant_access(
flavor_id=cls.flavor.id, tenant=cls.user_config.tenant_id)
@classmethod
def tearDownClass(cls):
super(PrivateFlavorTest, cls).tearDownClass()
cls.admin_flavors_client.delete_flavor(cls.flavor.id)
@tags(type='positive', net='no')
def test_create_server_with_private_flavor(self):
resp = self.server_behaviors.create_active_server(
flavor_ref=self.flavor.id)
server = resp.entity
self.resources.add(server.id, self.servers_client.delete_server)
@tags(type='positive', net='no')
def test_list_private_flavor(self):
response = self.flavors_client.list_flavors_with_detail()
flavors = response.entity
self.assertIn(self.flavor, flavors)
@tags(type='positive', net='no')
def test_get_private_flavor(self):
response = self.flavors_client.get_flavor_details(self.flavor.id)
self.assertEqual(response.status_code, 200)
class PrivateFlavorNegativeTest(ComputeAdminFixture):
@classmethod
def setUpClass(cls):
super(PrivateFlavorNegativeTest, cls).setUpClass()
cls.flavor_name = rand_name('flavor')
cls.flavor = cls.admin_flavors_client.create_flavor(
name=cls.flavor_name, ram='64', vcpus='1', disk='10',
is_public=False).entity
@tags(type='negative', net='no')
def test_create_server_without_flavor_permissions_fails(self):
with self.assertRaises(BadRequest):
resp = self.server_behaviors.create_active_server(
flavor_ref=self.flavor.id)
server = resp.entity
self.resources.add(server.id, self.servers_client.delete_server)
@tags(type='negative', net='no')
def test_private_flavor_not_listed_without_permissions(self):
response = self.flavors_client.list_flavors_with_detail()
flavors = response.entity
self.assertNotIn(self.flavor, flavors)
@tags(type='negative', net='no')
def test_get_private_flavor_fails_without_permissions(self):
with self.assertRaises(ItemNotFound):
self.flavors_client.get_flavor_details(self.flavor.id)
|
the-stack_106_26463 | """Test module for tools pkg i.e. MagicTools"""
from IPython import get_ipython
import jarvis
import tools
ip = get_ipython()
my_magic = jarvis.MagicTools(ip)
def test_retrieve_pkg_version(capsys):
"""Notebook equivalent:
%retrieve_pkg_version
"""
my_magic.retrieve_pkg_version('')
captured = capsys.readouterr()
expected = ["lxml==", "notebook==", "plantuml==", "jarvis4se==", "pandas==", "python=="]
assert all(i in captured.out for i in expected)
def test_diagram_cell(capsys, mocker):
"""Notebook equivalent:
%%diagram
@startuml
!define Junction_Or circle #black
!define Junction_And circle #whitesmoke
Junction_And JunctionAnd
Junction_Or JunctionOr
archimate #Technology "VPN Server" as vpnServerA <<technology-device>>
rectangle GO #lightgreen
rectangle STOP #red
rectangle WAIT #orange
GO -up-> JunctionOr
STOP -up-> JunctionOr
STOP -down-> JunctionAnd
WAIT -down-> JunctionAnd
@enduml
"""
spy = mocker.spy(tools, "get_url_from_string")
my_magic.diagram('', "@startuml\n"
"!define Junction_Or circle #black\n"
"!define Junction_And circle #whitesmoke\n"
"\n"
"\n"
"Junction_And JunctionAnd\n"
"Junction_Or JunctionOr\n"
"\n"
"archimate #Technology 'VPN Server' as vpnServerA <<technology-device>>\n"
"\n"
"rectangle GO #lightgreen\n"
"rectangle STOP #red\n"
"rectangle WAIT #orange\n"
"GO -up-> JunctionOr\n"
"STOP -up-> JunctionOr\n"
"STOP -down-> JunctionAnd\n"
"WAIT -down-> JunctionAnd\n"
"@enduml\n")
expected_plantuml_link = "http://www.plantuml.com/plantuml/svg/TL2nhi8m3Dpz5KOTcFe7gA8JUWmK" \
"YGf651BJHgGESjCY_fx04oW8sExEToVRypue2KFdO6BeQ9bmER0ErlE-4jHMj2FC3ax" \
"fqwUZPFEoN5eRgE_yYG3WpV4a4KDQ_iIL02ZHhUrKY4KrwPQzyyqLfzlr2ZSa8yaKLO" \
"_ZcVzPYRDPUFboGwFLL1G0GZeeRk92YmepPvisD4B4oM1JLslCX4oYxSg_6ZClaH74P" \
"3wSyo9Ty17weHf_uKI_d_de-pQO4vlxisy="
expected_notebook_output = "<IPython.core.display.HTML object>\n" \
"Overview :\n" \
"<IPython.core.display.Markdown object>\n"
captured = capsys.readouterr()
assert spy.spy_return == expected_plantuml_link
assert captured.out == expected_notebook_output
|
the-stack_106_26464 | #!/usr/bin/env python3
# Copyright (c) 2021, Justin D Holcomb ([email protected]) All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
# Script Info #
################################################################################
# Title: csv_converter.py
# Author: Justin D Holcomb ([email protected])
# Created: June 28, 2021
# Version: 0.0.1
# Requires: Python 3.6+ (fstrings)
# Description: Converts a CSV from a source system, enriches the
# data, then exports to a destination CSV format.
################################################################################
# Import Modules #
################################################################################
from pathlib import Path
from os.path import exists
import argparse
import csv
import datetime
import json
import logging
################################################################################
# Variables #
################################################################################
ARGPARSE_DESCRIPTION = """
Converts a CSV from a source system, enriches the data, then exports to a
destination CSV format.
Requires:
Python 3.6+
"""
# These are the headers in the CSV for the source file.
SOURCE_HEADERS = [
    "No",
    "Customer ID",
    "Company Name",
    "Flat Charge",
    "Current Volume",
    "Current Amount",
    "YTD Volume",
    "YTD Amount",
]
# These are the headers in the CSV for the mapping file.
MAPPING_HEADERS = [
    "Customer ID",
    "Company Name",
    "Account Number",
]
# This is the header name used to match an account from the two systems.
ACCOUNT_KEY = "Customer ID"
# Contains the source field name and what it needs to be renamed to.
FIELD_NAME_CHANGE = {
"SOURCE_FIELD_NAME": "DESTINATION_FIELD_NAME",
}
# These are the headers in the CSV for the destination file (to the billing software).
BILLING_HEADERS = [
    "No",
    "Customer ID",
    "Company Name",
    "Account Number",
    "Flat Charge",
    "Current Volume",
    "Current Amount",
    "YTD Volume",
    "YTD Amount",
]
DEFAULT_SOURCE_CSV_FILENAME = "accounting.csv"
DEFAULT_MAPPING_CSV_FILENAME = "account_mapping.csv"
DEFAULT_DEST_CSV_FILENAME_PREFIX = "billing_"
DEFAULT_DEST_CSV_FILENAME = f"{DEFAULT_DEST_CSV_FILENAME_PREFIX}YYYYMMDD_HHMM.csv"
LOGGER = logging.getLogger(__name__)
################################################################################
# Functions #
################################################################################
def convert_formats(accounting_data, mapping_data):
"""
This converts the original data and changes it to match the desired format
for the destination system.
"""
line_count = 0
# Iterate over each line in source file.
for row in accounting_data:
line_count += 1
LOGGER.info(f"Converting line {line_count}")
## Check if account number is in account mapping file.
# If there is not a matching account number: prompt the user, immediately
# add it to the mapping file.
if row[ACCOUNT_KEY] not in mapping_data.keys():
row['Account Number'] = prompt_user_for_account(row['Customer ID'], row['Company Name'])
mapping_data[row[ACCOUNT_KEY]] = {
"Customer ID": row['Customer ID'],
"Company Name": row['Company Name'],
"Account Number": row['Account Number'],
}
write_csv(DEFAULT_MAPPING_CSV_FILENAME, MAPPING_HEADERS, mapping_data.values())
else:
row['Account Number'] = mapping_data[row[ACCOUNT_KEY]]['Account Number']
# Rename source field names to match expected destination field names.
for source_field, dest_field in FIELD_NAME_CHANGE.items():
if source_field in row.keys():
row[dest_field] = row[source_field]
del row[source_field]
# Add any special handling here. Such as splitting fields or other field specific processing.
return accounting_data
def load_csv(filename, headers, load_as_dict=False):
"""
Loads data from a CSV file. Can load data as an dictionary or as a list of
dictionaries.
"""
line_count = 0
if load_as_dict:
dataset = {}
else:
dataset = []
with open(filename, encoding='utf-8-sig') as fp:
csv_reader = csv.DictReader(
fp,
#fieldnames=headers,
quoting=csv.QUOTE_ALL,
lineterminator='\r\n',
delimiter=','
)
for row in csv_reader:
line_count += 1
if load_as_dict:
dataset[row[ACCOUNT_KEY]] = row
else:
dataset.append(row)
LOGGER.info(f"Processed {line_count} lines from {filename}")
return dataset
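# Illustrative shape of the data returned by load_csv (values are made up):
# with load_as_dict=True rows are keyed by ACCOUNT_KEY, e.g.
#   {"123": {"Customer ID": "123", "Company Name": "Acme", "Account Number": "A-9"}}
# while the default returns a list of row dictionaries in file order.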
def prompt_user_for_account(id, name):
"""
Prompts a user for input and returns the value.
TODO: Add error handling and input validation.
"""
return input(f"Please enter the account number for '{name}' aka ID {id}: ")
def write_billing_csv(data, user_filename):
"""
This is a wrapper to the `write_csv()` function to calculate a filename with
a timestamp.
"""
# If a filename that is not the default, use that value. Otherwise calculate
# the filename.
    if user_filename == DEFAULT_DEST_CSV_FILENAME:
timestamp = datetime.datetime.now()
timestamp_string = timestamp.strftime("%Y%m%d_%H%M")
filename = f"{DEFAULT_DEST_CSV_FILENAME_PREFIX}{timestamp_string}.csv"
else:
filename = user_filename
write_csv(filename, BILLING_HEADERS, data)
def write_csv(filename, headers, dataset):
"""
Writes data to a CSV file.
"""
with open(filename, 'w') as fp:
wr = csv.DictWriter(
fp,
fieldnames=headers,
quoting=csv.QUOTE_ALL,
lineterminator='\r\n',
delimiter=','
)
wr.writeheader()
wr.writerows(dataset)
return True
################################################################################
# Main #
################################################################################
def parse_args():
"""
Parses command line arguments.
"""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=ARGPARSE_DESCRIPTION)
parser.add_argument('--source-csv-filename',
dest="source_csv_filename",
help=f"CSV from water accounting system. Default: {DEFAULT_SOURCE_CSV_FILENAME}",
default=DEFAULT_SOURCE_CSV_FILENAME)
parser.add_argument('--dest-csv-filename',
dest="dest_csv_filename",
help=f"CSV for the billing system. The default output is f{DEFAULT_DEST_CSV_FILENAME_PREFIX}_YYYYMMDD_HHMM.csv",
default=DEFAULT_DEST_CSV_FILENAME)
parser.add_argument('-v',
dest="warn_logging",
help=f"Use WARNING level logging output.",
action="store_true",
default=False)
parser.add_argument('-vv',
dest="info_logging",
help=f"Use INFO level logging output.",
action="store_true",
default=False)
parser.add_argument('-vvv',
dest="debug_logging",
help=f"Use most verbose DEBUG level logging output.",
action="store_true",
default=False)
args = parser.parse_args()
return args
if __name__ == "__main__":
''' When manually ran from command line. '''
args = parse_args()
# Setup logging.
if args.debug_logging is True:
LOGGER.setLevel(logging.DEBUG)
elif args.info_logging is True:
LOGGER.setLevel(logging.INFO)
elif args.warn_logging is True:
LOGGER.setLevel(logging.WARNING)
else:
LOGGER.setLevel(logging.ERROR)
CH = logging.StreamHandler()
FORMATTER = logging.Formatter("%(message)s")
CH.setFormatter(FORMATTER)
LOGGER.addHandler(CH)
LOGGER.info(f"Loading water accounting data from: {args.source_csv_filename}")
LOGGER.debug(f"Expecting water accounting data headers: {SOURCE_HEADERS}")
this_months_data = load_csv(args.source_csv_filename, SOURCE_HEADERS)
LOGGER.debug("Data from water accounting:")
LOGGER.debug(json.dumps(this_months_data, indent=4))
# Create mapping file if missing.
    if not exists(DEFAULT_MAPPING_CSV_FILENAME):
        LOGGER.error("Mapping file is missing, creating it.")
Path(DEFAULT_MAPPING_CSV_FILENAME).touch()
# Load account mapping from CSV.
LOGGER.info(f"Loading account mapping data from: {DEFAULT_MAPPING_CSV_FILENAME}")
LOGGER.debug(f"Expecting account mapping headers: {MAPPING_HEADERS}")
account_mapping = load_csv(DEFAULT_MAPPING_CSV_FILENAME, MAPPING_HEADERS, load_as_dict=True)
LOGGER.debug("Data from account mapping:")
LOGGER.debug(json.dumps(account_mapping, indent=4))
# Process the water accounting CSV to the billing CSV.
LOGGER.info("Converting CSV to destination format.")
processed_data = convert_formats(this_months_data, account_mapping)
# Write the billing data to a timestamped CSV.
LOGGER.info("Writing converted data to CSV.")
write_billing_csv(processed_data, args.dest_csv_filename)
|
the-stack_106_26465 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'katana_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
        try:
            specifiers.append(s[percent+1])
        except IndexError:
            print('Failed to get specifier')
pos = percent+2
return specifiers
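# For example, find_format_specifiers('Sent %s of %1 bytes') returns
# ['s', '1']: the character directly following each '%' is collected in order.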
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
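# For example, split_format_specifiers(['s', '1', 'd']) returns ({'1'}, ['s', 'd']):
# numeric Qt-style specifiers become an (unordered) set, the rest keep their order.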
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation '%s'" % sanitize_string(translation))
return False
else:
if source_f != translation_f:
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
|
the-stack_106_26467 | """PythonDjango URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('home.urls')),
path('blog/', include('blog.urls')),
]
|
the-stack_106_26468 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
import odoo.tests
RE_ONLY = re.compile(r'QUnit\.only\(')
@odoo.tests.tagged('post_install', '-at_install')
class WebSuite(odoo.tests.HttpCase):
def test_01_js(self):
# webclient desktop test suite
self.phantom_js('/web/tests?mod=web&failfast', "", "", login='admin', timeout=1800)
def test_02_js(self):
# webclient mobile test suite
self.phantom_js('/web/tests/mobile?mod=web&failfast', "", "", login='admin', timeout=1800)
def test_check_suite(self):
# verify no js test is using `QUnit.only` as it forbid any other test to be executed
self._check_only_call('web.qunit_suite')
self._check_only_call('web.qunit_mobile_suite')
def _check_only_call(self, suite):
# As we currently aren't in a request context, we can't render `web.layout`.
# redefinied it as a minimal proxy template.
self.env.ref('web.layout').write({'arch_db': '<t t-name="web.layout"><t t-raw="head"/></t>'})
for asset in self.env['ir.qweb']._get_asset_content(suite, options={})[0]:
filename = asset['filename']
if not filename or asset['atype'] != 'text/javascript':
continue
with open(filename, 'rb') as fp:
if RE_ONLY.search(fp.read().decode('utf-8')):
self.fail("`QUnit.only()` used in file %r" % asset['url'])
|
the-stack_106_26469 | from collections import OrderedDict
from collections.abc import Mapping, Iterator
from contextlib import contextmanager
from functools import partial
from hashlib import md5
from numbers import Number
from operator import getitem
import inspect
import pickle
import os
import threading
import uuid
from tlz import merge, groupby, curry, identity
from tlz.functoolz import Compose
from .compatibility import is_dataclass, dataclass_fields
from .context import thread_state
from .core import flatten, quote, get as simple_get, literal
from .hashing import hash_buffer_hex
from .utils import Dispatch, ensure_dict, apply, key_split
from . import config, local, threaded
__all__ = (
"DaskMethodsMixin",
"annotate",
"is_dask_collection",
"compute",
"persist",
"optimize",
"visualize",
"tokenize",
"normalize_token",
"get_collection_name",
"replace_name_in_key",
"clone_key",
)
@contextmanager
def annotate(**annotations):
"""Context Manager for setting HighLevelGraph Layer annotations.
Annotations are metadata or soft constraints associated with
tasks that dask schedulers may choose to respect: They signal intent
without enforcing hard constraints. As such, they are
primarily designed for use with the distributed scheduler.
Almost any object can serve as an annotation, but small Python objects
are preferred, while large objects such as NumPy arrays are discouraged.
Callables supplied as an annotation should take a single *key* argument and
produce the appropriate annotation. Individual task keys in the annotated collection
are supplied to the callable.
Parameters
----------
**annotations : key-value pairs
Examples
--------
All tasks within array A should have priority 100 and be retried 3 times
on failure.
>>> import dask
>>> import dask.array as da
>>> with dask.annotate(priority=100, retries=3):
... A = da.ones((10000, 10000))
Prioritise tasks within Array A on flattened block ID.
>>> nblocks = (10, 10)
>>> with dask.annotate(priority=lambda k: k[1]*nblocks[1] + k[2]):
... A = da.ones((1000, 1000), chunks=(100, 100))
Annotations may be nested.
>>> with dask.annotate(priority=1):
... with dask.annotate(retries=3):
... A = da.ones((1000, 1000))
... B = A + 1
"""
# Sanity check annotations used in place of
# legacy distributed Client.{submit, persist, compute} keywords
if "workers" in annotations:
if isinstance(annotations["workers"], (list, set, tuple)):
annotations["workers"] = list(annotations["workers"])
elif isinstance(annotations["workers"], str):
annotations["workers"] = [annotations["workers"]]
elif callable(annotations["workers"]):
pass
else:
raise TypeError(
"'workers' annotation must be a sequence of str, a str or a callable, but got %s."
% annotations["workers"]
)
if (
"priority" in annotations
and not isinstance(annotations["priority"], Number)
and not callable(annotations["priority"])
):
raise TypeError(
"'priority' annotation must be a Number or a callable, but got %s"
% annotations["priority"]
)
if (
"retries" in annotations
and not isinstance(annotations["retries"], Number)
and not callable(annotations["retries"])
):
raise TypeError(
"'retries' annotation must be a Number or a callable, but got %s"
% annotations["retries"]
)
if (
"resources" in annotations
and not isinstance(annotations["resources"], dict)
and not callable(annotations["resources"])
):
raise TypeError(
"'resources' annotation must be a dict, but got %s"
% annotations["resources"]
)
if (
"allow_other_workers" in annotations
and not isinstance(annotations["allow_other_workers"], bool)
and not callable(annotations["allow_other_workers"])
):
raise TypeError(
"'allow_other_workers' annotations must be a bool or a callable, but got %s"
% annotations["allow_other_workers"]
)
prev_annotations = config.get("annotations", {})
new_annotations = {
**prev_annotations,
**{f"annotations.{k}": v for k, v in annotations.items()},
}
with config.set(new_annotations):
yield
def is_dask_collection(x):
"""Returns ``True`` if ``x`` is a dask collection"""
try:
return x.__dask_graph__() is not None
except (AttributeError, TypeError):
return False
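# For example, is_dask_collection(dask.delayed(1)) is True, while
# is_dask_collection([1, 2, 3]) is False because a plain list does not
# implement __dask_graph__().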
class DaskMethodsMixin:
"""A mixin adding standard dask collection methods"""
__slots__ = ()
def visualize(self, filename="mydask", format=None, optimize_graph=False, **kwargs):
"""Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name of the file to write to disk. If the provided `filename`
doesn't include an extension, '.png' will be used by default.
If `filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
color: {None, 'order'}, optional
Options to color nodes. Provide ``cmap=`` keyword for additional
colormap
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Examples
--------
>>> x.visualize(filename='dask.pdf') # doctest: +SKIP
>>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP
Returns
-------
        result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See Also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
https://docs.dask.org/en/latest/optimize.html
"""
return visualize(
self,
filename=filename,
format=format,
optimize_graph=optimize_graph,
**kwargs,
)
def persist(self, **kwargs):
"""Persist this dask collection into memory
This turns a lazy Dask collection into a Dask collection with the same
metadata, but now with the results fully computed or actively computing
in the background.
The action of function differs significantly depending on the active
task scheduler. If the task scheduler supports asynchronous computing,
such as is the case of the dask.distributed scheduler, then persist
will return *immediately* and the return value's task graph will
contain Dask Future objects. However if the task scheduler only
supports blocking computation then the call to persist will *block*
and the return value's task graph will contain concrete Python results.
This function is particularly useful when using distributed systems,
because the results will be kept in distributed memory, rather than
returned to the local process as with compute.
Parameters
----------
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler function.
Returns
-------
New dask collections backed by in-memory data
See Also
--------
dask.base.persist
"""
(result,) = persist(self, traverse=False, **kwargs)
return result
def compute(self, **kwargs):
"""Compute this dask collection
This turns a lazy Dask collection into its in-memory equivalent.
For example a Dask array turns into a NumPy array and a Dask dataframe
turns into a Pandas dataframe. The entire dataset must fit into memory
before calling this operation.
Parameters
----------
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler function.
See Also
--------
dask.base.compute
"""
(result,) = compute(self, traverse=False, **kwargs)
return result
def __await__(self):
try:
from distributed import wait, futures_of
except ImportError as e:
raise ImportError(
"Using async/await with dask requires the `distributed` package"
) from e
from tornado import gen
@gen.coroutine
def f():
if futures_of(self):
yield wait(self)
raise gen.Return(self)
return f().__await__()
def compute_as_if_collection(cls, dsk, keys, scheduler=None, get=None, **kwargs):
"""Compute a graph as if it were of type cls.
Allows for applying the same optimizations and default scheduler."""
schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get)
dsk2 = optimization_function(cls)(ensure_dict(dsk), keys, **kwargs)
return schedule(dsk2, keys, **kwargs)
def dont_optimize(dsk, keys, **kwargs):
return dsk
def optimization_function(x):
return getattr(x, "__dask_optimize__", dont_optimize)
def collections_to_dsk(collections, optimize_graph=True, optimizations=(), **kwargs):
"""
Convert many collections into a single dask graph, after optimization
"""
from .highlevelgraph import HighLevelGraph
optimizations = tuple(optimizations) + tuple(config.get("optimizations", ()))
if optimize_graph:
groups = groupby(optimization_function, collections)
graphs = []
for opt, val in groups.items():
dsk, keys = _extract_graph_and_keys(val)
dsk = opt(dsk, keys, **kwargs)
for opt in optimizations:
dsk = opt(dsk, keys, **kwargs)
graphs.append(dsk)
# Merge all graphs
if any(isinstance(graph, HighLevelGraph) for graph in graphs):
dsk = HighLevelGraph.merge(*graphs)
else:
dsk = merge(*map(ensure_dict, graphs))
else:
dsk, _ = _extract_graph_and_keys(collections)
return dsk
def _extract_graph_and_keys(vals):
"""Given a list of dask vals, return a single graph and a list of keys such
that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``."""
from .highlevelgraph import HighLevelGraph
graphs, keys = [], []
for v in vals:
graphs.append(v.__dask_graph__())
keys.append(v.__dask_keys__())
if any(isinstance(graph, HighLevelGraph) for graph in graphs):
graph = HighLevelGraph.merge(*graphs)
else:
graph = merge(*map(ensure_dict, graphs))
return graph, keys
def unpack_collections(*args, **kwargs):
"""Extract collections in preparation for compute/persist/etc...
Intended use is to find all collections in a set of (possibly nested)
python objects, do something to them (compute, etc...), then repackage them
in equivalent python objects.
Parameters
----------
*args
Any number of objects. If it is a dask collection, it's extracted and
added to the list of collections returned. By default, python builtin
collections are also traversed to look for dask collections (for more
information see the ``traverse`` keyword).
traverse : bool, optional
If True (default), builtin python collections are traversed looking for
any dask collections they might contain.
Returns
-------
collections : list
A list of all dask collections contained in ``args``
repack : callable
A function to call on the transformed collections to repackage them as
they were in the original ``args``.
"""
traverse = kwargs.pop("traverse", True)
collections = []
repack_dsk = {}
collections_token = uuid.uuid4().hex
def _unpack(expr):
if is_dask_collection(expr):
tok = tokenize(expr)
if tok not in repack_dsk:
repack_dsk[tok] = (getitem, collections_token, len(collections))
collections.append(expr)
return tok
tok = uuid.uuid4().hex
if not traverse:
tsk = quote(expr)
else:
# Treat iterators like lists
typ = list if isinstance(expr, Iterator) else type(expr)
if typ in (list, tuple, set):
tsk = (typ, [_unpack(i) for i in expr])
elif typ in (dict, OrderedDict):
tsk = (typ, [[_unpack(k), _unpack(v)] for k, v in expr.items()])
elif is_dataclass(expr) and not isinstance(expr, type):
tsk = (
apply,
typ,
(),
(
dict,
[
[f.name, _unpack(getattr(expr, f.name))]
for f in dataclass_fields(expr)
],
),
)
else:
return expr
repack_dsk[tok] = tsk
return tok
out = uuid.uuid4().hex
repack_dsk[out] = (tuple, [_unpack(i) for i in args])
def repack(results):
dsk = repack_dsk.copy()
dsk[collections_token] = quote(results)
return simple_get(dsk, out)
return collections, repack
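# Minimal sketch of how unpack_collections pairs with repack (assumes
# dask.array is importable; illustrative only):
#
#   import dask.array as da
#   collections, repack = unpack_collections({"x": da.ones(3), "n": 1})
#   # `collections` holds the single dask array found in the dict; once its
#   # result is computed, repack([result]) rebuilds the original argument
#   # tuple, i.e. ({"x": result, "n": 1},).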
def optimize(*args, **kwargs):
"""Optimize several dask collections at once.
Returns equivalent dask collections that all share the same merged and
optimized underlying graph. This can be useful if converting multiple
collections to delayed objects, or to manually apply the optimizations at
strategic points.
Note that in most cases you shouldn't need to call this method directly.
Parameters
----------
*args : objects
Any number of objects. If a dask object, its graph is optimized and
merged with all those of all other dask objects before returning an
equivalent dask collection. Non-dask arguments are passed through
unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``optimize``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
optimizations : list of callables, optional
Additional optimization passes to perform.
**kwargs
Extra keyword arguments to forward to the optimization passes.
Examples
--------
>>> import dask as d
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> a2, b2 = d.optimize(a, b)
>>> a2.compute() == a.compute()
True
>>> b2.compute() == b.compute()
True
"""
collections, repack = unpack_collections(*args, **kwargs)
if not collections:
return args
dsk = collections_to_dsk(collections, **kwargs)
postpersists = []
for a in collections:
r, s = a.__dask_postpersist__()
postpersists.append(r(dsk, *s))
return repack(postpersists)
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If it is a dask object, it's computed and the
result is returned. By default, python builtin collections are also
traversed to look for dask objects (for more information see the
``traverse`` keyword). Non-dask arguments are passed through unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``compute``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler function.
Examples
--------
>>> import dask as d
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> d.compute(a, b)
(45, 4.5)
By default, dask objects inside python collections will also be computed:
>>> d.compute({'a': a, 'b': b, 'c': 1})
({'a': 45, 'b': 4.5, 'c': 1},)
"""
traverse = kwargs.pop("traverse", True)
optimize_graph = kwargs.pop("optimize_graph", True)
collections, repack = unpack_collections(*args, traverse=traverse)
if not collections:
return args
schedule = get_scheduler(
scheduler=kwargs.pop("scheduler", None),
collections=collections,
get=kwargs.pop("get", None),
)
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys, postcomputes = [], []
for x in collections:
keys.append(x.__dask_keys__())
postcomputes.append(x.__dask_postcompute__())
results = schedule(dsk, keys, **kwargs)
return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name of the file to write to disk. If the provided `filename`
doesn't include an extension, '.png' will be used by default.
If `filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
color : {None, 'order'}, optional
Options to color nodes. Provide ``cmap=`` keyword for additional
colormap
collapse_outputs : bool, optional
Whether to collapse output boxes, which often have empty labels.
Default is False.
verbose : bool, optional
Whether to label output and input boxes even if the data aren't chunked.
Beware: these labels can get very long. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Examples
--------
>>> x.visualize(filename='dask.pdf') # doctest: +SKIP
>>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP
Returns
-------
    result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See Also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
https://docs.dask.org/en/latest/optimize.html
"""
from dask.dot import dot_graph
filename = kwargs.pop("filename", "mydask")
optimize_graph = kwargs.pop("optimize_graph", False)
dsks = []
args3 = []
for arg in args:
if isinstance(arg, (list, tuple, set)):
for a in arg:
if isinstance(a, Mapping):
dsks.append(a)
if is_dask_collection(a):
args3.append(a)
else:
if isinstance(arg, Mapping):
dsks.append(arg)
if is_dask_collection(arg):
args3.append(arg)
dsk = dict(collections_to_dsk(args3, optimize_graph=optimize_graph))
for d in dsks:
dsk.update(d)
color = kwargs.get("color")
if color == "order":
from .order import order
import matplotlib.pyplot as plt
o = order(dsk)
try:
cmap = kwargs.pop("cmap")
except KeyError:
cmap = plt.cm.RdBu
if isinstance(cmap, str):
import matplotlib.pyplot as plt
cmap = getattr(plt.cm, cmap)
mx = max(o.values()) + 1
colors = {k: _colorize(cmap(v / mx, bytes=True)) for k, v in o.items()}
kwargs["function_attributes"] = {
k: {"color": v, "label": str(o[k])} for k, v in colors.items()
}
kwargs["data_attributes"] = {k: {"color": v} for k, v in colors.items()}
elif color:
raise NotImplementedError("Unknown value color=%s" % color)
return dot_graph(dsk, filename=filename, **kwargs)
def persist(*args, **kwargs):
"""Persist multiple Dask collections into memory
This turns lazy Dask collections into Dask collections with the same
metadata, but now with their results fully computed or actively computing
in the background.
For example a lazy dask.array built up from many lazy calls will now be a
dask.array of the same shape, dtype, chunks, etc., but now with all of
those previously lazy tasks either computed in memory as many small :class:`numpy.array`
(in the single-machine case) or asynchronously running in the
background on a cluster (in the distributed case).
This function operates differently if a ``dask.distributed.Client`` exists
and is connected to a distributed scheduler. In this case this function
will return as soon as the task graph has been submitted to the cluster,
but before the computations have completed. Computations will continue
asynchronously in the background. When using this function with the single
machine scheduler it blocks until the computations have finished.
When using Dask on a single machine you should ensure that the dataset fits
entirely within memory.
Examples
--------
>>> df = dd.read_csv('/path/to/*.csv') # doctest: +SKIP
>>> df = df[df.name == 'Alice'] # doctest: +SKIP
>>> df['in-debt'] = df.balance < 0 # doctest: +SKIP
>>> df = df.persist() # triggers computation # doctest: +SKIP
>>> df.value().min() # future computations are now fast # doctest: +SKIP
-10
>>> df.value().max() # doctest: +SKIP
100
>>> from dask import persist # use persist function on multiple collections
>>> a, b = persist(a, b) # doctest: +SKIP
Parameters
----------
*args: Dask collections
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``persist``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler function.
Returns
-------
New dask collections backed by in-memory data
"""
traverse = kwargs.pop("traverse", True)
optimize_graph = kwargs.pop("optimize_graph", True)
collections, repack = unpack_collections(*args, traverse=traverse)
if not collections:
return args
schedule = get_scheduler(
scheduler=kwargs.pop("scheduler", None), collections=collections
)
if inspect.ismethod(schedule):
try:
from distributed.client import default_client
except ImportError:
pass
else:
try:
client = default_client()
except ValueError:
pass
else:
if client.get == schedule:
results = client.persist(
collections, optimize_graph=optimize_graph, **kwargs
)
return repack(results)
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys, postpersists = [], []
for a in collections:
a_keys = list(flatten(a.__dask_keys__()))
rebuild, state = a.__dask_postpersist__()
keys.extend(a_keys)
postpersists.append((rebuild, a_keys, state))
results = schedule(dsk, keys, **kwargs)
d = dict(zip(keys, results))
results2 = [r({k: d[k] for k in ks}, *s) for r, ks, s in postpersists]
return repack(results2)
############
# Tokenize #
############
def tokenize(*args, **kwargs):
"""Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
normalize_token = Dispatch()
normalize_token.register(
(int, float, str, bytes, type(None), type, slice, complex, type(Ellipsis)), identity
)
@normalize_token.register(dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@normalize_token.register(OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
@normalize_token.register(set)
def normalize_set(s):
return normalize_token(sorted(s, key=str))
@normalize_token.register((tuple, list))
def normalize_seq(seq):
def func(seq):
try:
return list(map(normalize_token, seq))
except RecursionError:
return str(uuid.uuid4())
return type(seq).__name__, func(seq)
@normalize_token.register(literal)
def normalize_literal(lit):
return "literal", normalize_token(lit())
@normalize_token.register(range)
def normalize_range(r):
return list(map(normalize_token, [r.start, r.stop, r.step]))
@normalize_token.register(object)
def normalize_object(o):
method = getattr(o, "__dask_tokenize__", None)
if method is not None:
return method()
return normalize_function(o) if callable(o) else uuid.uuid4().hex
function_cache = {}
function_cache_lock = threading.Lock()
def normalize_function(func):
try:
return function_cache[func]
except KeyError:
result = _normalize_function(func)
if len(function_cache) >= 500: # clear half of cache if full
with function_cache_lock:
if len(function_cache) >= 500:
for k in list(function_cache)[::2]:
del function_cache[k]
function_cache[func] = result
return result
except TypeError: # not hashable
return _normalize_function(func)
def _normalize_function(func):
if isinstance(func, Compose):
first = getattr(func, "first", None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, (partial, curry)):
args = tuple(normalize_token(i) for i in func.args)
if func.keywords:
kws = tuple(
(k, normalize_token(v)) for k, v in sorted(func.keywords.items())
)
else:
kws = None
return (normalize_function(func.func), args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b"__main__" not in result: # abort on dynamic functions
return result
except Exception:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except Exception:
return str(func)
@normalize_token.register_lazy("pandas")
def register_pandas():
import pandas as pd
@normalize_token.register(pd.Index)
def normalize_index(ind):
values = ind.array
return [ind.name, normalize_token(values)]
@normalize_token.register(pd.MultiIndex)
def normalize_index(ind):
codes = ind.codes
return (
[ind.name]
+ [normalize_token(x) for x in ind.levels]
+ [normalize_token(x) for x in codes]
)
@normalize_token.register(pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes), normalize_token(cat.dtype)]
@normalize_token.register(pd.arrays.PeriodArray)
@normalize_token.register(pd.arrays.DatetimeArray)
@normalize_token.register(pd.arrays.TimedeltaArray)
def normalize_period_array(arr):
return [normalize_token(arr.asi8), normalize_token(arr.dtype)]
@normalize_token.register(pd.arrays.IntervalArray)
def normalize_interval_array(arr):
return [
normalize_token(arr.left),
normalize_token(arr.right),
normalize_token(arr.closed),
]
@normalize_token.register(pd.Series)
def normalize_series(s):
return [
s.name,
s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index),
]
@normalize_token.register(pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data.extend([df.columns, df.index])
return list(map(normalize_token, data))
@normalize_token.register(pd.api.extensions.ExtensionArray)
def normalize_extension_array(arr):
import numpy as np
return normalize_token(np.asarray(arr))
# Dtypes
@normalize_token.register(pd.api.types.CategoricalDtype)
def normalize_categorical_dtype(dtype):
return [normalize_token(dtype.categories), normalize_token(dtype.ordered)]
@normalize_token.register(pd.api.extensions.ExtensionDtype)
def normalize_period_dtype(dtype):
return normalize_token(dtype.name)
@normalize_token.register_lazy("numpy")
def register_numpy():
import numpy as np
@normalize_token.register(np.ndarray)
def normalize_array(x):
if not x.shape:
return (x.item(), x.dtype)
if hasattr(x, "mode") and getattr(x, "filename", None):
if hasattr(x.base, "ctypes"):
offset = (
x.ctypes.get_as_parameter().value
- x.base.ctypes.get_as_parameter().value
)
else:
offset = 0 # root memmap's have mmap object as base
if hasattr(
x, "offset"
): # offset numpy used while opening, and not the offset to the beginning of the file
offset += getattr(x, "offset")
return (
x.filename,
os.path.getmtime(x.filename),
x.dtype,
x.shape,
x.strides,
offset,
)
if x.dtype.hasobject:
try:
try:
# string fast-path
data = hash_buffer_hex(
"-".join(x.flat).encode(
encoding="utf-8", errors="surrogatepass"
)
)
except UnicodeDecodeError:
# bytes fast-path
data = hash_buffer_hex(b"-".join(x.flat))
except (TypeError, UnicodeDecodeError):
try:
data = hash_buffer_hex(pickle.dumps(x, pickle.HIGHEST_PROTOCOL))
except Exception:
# pickling not supported, use UUID4-based fallback
data = uuid.uuid4().hex
else:
try:
data = hash_buffer_hex(x.ravel(order="K").view("i1"))
except (BufferError, AttributeError, ValueError):
data = hash_buffer_hex(x.copy().ravel(order="K").view("i1"))
return (data, x.dtype, x.shape, x.strides)
@normalize_token.register(np.matrix)
def normalize_matrix(x):
return type(x).__name__, normalize_array(x.view(type=np.ndarray))
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
@normalize_token.register(np.ufunc)
def normalize_ufunc(x):
try:
name = x.__name__
if getattr(np, name) is x:
return "np." + name
except AttributeError:
return normalize_function(x)
@normalize_token.register_lazy("scipy")
def register_scipy():
import scipy.sparse as sp
def normalize_sparse_matrix(x, attrs):
return (
type(x).__name__,
normalize_seq((normalize_token(getattr(x, key)) for key in attrs)),
)
for cls, attrs in [
(sp.dia_matrix, ("data", "offsets", "shape")),
(sp.bsr_matrix, ("data", "indices", "indptr", "blocksize", "shape")),
(sp.coo_matrix, ("data", "row", "col", "shape")),
(sp.csr_matrix, ("data", "indices", "indptr", "shape")),
(sp.csc_matrix, ("data", "indices", "indptr", "shape")),
(sp.lil_matrix, ("data", "rows", "shape")),
]:
normalize_token.register(cls, partial(normalize_sparse_matrix, attrs=attrs))
@normalize_token.register(sp.dok_matrix)
def normalize_dok_matrix(x):
return type(x).__name__, normalize_token(sorted(x.items()))
def _colorize(t):
"""Convert (r, g, b) triple to "#RRGGBB" string
For use with ``visualize(color=...)``
Examples
--------
>>> _colorize((255, 255, 255))
'#FFFFFF'
>>> _colorize((0, 32, 128))
'#002080'
"""
t = t[:3]
i = sum(v * 256 ** (len(t) - i - 1) for i, v in enumerate(t))
h = hex(int(i))[2:].upper()
h = "0" * (6 - len(h)) + h
return "#" + h
named_schedulers = {
"sync": local.get_sync,
"synchronous": local.get_sync,
"single-threaded": local.get_sync,
"threads": threaded.get,
"threading": threaded.get,
}
try:
from dask import multiprocessing as dask_multiprocessing
except ImportError:
pass
else:
named_schedulers.update(
{
"processes": dask_multiprocessing.get,
"multiprocessing": dask_multiprocessing.get,
}
)
get_err_msg = """
The get= keyword has been removed.
Please use the scheduler= keyword instead with the name of
the desired scheduler like 'threads' or 'processes'
x.compute(scheduler='single-threaded')
x.compute(scheduler='threads')
x.compute(scheduler='processes')
or with a function that takes the graph and keys
x.compute(scheduler=my_scheduler_function)
or with a Dask client
x.compute(scheduler=client)
""".strip()
def get_scheduler(get=None, scheduler=None, collections=None, cls=None):
"""Get scheduler function
There are various ways to specify the scheduler to use:
1. Passing in scheduler= parameters
2. Passing these into global configuration
3. Using defaults of a dask collection
This function centralizes the logic to determine the right scheduler to use
from those many options
"""
if get:
raise TypeError(get_err_msg)
if scheduler is not None:
if callable(scheduler):
return scheduler
elif "Client" in type(scheduler).__name__ and hasattr(scheduler, "get"):
return scheduler.get
elif scheduler.lower() in named_schedulers:
return named_schedulers[scheduler.lower()]
elif scheduler.lower() in ("dask.distributed", "distributed"):
from distributed.worker import get_client
return get_client().get
else:
raise ValueError(
"Expected one of [distributed, %s]"
% ", ".join(sorted(named_schedulers))
)
# else: # try to connect to remote scheduler with this name
# return get_client(scheduler).get
if config.get("scheduler", None):
return get_scheduler(scheduler=config.get("scheduler", None))
if config.get("get", None):
raise ValueError(get_err_msg)
if getattr(thread_state, "key", False):
from distributed.worker import get_worker
return get_worker().client.get
if cls is not None:
return cls.__dask_scheduler__
if collections:
collections = [c for c in collections if c is not None]
if collections:
get = collections[0].__dask_scheduler__
if not all(c.__dask_scheduler__ == get for c in collections):
raise ValueError(
"Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler=` parameter explicitly in compute or "
"globally with `dask.config.set`."
)
return get
return None
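# For example, get_scheduler(scheduler="threads") resolves to the threaded
# scheduler's get function, while get_scheduler(collections=[x]) falls back to
# x.__dask_scheduler__ when no scheduler is set explicitly or via dask.config.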
def wait(x, timeout=None, return_when="ALL_COMPLETED"):
"""Wait until computation has finished
This is a compatibility alias for ``dask.distributed.wait``.
If it is applied onto Dask collections without Dask Futures or if Dask
distributed is not installed then it is a no-op
"""
try:
from distributed import wait
return wait(x, timeout=timeout, return_when=return_when)
except (ImportError, ValueError):
return x
def get_collection_name(collection) -> str:
"""Infer the collection name from the dask keys, under the assumption that all keys
are either tuples with matching first element, and that element is a string, or
there is exactly one key and it is a string.
Examples
--------
>>> a.__dask_keys__() # doctest: +SKIP
["foo-123"] # doctest: +SKIP
>>> get_collection_name(a) # doctest: +SKIP
"foo-123" # doctest: +SKIP
>>> b.__dask_keys__() # doctest: +SKIP
[[("foo-123", 0, 0), ("foo-123", 0, 1)], [("foo-123", 1, 0), ("foo-123", 1, 1)]] # doctest: +SKIP
>>> get_collection_name(b) # doctest: +SKIP
"foo-123" # doctest: +SKIP
"""
if not is_dask_collection(collection):
raise TypeError(f"Expected Dask collection; got {type(collection)}")
try:
key = next(flatten(collection.__dask_keys__()))
except StopIteration:
# Collection with no keys; this is a legitimate use case but, at the moment of
# writing, can only happen with third-party collections
raise KeyError("Dask collection has no keys")
if isinstance(key, tuple) and key and isinstance(key[0], str):
return key[0]
if isinstance(key, str):
return key
raise TypeError(f"Expected str or tuple[str, Hashable, ...]; got {key}")
def replace_name_in_key(key, name: str):
"""Given a dask key, which must be either a single string or a tuple whose first
element is a string (commonly referred to as a collection's 'name'), replace the
name with a new one.
Examples
--------
>>> replace_name_in_key("foo", "bar")
'bar'
>>> replace_name_in_key(("foo-123", 1, 2), "bar-456")
('bar-456', 1, 2)
"""
if isinstance(key, tuple) and key and isinstance(key[0], str):
return (name,) + key[1:]
if isinstance(key, str):
return name
raise TypeError(f"Expected str or tuple[str, Hashable, ...]; got {key}")
def clone_key(key, seed):
"""Clone a key from a Dask collection, producing a new key with the same prefix and
indices and a token which a deterministic function of the previous token and seed.
Examples
--------
>>> clone_key("inc-cbb1eca3bafafbb3e8b2419c4eebb387", 123) # doctest: +SKIP
'inc-1d291de52f5045f8a969743daea271fd' # doctest: +SKIP
>>> clone_key(("sum-cbb1eca3bafafbb3e8b2419c4eebb387", 4, 3), 123) # doctest: +SKIP
('sum-f0962cc58ef4415689a86cc1d4cc1723', 4, 3) # doctest: +SKIP
"""
if isinstance(key, tuple) and key and isinstance(key[0], str):
return (clone_key(key[0], seed),) + key[1:]
if isinstance(key, str):
prefix = key_split(key)
token = key[len(prefix) + 1 :]
if token:
return prefix + "-" + tokenize(token, seed)
else:
return tokenize(key, seed)
raise TypeError(f"Expected str or tuple[str, Hashable, ...]; got {key}")
|
the-stack_106_26471 | from sklearn.preprocessing import MinMaxScaler
import settings
import pandas as pd
import numpy as np
from sklearn.metrics import average_precision_score, recall_score
from xgboost.sklearn import XGBClassifier
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
def run_xgboost_ml(df_train, df_test, platform_folder):
target = 'Vid_type_mean'
df_train.drop(['Vid_pltform_ML_mean'], axis=1, inplace=True)
df_test.drop(['Vid_pltform_ML_mean'], axis=1, inplace=True)
counts = np.asarray(df_train[target].value_counts())
base_score = counts[1] / (counts[0] + counts[1])
predictors = [x for x in df_train.columns if x not in [target]]
x_train = df_train[predictors]
y_train = df_train[target]
x_test = df_test[predictors]
y_test = df_test[target]
# select mode features for xgboost
ns, md, mcw, g, ss, csbt, sps, rl, ra = select_params_xgboost(platform_folder)
xgboost_model = XGBClassifier(base_score=base_score,
learning_rate=0.01,
n_estimators=ns,
max_depth=md,
min_child_weight=mcw,
gamma=g,
subsample=ss, # 0.9,
colsample_bytree=csbt, # 0.8,
objective='binary:logistic',
nthread=1,
scale_pos_weight=sps, # 2.0,
reg_lambda=rl,
reg_alpha=ra,
seed=123)
xgboost_model.fit(x_train, y_train, eval_metric='error')
# get the performance
y_pred = xgboost_model.predict(x_test)
accuracy = accuracy_score(y_test, y_pred)
    precision = average_precision_score(y_test, y_pred)
    recall = recall_score(y_test, y_pred, average='binary')
    # print('mean acc ' + platform_folder + ' ' + str(accuracy))
    return accuracy, precision, recall
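# Illustrative end-to-end flow combining the helpers in this module (column
# names are assumed to match the feature files produced elsewhere in this
# project):
#
#   train_df, test_df = split_by_control_param(df, platform, random_seed=1)
#   acc, prec, rec = run_xgboost_ml(train_df, test_df, 'yt')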
# based on the traffic_type (platform) select the classifier parameters.
def select_params_xgboost(platform):
# ns,md, mcw, g, ss, csbt, sps, rl, ra
if platform == 'yt':
return [1000, 3, 2, 1, 0.6, 0.5, 1.0, 0.1, 0.1]
elif platform == 'fb':
return [1000, 7, 3, 1, 0.9, 0.6, 1.0, 0.1, 0.1]
else:
return [1000, 7, 2, 1, 0.8, 0.6, 1.0, 1, 0.1]
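# The returned list is unpacked in run_xgboost_ml as
# [n_estimators, max_depth, min_child_weight, gamma, subsample,
#  colsample_bytree, scale_pos_weight, reg_lambda, reg_alpha],
# matching the XGBClassifier arguments set there.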
# split the data to train and test split based on the video id
def split_by_control_param(df, platform, random_seed):
# read the NC data
Sydney = 1
Random = 0
    test_df = df[
        (df['location'] == Sydney) &
        (df['bandwidth'] == Random) &
        (df['Vid_pltform_mean'] == platform)]
    train_df = df[
        (df['location'] == Sydney) &
        (df['bandwidth'] == Random) &
        (df['Vid_pltform_mean'] == platform)]
train_df_360 = train_df[train_df['Vid_type_mean'] == settings.V_360]
train_df_normal = train_df[train_df['Vid_type_mean'] == settings.V_NORMAL]
test_df_360 = test_df[test_df['Vid_type_mean'] == settings.V_360]
test_df_normal = test_df[test_df['Vid_type_mean'] == settings.V_NORMAL]
# take the vid numbers separaetely
vid_numbers_360_test = np.unique(test_df_360['Vid_number_mean'].values)
vid_numbers_normal_test = np.unique(test_df_normal['Vid_number_mean'].values)
vid_numbers_360_train = np.unique(train_df_360['Vid_number_mean'].values)
vid_numbers_normal_train = np.unique(train_df_normal['Vid_number_mean'].values)
vid_numbers_360 = np.union1d(vid_numbers_360_test, vid_numbers_360_train)
vid_numbers_normal = np.union1d(vid_numbers_normal_test, vid_numbers_normal_train)
train_test_split_value = 5
# randomly select the video ids to be in trian and test sets
np.random.seed(random_seed)
test_360_ind = list(
np.random.choice(vid_numbers_360, len(vid_numbers_360) // train_test_split_value, replace=False))
if len(test_360_ind) == 0:
test_360_ind = [vid_numbers_360[0]]
np.random.seed(random_seed)
test_normal_ind = list(
np.random.choice(vid_numbers_normal, len(vid_numbers_normal) // train_test_split_value, replace=False))
if len(test_normal_ind) == 0:
test_normal_ind = [vid_numbers_normal[0]]
train_360_ind = list(set(vid_numbers_360) - set(test_360_ind))
train_normal_ind = list(set(vid_numbers_normal) - set(test_normal_ind))
train_df_360_new = train_df_360.loc[train_df_360['Vid_number_mean'].isin(list(train_360_ind))]
train_df_normal_new = train_df_normal.loc[train_df_normal['Vid_number_mean'].isin(list(train_normal_ind))]
test_df_360_new = test_df_360.loc[test_df_360['Vid_number_mean'].isin(list(test_360_ind))]
test_df_normal_new = test_df_normal.loc[test_df_normal['Vid_number_mean'].isin(list(test_normal_ind))]
# combine the 360 and normal video traces to train and test sets
train_df_new = pd.concat([train_df_360_new, train_df_normal_new])
train_df_new = shuffle(train_df_new, random_state=random_seed)
test_df_new = pd.concat([test_df_360_new, test_df_normal_new])
test_df_new = shuffle(test_df_new, random_state=random_seed)
return train_df_new, test_df_new
|
the-stack_106_26473 | from lazydata.cli.commands.BaseCommand import BaseCommand
from lazydata.storage.cloudsetup import setup_aws_credentials
from lazydata.storage.localsetup import setup_local_folder
class ConfigCommand(BaseCommand):
def add_arguments(self, parser):
        parser.add_argument('backend', type=str, help='The backend to configure: `aws` or a local folder backend (`folder`, `local`, `drive` or `mount`)')
return parser
def handle(self, args):
if args.backend == "aws":
setup_aws_credentials()
elif args.backend in ['drive','local','folder','mount']:
setup_local_folder()
else:
print("ERROR: Unrecognised backend `%s`. Currently supported: `aws`, `folder`" % args.backend)
|
the-stack_106_26476 | import os
import string
import random
import base64
import binascii
import json
from datetime import date, datetime
def file_write(data, path, filename):
os.makedirs(path, exist_ok=True)
if data:
with open('{}/{}.json'.format(path, filename), 'w+') as outfile:
outfile.write(data)
outfile.close()
def json_serializer(obj: {}):
if isinstance(obj, (datetime, date)):
return obj.isoformat()
# failure fallback
if not isinstance(obj, str):
return str(obj)
raise TypeError("Object of type '%s' is not JSON serializable" % type(obj).__name__)
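# Typical use (illustrative): json.dumps(payload, default=json_serializer), so
# datetime/date values are emitted in ISO-8601 form instead of raising TypeError.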
def bin2ascii(v):
decoded = base64.b64decode(v.split(':').pop())
replaced = binascii.hexlify(decoded)
return bool(int(replaced, 16))
def generate_password(size=8, chars=string.ascii_letters + string.digits + string.punctuation):
return ''.join(random.choice(chars) for _ in range(size))
def get_mapping_for_table(table_name, path='migration_result'):
with open(path + '/mapping/' + table_name + '.json') as json_file:
data = json.load(json_file)
return data
def convert_string_to_time(date_string, timezone):
import dateutil.parser
date_time_obj = dateutil.parser.parse(date_string, ignoretz=True)
# from datetime import datetime
import pytz
#
# date_time_obj = datetime.strptime(date_string[:26], '%Y-%m-%d %H:%M:%S.%f')
date_time_obj_timezone = pytz.timezone(timezone).localize(date_time_obj)
return date_time_obj_timezone
def sortedWalk(top, topdown=True, onerror=None):
from os.path import join, isdir, islink
names = os.listdir(top)
names.sort()
dirs, nondirs = [], []
for name in names:
if isdir(os.path.join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if not os.path.islink(path):
for x in sortedWalk(path, topdown, onerror):
yield x
if not topdown:
yield top, dirs, nondirs
|
the-stack_106_26477 | # Author: Mark Wronkiewicz <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import warnings
import numpy as np
import sys
import scipy
from numpy.testing import assert_equal, assert_allclose
from nose.tools import assert_true, assert_raises
from nose.plugins.skip import SkipTest
from distutils.version import LooseVersion
from mne import compute_raw_covariance, pick_types
from mne.chpi import read_head_pos, filter_chpi
from mne.forward import _prep_meg_channels
from mne.cov import _estimate_rank_meeg_cov
from mne.datasets import testing
from mne.io import (read_raw_fif, proc_history, read_info, read_raw_bti,
read_raw_kit, _BaseRaw)
from mne.preprocessing.maxwell import (
maxwell_filter, _get_n_moments, _sss_basis_basic, _sh_complex_to_real,
_sh_real_to_complex, _sh_negate, _bases_complex_to_real, _trans_sss_basis,
_bases_real_to_complex, _sph_harm, _prep_mf_coils)
from mne.tests.common import assert_meg_snr
from mne.utils import (_TempDir, run_tests_if_main, slow_test, catch_logging,
requires_version, object_diff, buggy_mkl_svd)
from mne.externals.six import PY3
warnings.simplefilter('always') # Always throw warnings
data_path = testing.data_path(download=False)
sss_path = op.join(data_path, 'SSS')
pre = op.join(sss_path, 'test_move_anon_')
raw_fname = pre + 'raw.fif'
sss_std_fname = pre + 'stdOrigin_raw_sss.fif'
sss_nonstd_fname = pre + 'nonStdOrigin_raw_sss.fif'
sss_bad_recon_fname = pre + 'badRecon_raw_sss.fif'
sss_reg_in_fname = pre + 'regIn_raw_sss.fif'
sss_fine_cal_fname = pre + 'fineCal_raw_sss.fif'
sss_ctc_fname = pre + 'crossTalk_raw_sss.fif'
sss_trans_default_fname = pre + 'transDefault_raw_sss.fif'
sss_trans_sample_fname = pre + 'transSample_raw_sss.fif'
sss_st1FineCalCrossTalkRegIn_fname = \
pre + 'st1FineCalCrossTalkRegIn_raw_sss.fif'
sss_st1FineCalCrossTalkRegInTransSample_fname = \
pre + 'st1FineCalCrossTalkRegInTransSample_raw_sss.fif'
sss_movecomp_fname = pre + 'movecomp_raw_sss.fif'
sss_movecomp_reg_in_fname = pre + 'movecomp_regIn_raw_sss.fif'
sss_movecomp_reg_in_st4s_fname = pre + 'movecomp_regIn_st4s_raw_sss.fif'
erm_fname = pre + 'erm_raw.fif'
sss_erm_std_fname = pre + 'erm_devOrigin_raw_sss.fif'
sss_erm_reg_in_fname = pre + 'erm_regIn_raw_sss.fif'
sss_erm_fine_cal_fname = pre + 'erm_fineCal_raw_sss.fif'
sss_erm_ctc_fname = pre + 'erm_crossTalk_raw_sss.fif'
sss_erm_st_fname = pre + 'erm_st1_raw_sss.fif'
sss_erm_st1FineCalCrossTalk_fname = pre + 'erm_st1FineCalCrossTalk_raw_sss.fif'
sss_erm_st1FineCalCrossTalkRegIn_fname = \
pre + 'erm_st1FineCalCrossTalkRegIn_raw_sss.fif'
sample_fname = op.join(data_path, 'MEG', 'sample_audvis_trunc_raw.fif')
sss_samp_reg_in_fname = op.join(data_path, 'SSS',
'sample_audvis_trunc_regIn_raw_sss.fif')
sss_samp_fname = op.join(data_path, 'SSS', 'sample_audvis_trunc_raw_sss.fif')
pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')
bases_fname = op.join(sss_path, 'sss_data.mat')
fine_cal_fname = op.join(sss_path, 'sss_cal_3053.dat')
fine_cal_fname_3d = op.join(sss_path, 'sss_cal_3053_3d.dat')
ctc_fname = op.join(sss_path, 'ct_sparse.fif')
fine_cal_mgh_fname = op.join(sss_path, 'sss_cal_mgh.dat')
ctc_mgh_fname = op.join(sss_path, 'ct_sparse_mgh.fif')
sample_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
triux_path = op.join(data_path, 'SSS', 'TRIUX')
tri_fname = op.join(triux_path, 'triux_bmlhus_erm_raw.fif')
tri_sss_fname = op.join(triux_path, 'triux_bmlhus_erm_raw_sss.fif')
tri_sss_reg_fname = op.join(triux_path, 'triux_bmlhus_erm_regIn_raw_sss.fif')
tri_sss_st4_fname = op.join(triux_path, 'triux_bmlhus_erm_st4_raw_sss.fif')
tri_sss_ctc_fname = op.join(triux_path, 'triux_bmlhus_erm_ctc_raw_sss.fif')
tri_sss_cal_fname = op.join(triux_path, 'triux_bmlhus_erm_cal_raw_sss.fif')
tri_sss_ctc_cal_fname = op.join(
triux_path, 'triux_bmlhus_erm_ctc_cal_raw_sss.fif')
tri_sss_ctc_cal_reg_in_fname = op.join(
triux_path, 'triux_bmlhus_erm_ctc_cal_regIn_raw_sss.fif')
tri_ctc_fname = op.join(triux_path, 'ct_sparse_BMLHUS.fif')
tri_cal_fname = op.join(triux_path, 'sss_cal_BMLHUS.dat')
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
int_order, ext_order = 8, 3
mf_head_origin = (0., 0., 0.04)
mf_meg_origin = (0., 0.013, -0.006)
# otherwise we can get SVD error
requires_svd_convergence = requires_version('scipy', '0.12')
# 30 random bad MEG channels (20 grad, 10 mag) that were used in generation
bads = ['MEG0912', 'MEG1722', 'MEG2213', 'MEG0132', 'MEG1312', 'MEG0432',
'MEG2433', 'MEG1022', 'MEG0442', 'MEG2332', 'MEG0633', 'MEG1043',
'MEG1713', 'MEG0422', 'MEG0932', 'MEG1622', 'MEG1343', 'MEG0943',
'MEG0643', 'MEG0143', 'MEG2142', 'MEG0813', 'MEG2143', 'MEG1323',
'MEG0522', 'MEG1123', 'MEG0423', 'MEG2122', 'MEG2532', 'MEG0812']
def _assert_n_free(raw_sss, lower, upper=None):
"""Check the DOF."""
upper = lower if upper is None else upper
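    # 'nfree' is the number of internal SSS components retained after
    # regularization, i.e. the remaining degrees of freedom.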
n_free = raw_sss.info['proc_history'][0]['max_info']['sss_info']['nfree']
assert_true(lower <= n_free <= upper,
'nfree fail: %s <= %s <= %s' % (lower, n_free, upper))
def read_crop(fname, lims=(0, None)):
"""Read and crop."""
return read_raw_fif(fname, allow_maxshield='yes',
add_eeg_ref=False).crop(*lims)
@slow_test
@testing.requires_testing_data
def test_movement_compensation():
"""Test movement compensation."""
temp_dir = _TempDir()
lims = (0, 4, False)
raw = read_crop(raw_fname, lims).load_data()
head_pos = read_head_pos(pos_fname)
#
# Movement compensation, no regularization, no tSSS
#
raw_sss = maxwell_filter(raw, head_pos=head_pos, origin=mf_head_origin,
regularize=None, bad_condition='ignore')
assert_meg_snr(raw_sss, read_crop(sss_movecomp_fname, lims),
4.6, 12.4, chpi_med_tol=58)
# IO
temp_fname = op.join(temp_dir, 'test_raw_sss.fif')
raw_sss.save(temp_fname)
raw_sss = read_crop(temp_fname)
assert_meg_snr(raw_sss, read_crop(sss_movecomp_fname, lims),
4.6, 12.4, chpi_med_tol=58)
#
# Movement compensation, regularization, no tSSS
#
raw_sss = maxwell_filter(raw, head_pos=head_pos, origin=mf_head_origin)
assert_meg_snr(raw_sss, read_crop(sss_movecomp_reg_in_fname, lims),
0.5, 1.9, chpi_med_tol=121)
#
# Movement compensation, regularization, tSSS at the end
#
raw_nohpi = filter_chpi(raw.copy())
with warnings.catch_warnings(record=True) as w: # untested feature
raw_sss_mv = maxwell_filter(raw_nohpi, head_pos=head_pos,
st_duration=4., origin=mf_head_origin,
st_fixed=False)
assert_equal(len(w), 1)
assert_true('is untested' in str(w[0].message))
# Neither match is particularly good because our algorithm actually differs
assert_meg_snr(raw_sss_mv, read_crop(sss_movecomp_reg_in_st4s_fname, lims),
0.6, 1.3)
tSSS_fname = op.join(sss_path, 'test_move_anon_st4s_raw_sss.fif')
assert_meg_snr(raw_sss_mv, read_crop(tSSS_fname, lims),
0.6, 1.0, chpi_med_tol=None)
assert_meg_snr(read_crop(sss_movecomp_reg_in_st4s_fname),
read_crop(tSSS_fname), 0.8, 1.0, chpi_med_tol=None)
#
# Movement compensation, regularization, tSSS at the beginning
#
raw_sss_mc = maxwell_filter(raw_nohpi, head_pos=head_pos, st_duration=4.,
origin=mf_head_origin)
assert_meg_snr(raw_sss_mc, read_crop(tSSS_fname, lims),
0.6, 1.0, chpi_med_tol=None)
assert_meg_snr(raw_sss_mc, raw_sss_mv, 0.6, 1.4)
# some degenerate cases
raw_erm = read_crop(erm_fname)
assert_raises(ValueError, maxwell_filter, raw_erm, coord_frame='meg',
head_pos=head_pos) # can't do ERM file
assert_raises(ValueError, maxwell_filter, raw,
head_pos=head_pos[:, :9]) # bad shape
assert_raises(TypeError, maxwell_filter, raw, head_pos='foo') # bad type
assert_raises(ValueError, maxwell_filter, raw, head_pos=head_pos[::-1])
head_pos_bad = head_pos.copy()
head_pos_bad[0, 0] = raw.first_samp / raw.info['sfreq'] - 1e-2
assert_raises(ValueError, maxwell_filter, raw, head_pos=head_pos_bad)
head_pos_bad = head_pos.copy()
head_pos_bad[0, 4] = 1. # off by more than 1 m
with warnings.catch_warnings(record=True) as w:
maxwell_filter(raw, head_pos=head_pos_bad, bad_condition='ignore')
assert_true(any('greater than 1 m' in str(ww.message) for ww in w))
# make sure numerical error doesn't screw it up, though
head_pos_bad = head_pos.copy()
head_pos_bad[0, 0] = raw.first_samp / raw.info['sfreq'] - 5e-4
raw_sss_tweak = maxwell_filter(raw, head_pos=head_pos_bad,
origin=mf_head_origin)
assert_meg_snr(raw_sss_tweak, raw_sss, 2., 10., chpi_med_tol=11)
@slow_test
def test_other_systems():
"""Test Maxwell filtering on KIT, BTI, and CTF files."""
# KIT
kit_dir = op.join(io_dir, 'kit', 'tests', 'data')
sqd_path = op.join(kit_dir, 'test.sqd')
mrk_path = op.join(kit_dir, 'test_mrk.sqd')
elp_path = op.join(kit_dir, 'test_elp.txt')
hsp_path = op.join(kit_dir, 'test_hsp.txt')
raw_kit = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
with warnings.catch_warnings(record=True): # head fit
assert_raises(RuntimeError, maxwell_filter, raw_kit)
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 65, 65)
raw_sss_auto = maxwell_filter(raw_kit, origin=(0., 0., 0.04),
ignore_ref=True, mag_scale='auto')
assert_allclose(raw_sss._data, raw_sss_auto._data)
# XXX this KIT origin fit is terrible! Eventually we should get a
# corrected HSP file with proper coverage
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
assert_raises(RuntimeError, maxwell_filter, raw_kit,
ignore_ref=True, regularize=None) # bad condition
raw_sss = maxwell_filter(raw_kit, origin='auto',
ignore_ref=True, bad_condition='warning',
verbose='warning')
log_file = log_file.getvalue()
assert_true('badly conditioned' in log_file)
assert_true('more than 20 mm from' in log_file)
# fits can differ slightly based on scipy version, so be lenient here
_assert_n_free(raw_sss, 28, 34) # bad origin == brutal reg
# Let's set the origin
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04),
ignore_ref=True, bad_condition='warning',
regularize=None, verbose='warning')
log_file = log_file.getvalue()
assert_true('badly conditioned' in log_file)
_assert_n_free(raw_sss, 80)
# Now with reg
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04),
ignore_ref=True, verbose=True)
log_file = log_file.getvalue()
assert_true('badly conditioned' not in log_file)
_assert_n_free(raw_sss, 65)
# BTi
bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
bti_pdf = op.join(bti_dir, 'test_pdf_linux')
bti_config = op.join(bti_dir, 'test_config_linux')
bti_hs = op.join(bti_dir, 'test_hs_linux')
    with warnings.catch_warnings(record=True):  # weight table
raw_bti = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
picks = pick_types(raw_bti.info, meg='mag', exclude=())
power = np.sqrt(np.sum(raw_bti[picks][0] ** 2))
raw_sss = maxwell_filter(raw_bti)
_assert_n_free(raw_sss, 70)
_assert_shielding(raw_sss, power, 0.5)
raw_sss_auto = maxwell_filter(raw_bti, mag_scale='auto', verbose=True)
_assert_shielding(raw_sss_auto, power, 0.7)
# CTF
raw_ctf = read_crop(fname_ctf_raw)
assert_equal(raw_ctf.compensation_grade, 3)
assert_raises(RuntimeError, maxwell_filter, raw_ctf) # compensated
raw_ctf.apply_gradient_compensation(0)
assert_raises(ValueError, maxwell_filter, raw_ctf) # cannot fit headshape
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04))
_assert_n_free(raw_sss, 68)
_assert_shielding(raw_sss, raw_ctf, 1.8)
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 70)
_assert_shielding(raw_sss, raw_ctf, 12)
raw_sss_auto = maxwell_filter(raw_ctf, origin=(0., 0., 0.04),
ignore_ref=True, mag_scale='auto')
assert_allclose(raw_sss._data, raw_sss_auto._data)
def test_spherical_harmonics():
"""Test spherical harmonic functions."""
from scipy.special import sph_harm
az, pol = np.meshgrid(np.linspace(0, 2 * np.pi, 30),
np.linspace(0, np.pi, 20))
    # As of Oct 16, 2015, Anaconda has a bug in scipy due to old compilers (?):
# https://github.com/ContinuumIO/anaconda-issues/issues/479
if (PY3 and
LooseVersion(scipy.__version__) >= LooseVersion('0.15') and
'Continuum Analytics' in sys.version):
raise SkipTest('scipy sph_harm bad in Py3k on Anaconda')
# Test our basic spherical harmonics
for degree in range(1, int_order):
for order in range(0, degree + 1):
sph = _sph_harm(order, degree, az, pol)
sph_scipy = sph_harm(order, degree, az, pol)
assert_allclose(sph, sph_scipy, atol=1e-7)
def test_spherical_conversions():
"""Test spherical harmonic conversions."""
# Test our real<->complex conversion functions
az, pol = np.meshgrid(np.linspace(0, 2 * np.pi, 30),
np.linspace(0, np.pi, 20))
for degree in range(1, int_order):
for order in range(0, degree + 1):
sph = _sph_harm(order, degree, az, pol)
# ensure that we satisfy the conjugation property
assert_allclose(_sh_negate(sph, order),
_sph_harm(-order, degree, az, pol))
# ensure our conversion functions work
sph_real_pos = _sh_complex_to_real(sph, order)
sph_real_neg = _sh_complex_to_real(sph, -order)
sph_2 = _sh_real_to_complex([sph_real_pos, sph_real_neg], order)
assert_allclose(sph, sph_2, atol=1e-7)
@testing.requires_testing_data
def test_multipolar_bases():
"""Test multipolar moment basis calculation using sensor information."""
from scipy.io import loadmat
# Test our basis calculations
info = read_info(raw_fname)
coils = _prep_meg_channels(info, accurate=True, elekta_defs=True,
do_es=True)[0]
# Check against a known benchmark
sss_data = loadmat(bases_fname)
exp = dict(int_order=int_order, ext_order=ext_order)
for origin in ((0, 0, 0.04), (0, 0.02, 0.02)):
o_str = ''.join('%d' % (1000 * n) for n in origin)
exp.update(origin=origin)
S_tot = _sss_basis_basic(exp, coils, method='alternative')
# Test our real<->complex conversion functions
S_tot_complex = _bases_real_to_complex(S_tot, int_order, ext_order)
S_tot_round = _bases_complex_to_real(S_tot_complex,
int_order, ext_order)
assert_allclose(S_tot, S_tot_round, atol=1e-7)
S_tot_mat = np.concatenate([sss_data['Sin' + o_str],
sss_data['Sout' + o_str]], axis=1)
S_tot_mat_real = _bases_complex_to_real(S_tot_mat,
int_order, ext_order)
S_tot_mat_round = _bases_real_to_complex(S_tot_mat_real,
int_order, ext_order)
assert_allclose(S_tot_mat, S_tot_mat_round, atol=1e-7)
assert_allclose(S_tot_complex, S_tot_mat, rtol=1e-4, atol=1e-8)
assert_allclose(S_tot, S_tot_mat_real, rtol=1e-4, atol=1e-8)
# Now normalize our columns
S_tot /= np.sqrt(np.sum(S_tot * S_tot, axis=0))[np.newaxis]
S_tot_complex /= np.sqrt(np.sum(
(S_tot_complex * S_tot_complex.conj()).real, axis=0))[np.newaxis]
# Check against a known benchmark
S_tot_mat = np.concatenate([sss_data['SNin' + o_str],
sss_data['SNout' + o_str]], axis=1)
# Check this roundtrip
S_tot_mat_real = _bases_complex_to_real(S_tot_mat,
int_order, ext_order)
S_tot_mat_round = _bases_real_to_complex(S_tot_mat_real,
int_order, ext_order)
assert_allclose(S_tot_mat, S_tot_mat_round, atol=1e-7)
assert_allclose(S_tot_complex, S_tot_mat, rtol=1e-4, atol=1e-8)
# Now test our optimized version
S_tot = _sss_basis_basic(exp, coils)
S_tot_fast = _trans_sss_basis(
exp, all_coils=_prep_mf_coils(info), trans=info['dev_head_t'])
# there are some sign differences for columns (order/degrees)
# in here, likely due to Condon-Shortley. Here we use a
# Magnetometer channel to figure out the flips because the
# gradiometer channels have effectively zero values for first three
# external components (i.e., S_tot[grad_picks, 80:83])
flips = (np.sign(S_tot_fast[2]) != np.sign(S_tot[2]))
flips = 1 - 2 * flips
assert_allclose(S_tot, S_tot_fast * flips, atol=1e-16)
@testing.requires_testing_data
def test_basic():
"""Test Maxwell filter basic version."""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
raw = read_crop(raw_fname, (0., 1.))
raw_err = read_crop(raw_fname).apply_proj()
raw_erm = read_crop(erm_fname)
assert_raises(RuntimeError, maxwell_filter, raw_err)
assert_raises(TypeError, maxwell_filter, 1.) # not a raw
assert_raises(ValueError, maxwell_filter, raw, int_order=20) # too many
n_int_bases = int_order ** 2 + 2 * int_order
n_ext_bases = ext_order ** 2 + 2 * ext_order
nbases = n_int_bases + n_ext_bases
# Check number of bases computed correctly
assert_equal(_get_n_moments([int_order, ext_order]).sum(), nbases)
# Test SSS computation at the standard head origin
assert_equal(len(raw.info['projs']), 12) # 11 MEG projs + 1 AVG EEG
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_equal(len(raw_sss.info['projs']), 1) # avg EEG
assert_equal(raw_sss.info['projs'][0]['desc'], 'Average EEG reference')
assert_meg_snr(raw_sss, read_crop(sss_std_fname), 200., 1000.)
py_cal = raw_sss.info['proc_history'][0]['max_info']['sss_cal']
assert_equal(len(py_cal), 0)
py_ctc = raw_sss.info['proc_history'][0]['max_info']['sss_ctc']
assert_equal(len(py_ctc), 0)
py_st = raw_sss.info['proc_history'][0]['max_info']['max_st']
assert_equal(len(py_st), 0)
assert_raises(RuntimeError, maxwell_filter, raw_sss)
# Test SSS computation at non-standard head origin
raw_sss = maxwell_filter(raw, origin=[0., 0.02, 0.02], regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, read_crop(sss_nonstd_fname), 250., 700.)
# Test SSS computation at device origin
sss_erm_std = read_crop(sss_erm_std_fname)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg',
origin=mf_meg_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_erm_std, 100., 900.)
for key in ('job', 'frame'):
vals = [x.info['proc_history'][0]['max_info']['sss_info'][key]
for x in [raw_sss, sss_erm_std]]
assert_equal(vals[0], vals[1])
# Check against SSS functions from proc_history
sss_info = raw_sss.info['proc_history'][0]['max_info']
assert_equal(_get_n_moments(int_order),
proc_history._get_sss_rank(sss_info))
# Degenerate cases
assert_raises(ValueError, maxwell_filter, raw, coord_frame='foo')
assert_raises(ValueError, maxwell_filter, raw, origin='foo')
assert_raises(ValueError, maxwell_filter, raw, origin=[0] * 4)
assert_raises(ValueError, maxwell_filter, raw, mag_scale='foo')
@testing.requires_testing_data
def test_maxwell_filter_additional():
"""Test processing of Maxwell filtered data."""
# TODO: Future tests integrate with mne/io/tests/test_proc_history
# Load testing data (raw, SSS std origin, SSS non-standard origin)
data_path = op.join(testing.data_path(download=False))
file_name = 'test_move_anon'
raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif')
# Use 2.0 seconds of data to get stable cov. estimate
raw = read_crop(raw_fname, (0., 2.))
# Get MEG channels, compute Maxwell filtered data
raw.load_data()
raw.pick_types(meg=True, eeg=False)
int_order = 8
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
# Test io on processed data
tempdir = _TempDir()
test_outname = op.join(tempdir, 'test_raw_sss.fif')
raw_sss.save(test_outname)
raw_sss_loaded = read_crop(test_outname).load_data()
# Some numerical imprecision since save uses 'single' fmt
assert_allclose(raw_sss_loaded[:][0], raw_sss[:][0],
rtol=1e-6, atol=1e-20)
# Test rank of covariance matrices for raw and SSS processed data
cov_raw = compute_raw_covariance(raw)
cov_sss = compute_raw_covariance(raw_sss)
scalings = None
cov_raw_rank = _estimate_rank_meeg_cov(cov_raw['data'], raw.info, scalings)
cov_sss_rank = _estimate_rank_meeg_cov(cov_sss['data'], raw_sss.info,
scalings)
assert_equal(cov_raw_rank, raw.info['nchan'])
assert_equal(cov_sss_rank, _get_n_moments(int_order))
@slow_test
@testing.requires_testing_data
def test_bads_reconstruction():
"""Test Maxwell filter reconstruction of bad channels."""
raw = read_crop(raw_fname, (0., 1.))
raw.info['bads'] = bads
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, read_crop(sss_bad_recon_fname), 300.)
@requires_svd_convergence
@testing.requires_testing_data
def test_spatiotemporal_maxwell():
"""Test Maxwell filter (tSSS) spatiotemporal processing."""
# Load raw testing data
raw = read_crop(raw_fname)
# Test that window is less than length of data
assert_raises(ValueError, maxwell_filter, raw, st_duration=1000.)
# Check both 4 and 10 seconds because Elekta handles them differently
# This is to ensure that std/non-std tSSS windows are correctly handled
st_durations = [4., 10.]
tols = [325., 200.]
for st_duration, tol in zip(st_durations, tols):
# Load tSSS data depending on st_duration and get data
tSSS_fname = op.join(sss_path,
'test_move_anon_st%0ds_raw_sss.fif' % st_duration)
tsss_bench = read_crop(tSSS_fname)
# Because Elekta's tSSS sometimes(!) lumps the tail window of data
# onto the previous buffer if it's shorter than st_duration, we have to
# crop the data here to compensate for Elekta's tSSS behavior.
if st_duration == 10.:
tsss_bench.crop(0, st_duration, copy=False)
# Test sss computation at the standard head origin. Same cropping issue
# as mentioned above.
if st_duration == 10.:
raw_tsss = maxwell_filter(raw.crop(0, st_duration),
origin=mf_head_origin,
st_duration=st_duration, regularize=None,
bad_condition='ignore')
else:
raw_tsss = maxwell_filter(raw, st_duration=st_duration,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose=True)
raw_tsss_2 = maxwell_filter(raw, st_duration=st_duration,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', st_fixed=False,
verbose=True)
assert_meg_snr(raw_tsss, raw_tsss_2, 100., 1000.)
assert_equal(raw_tsss.estimate_rank(), 140)
assert_equal(raw_tsss_2.estimate_rank(), 140)
assert_meg_snr(raw_tsss, tsss_bench, tol)
py_st = raw_tsss.info['proc_history'][0]['max_info']['max_st']
assert_true(len(py_st) > 0)
assert_equal(py_st['buflen'], st_duration)
assert_equal(py_st['subspcorr'], 0.98)
# Degenerate cases
assert_raises(ValueError, maxwell_filter, raw, st_duration=10.,
st_correlation=0.)
@requires_svd_convergence
@testing.requires_testing_data
def test_spatiotemporal_only():
"""Test tSSS-only processing."""
# Load raw testing data
raw = read_crop(raw_fname, (0, 2)).load_data()
picks = pick_types(raw.info, meg='mag', exclude=())
power = np.sqrt(np.sum(raw[picks][0] ** 2))
# basics
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 10)
# temporal proj will actually reduce spatial DOF with small windows!
raw_tsss = maxwell_filter(raw, st_duration=0.1, st_only=True)
assert_true(raw_tsss.estimate_rank() < 350)
_assert_shielding(raw_tsss, power, 40)
# with movement
head_pos = read_head_pos(pos_fname)
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 12)
with warnings.catch_warnings(record=True): # st_fixed False
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos, st_fixed=False)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 12)
# should do nothing
raw_tsss = maxwell_filter(raw, st_duration=1., st_correlation=1.,
st_only=True)
assert_allclose(raw[:][0], raw_tsss[:][0])
# degenerate
assert_raises(ValueError, maxwell_filter, raw, st_only=True) # no ST
# two-step process equivalent to single-step process
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True)
raw_tsss = maxwell_filter(raw_tsss)
raw_tsss_2 = maxwell_filter(raw, st_duration=1.)
assert_meg_snr(raw_tsss, raw_tsss_2, 1e5)
# now also with head movement, and a bad MEG channel
assert_equal(len(raw.info['bads']), 0)
raw.info['bads'] = ['EEG001', 'MEG2623']
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos)
assert_equal(raw.info['bads'], ['EEG001', 'MEG2623'])
assert_equal(raw_tsss.info['bads'], ['EEG001', 'MEG2623']) # don't reset
raw_tsss = maxwell_filter(raw_tsss, head_pos=head_pos)
assert_equal(raw_tsss.info['bads'], ['EEG001']) # do reset MEG bads
raw_tsss_2 = maxwell_filter(raw, st_duration=1., head_pos=head_pos)
assert_equal(raw_tsss_2.info['bads'], ['EEG001'])
assert_meg_snr(raw_tsss, raw_tsss_2, 1e5)
@testing.requires_testing_data
def test_fine_calibration():
"""Test Maxwell filter fine calibration."""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
raw = read_crop(raw_fname, (0., 1.))
sss_fine_cal = read_crop(sss_fine_cal_fname)
# Test 1D SSS fine calibration
raw_sss = maxwell_filter(raw, calibration=fine_cal_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_fine_cal, 82, 611)
py_cal = raw_sss.info['proc_history'][0]['max_info']['sss_cal']
assert_true(py_cal is not None)
assert_true(len(py_cal) > 0)
mf_cal = sss_fine_cal.info['proc_history'][0]['max_info']['sss_cal']
# we identify these differently
mf_cal['cal_chans'][mf_cal['cal_chans'][:, 1] == 3022, 1] = 3024
assert_allclose(py_cal['cal_chans'], mf_cal['cal_chans'])
assert_allclose(py_cal['cal_corrs'], mf_cal['cal_corrs'],
rtol=1e-3, atol=1e-3)
# Test 3D SSS fine calibration (no equivalent func in MaxFilter yet!)
# very low SNR as proc differs, eventually we should add a better test
raw_sss_3D = maxwell_filter(raw, calibration=fine_cal_fname_3d,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss_3D, sss_fine_cal, 1.0, 6.)
raw_ctf = read_crop(fname_ctf_raw).apply_gradient_compensation(0)
assert_raises(RuntimeError, maxwell_filter, raw_ctf, origin=(0., 0., 0.04),
calibration=fine_cal_fname)
@slow_test
@testing.requires_testing_data
def test_regularization():
"""Test Maxwell filter regularization."""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
min_tols = (100., 2.6, 1.0)
med_tols = (1000., 21.4, 3.7)
origins = ((0., 0., 0.04), (0.,) * 3, (0., 0.02, 0.02))
coord_frames = ('head', 'meg', 'head')
raw_fnames = (raw_fname, erm_fname, sample_fname)
sss_fnames = (sss_reg_in_fname, sss_erm_reg_in_fname,
sss_samp_reg_in_fname)
comp_tols = [0, 1, 4]
for ii, rf in enumerate(raw_fnames):
raw = read_crop(rf, (0., 1.))
sss_reg_in = read_crop(sss_fnames[ii])
# Test "in" regularization
raw_sss = maxwell_filter(raw, coord_frame=coord_frames[ii],
origin=origins[ii])
assert_meg_snr(raw_sss, sss_reg_in, min_tols[ii], med_tols[ii], msg=rf)
# check components match
_check_reg_match(raw_sss, sss_reg_in, comp_tols[ii])
def _check_reg_match(sss_py, sss_mf, comp_tol):
"""Helper to check regularization."""
info_py = sss_py.info['proc_history'][0]['max_info']['sss_info']
assert_true(info_py is not None)
assert_true(len(info_py) > 0)
info_mf = sss_mf.info['proc_history'][0]['max_info']['sss_info']
n_in = None
for inf in (info_py, info_mf):
if n_in is None:
n_in = _get_n_moments(inf['in_order'])
else:
assert_equal(n_in, _get_n_moments(inf['in_order']))
assert_equal(inf['components'][:n_in].sum(), inf['nfree'])
assert_allclose(info_py['nfree'], info_mf['nfree'],
atol=comp_tol, err_msg=sss_py._filenames[0])
@testing.requires_testing_data
def test_cross_talk():
"""Test Maxwell filter cross-talk cancellation."""
raw = read_crop(raw_fname, (0., 1.))
raw.info['bads'] = bads
sss_ctc = read_crop(sss_ctc_fname)
raw_sss = maxwell_filter(raw, cross_talk=ctc_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_ctc, 275.)
py_ctc = raw_sss.info['proc_history'][0]['max_info']['sss_ctc']
assert_true(len(py_ctc) > 0)
assert_raises(ValueError, maxwell_filter, raw, cross_talk=raw)
assert_raises(ValueError, maxwell_filter, raw, cross_talk=raw_fname)
mf_ctc = sss_ctc.info['proc_history'][0]['max_info']['sss_ctc']
del mf_ctc['block_id'] # we don't write this
assert_equal(object_diff(py_ctc, mf_ctc), '')
raw_ctf = read_crop(fname_ctf_raw).apply_gradient_compensation(0)
assert_raises(ValueError, maxwell_filter, raw_ctf) # cannot fit headshape
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04))
_assert_n_free(raw_sss, 68)
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 70)
raw_missing = raw.copy().crop(0, 0.1).load_data().pick_channels(
[raw.ch_names[pi] for pi in pick_types(raw.info, meg=True,
exclude=())[3:]])
with warnings.catch_warnings(record=True) as w:
maxwell_filter(raw_missing, cross_talk=ctc_fname)
assert_equal(len(w), 1)
assert_true('Not all cross-talk channels in raw' in str(w[0].message))
# MEG channels not in cross-talk
assert_raises(RuntimeError, maxwell_filter, raw_ctf, origin=(0., 0., 0.04),
cross_talk=ctc_fname)
@testing.requires_testing_data
def test_head_translation():
"""Test Maxwell filter head translation."""
raw = read_crop(raw_fname, (0., 1.))
# First try with an unchanged destination
raw_sss = maxwell_filter(raw, destination=raw_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, read_crop(sss_std_fname, (0., 1.)), 200.)
# Now with default
with warnings.catch_warnings(record=True):
with catch_logging() as log:
raw_sss = maxwell_filter(raw, destination=mf_head_origin,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose='warning')
assert_true('over 25 mm' in log.getvalue())
assert_meg_snr(raw_sss, read_crop(sss_trans_default_fname), 125.)
destination = np.eye(4)
destination[2, 3] = 0.04
assert_allclose(raw_sss.info['dev_head_t']['trans'], destination)
# Now to sample's head pos
with warnings.catch_warnings(record=True):
with catch_logging() as log:
raw_sss = maxwell_filter(raw, destination=sample_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose='warning')
assert_true('= 25.6 mm' in log.getvalue())
assert_meg_snr(raw_sss, read_crop(sss_trans_sample_fname), 350.)
assert_allclose(raw_sss.info['dev_head_t']['trans'],
read_info(sample_fname)['dev_head_t']['trans'])
# Degenerate cases
assert_raises(RuntimeError, maxwell_filter, raw,
destination=mf_head_origin, coord_frame='meg')
assert_raises(ValueError, maxwell_filter, raw, destination=[0.] * 4)
# TODO: Eventually add simulation tests mirroring Taulu's original paper
# that calculates the localization error:
# http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=1495874
def _assert_shielding(raw_sss, erm_power, shielding_factor, meg='mag'):
"""Helper to assert a minimum shielding factor using empty-room power."""
picks = pick_types(raw_sss.info, meg=meg, ref_meg=False)
if isinstance(erm_power, _BaseRaw):
picks_erm = pick_types(raw_sss.info, meg=meg, ref_meg=False)
assert_allclose(picks, picks_erm)
erm_power = np.sqrt((erm_power[picks_erm][0] ** 2).sum())
sss_power = raw_sss[picks][0].ravel()
sss_power = np.sqrt(np.sum(sss_power * sss_power))
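    # Shielding factor = root-sum-square of the empty-room data divided by that
    # of the Maxwell-filtered data; larger values mean stronger suppression.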
factor = erm_power / sss_power
assert_true(factor >= shielding_factor,
'Shielding factor %0.3f < %0.3f' % (factor, shielding_factor))
@buggy_mkl_svd
@slow_test
@requires_svd_convergence
@testing.requires_testing_data
def test_shielding_factor():
"""Test Maxwell filter shielding factor using empty room."""
raw_erm = read_crop(erm_fname).load_data()
picks = pick_types(raw_erm.info, meg='mag')
erm_power = raw_erm[picks][0]
erm_power = np.sqrt(np.sum(erm_power * erm_power))
erm_power_grad = raw_erm[pick_types(raw_erm.info, meg='grad')][0]
    erm_power_grad = np.sqrt(np.sum(erm_power_grad * erm_power_grad))
# Vanilla SSS (second value would be for meg=True instead of meg='mag')
_assert_shielding(read_crop(sss_erm_std_fname), erm_power, 10) # 1.5)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None)
_assert_shielding(raw_sss, erm_power, 12) # 1.5)
_assert_shielding(raw_sss, erm_power_grad, 0.45, 'grad') # 1.5)
# Using different mag_scale values
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
mag_scale='auto')
_assert_shielding(raw_sss, erm_power, 12)
_assert_shielding(raw_sss, erm_power_grad, 0.48, 'grad')
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
mag_scale=1.) # not a good choice
_assert_shielding(raw_sss, erm_power, 7.3)
_assert_shielding(raw_sss, erm_power_grad, 0.2, 'grad')
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
mag_scale=1000., bad_condition='ignore')
_assert_shielding(raw_sss, erm_power, 4.0)
_assert_shielding(raw_sss, erm_power_grad, 0.1, 'grad')
# Fine cal
_assert_shielding(read_crop(sss_erm_fine_cal_fname), erm_power, 12) # 2.0)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin,
calibration=fine_cal_fname)
_assert_shielding(raw_sss, erm_power, 12) # 2.0)
# Crosstalk
_assert_shielding(read_crop(sss_erm_ctc_fname), erm_power, 12) # 2.1)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin,
cross_talk=ctc_fname)
_assert_shielding(raw_sss, erm_power, 12) # 2.1)
# Fine cal + Crosstalk
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname,
origin=mf_meg_origin,
cross_talk=ctc_fname)
_assert_shielding(raw_sss, erm_power, 13) # 2.2)
# tSSS
_assert_shielding(read_crop(sss_erm_st_fname), erm_power, 37) # 5.8)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 37) # 5.8)
# Crosstalk + tSSS
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
cross_talk=ctc_fname, origin=mf_meg_origin,
st_duration=1.)
_assert_shielding(raw_sss, erm_power, 38) # 5.91)
# Fine cal + tSSS
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname,
origin=mf_meg_origin, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 38) # 5.98)
# Fine cal + Crosstalk + tSSS
_assert_shielding(read_crop(sss_erm_st1FineCalCrossTalk_fname),
erm_power, 39) # 6.07)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname, origin=mf_meg_origin,
cross_talk=ctc_fname, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 39) # 6.05)
# Fine cal + Crosstalk + tSSS + Reg-in
_assert_shielding(read_crop(sss_erm_st1FineCalCrossTalkRegIn_fname),
erm_power, 57) # 6.97)
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname,
cross_talk=ctc_fname, st_duration=1.,
origin=mf_meg_origin,
coord_frame='meg', regularize='in')
_assert_shielding(raw_sss, erm_power, 53) # 6.64)
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
_assert_shielding(raw_sss, erm_power, 58) # 7.0)
_assert_shielding(raw_sss, erm_power_grad, 1.6, 'grad')
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in',
mag_scale='auto')
_assert_shielding(raw_sss, erm_power, 51)
_assert_shielding(raw_sss, erm_power_grad, 1.5, 'grad')
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname_3d,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
# Our 3D cal has worse defaults for this ERM than the 1D file
_assert_shielding(raw_sss, erm_power, 54)
# Show it by rewriting the 3D as 1D and testing it
temp_dir = _TempDir()
temp_fname = op.join(temp_dir, 'test_cal.dat')
with open(fine_cal_fname_3d, 'r') as fid:
with open(temp_fname, 'w') as fid_out:
for line in fid:
fid_out.write(' '.join(line.strip().split(' ')[:14]) + '\n')
raw_sss = maxwell_filter(raw_erm, calibration=temp_fname,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
# Our 3D cal has worse defaults for this ERM than the 1D file
_assert_shielding(raw_sss, erm_power, 44)
@slow_test
@requires_svd_convergence
@testing.requires_testing_data
def test_all():
"""Test maxwell filter using all options."""
raw_fnames = (raw_fname, raw_fname, erm_fname, sample_fname)
sss_fnames = (sss_st1FineCalCrossTalkRegIn_fname,
sss_st1FineCalCrossTalkRegInTransSample_fname,
sss_erm_st1FineCalCrossTalkRegIn_fname,
sss_samp_fname)
fine_cals = (fine_cal_fname,
fine_cal_fname,
fine_cal_fname,
fine_cal_mgh_fname)
coord_frames = ('head', 'head', 'meg', 'head')
ctcs = (ctc_fname, ctc_fname, ctc_fname, ctc_mgh_fname)
mins = (3.5, 3.5, 1.2, 0.9)
meds = (10.8, 10.4, 3.2, 6.)
st_durs = (1., 1., 1., None)
destinations = (None, sample_fname, None, None)
origins = (mf_head_origin,
mf_head_origin,
mf_meg_origin,
mf_head_origin)
for ii, rf in enumerate(raw_fnames):
raw = read_crop(rf, (0., 1.))
with warnings.catch_warnings(record=True): # head fit off-center
sss_py = maxwell_filter(
raw, calibration=fine_cals[ii], cross_talk=ctcs[ii],
st_duration=st_durs[ii], coord_frame=coord_frames[ii],
destination=destinations[ii], origin=origins[ii])
sss_mf = read_crop(sss_fnames[ii])
assert_meg_snr(sss_py, sss_mf, mins[ii], meds[ii], msg=rf)
@slow_test
@requires_svd_convergence
@testing.requires_testing_data
def test_triux():
"""Test TRIUX system support."""
raw = read_crop(tri_fname, (0, 0.999))
raw.fix_mag_coil_types()
# standard
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None)
assert_meg_snr(sss_py, read_crop(tri_sss_fname), 37, 700)
# cross-talk
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
cross_talk=tri_ctc_fname)
assert_meg_snr(sss_py, read_crop(tri_sss_ctc_fname), 35, 700)
# fine cal
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
calibration=tri_cal_fname)
assert_meg_snr(sss_py, read_crop(tri_sss_cal_fname), 31, 360)
# ctc+cal
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
calibration=tri_cal_fname,
cross_talk=tri_ctc_fname)
assert_meg_snr(sss_py, read_crop(tri_sss_ctc_cal_fname), 31, 350)
# regularization
sss_py = maxwell_filter(raw, coord_frame='meg', regularize='in')
sss_mf = read_crop(tri_sss_reg_fname)
assert_meg_snr(sss_py, sss_mf, 0.6, 9)
_check_reg_match(sss_py, sss_mf, 1)
# all three
sss_py = maxwell_filter(raw, coord_frame='meg', regularize='in',
calibration=tri_cal_fname,
cross_talk=tri_ctc_fname)
sss_mf = read_crop(tri_sss_ctc_cal_reg_in_fname)
assert_meg_snr(sss_py, sss_mf, 0.6, 9)
_check_reg_match(sss_py, sss_mf, 1)
# tSSS
raw = read_crop(tri_fname).fix_mag_coil_types()
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
st_duration=4., verbose=True)
assert_meg_snr(sss_py, read_crop(tri_sss_st4_fname), 700., 1600)
run_tests_if_main()
|
the-stack_106_26478 | import math
import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import torch.optim.lr_scheduler as lr_scheduler
from dataset.dataset import DFDCDataset
from resnet.res_lstm4 import createModel
###############################################################################
# config setting #
###############################################################################
# Dataset root directory and label file paths
root_dir = 'I:/data/FF++/FF++face250/'
train_label_path = 'I:/data/FF++/FF++face250/ff_train82-s.csv'
val_label_path = 'I:/data/FF++/FF++face250/ff_validate82.csv'
ckpt = ''
log_dir='./log/res_lstm_ff/'
batch_size = 32
epochs = 400
lr = 0.0001
weight_decay = 0.3
num_classes = 2
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data_transform = {
"train": transforms.Compose([transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [1, 1, 1])
]),
"val": transforms.Compose([
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [1, 1, 1])
])
}
def main():
print("using {} device.".format(device))
assert os.path.exists(root_dir), "{} path does not exist.".format(root_dir)
    # Training set
train_dataset = DFDCDataset(train_label_path, root_dir, data_transform['train'])
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
train_num = len(train_dataset)
    # Validation set
val_dataset = DFDCDataset(val_label_path, root_dir, data_transform['val'])
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
val_num = len(val_dataset)
print("using {} images for training, {} images for validation.".format(train_num,
val_num))
    # Only applicable on Linux (nw is computed and printed but not passed to the DataLoader)
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
print('Using {} dataloader workers every process'.format(nw))
    # Load the model
net = createModel()
if ckpt:
net.load_state_dict(torch.load(ckpt))
print('Load Net state_dict from pretrain')
net.to(device)
    # Regularization: split parameters into weight-decay and no-weight-decay groups
decay_gp = []
nodecay_gp = []
for k, v in net.named_modules():
# print(v)
if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
nodecay_gp.append(v.bias)
if isinstance(v, nn.BatchNorm2d):
nodecay_gp.append(v.weight)
elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
decay_gp.append(v.weight)
    print(len(list(net.named_modules())), len(decay_gp), len(nodecay_gp))
# construct an optimizer
# optimizer = torch.optim.Adam(net.parameters(), lr=lr)
optimizer = torch.optim.Adam(nodecay_gp, lr=lr)
optimizer.add_param_group({'params': decay_gp, 'weight_decay': weight_decay, 'lr': lr})
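    # Cosine-annealing multiplier for LambdaLR: decays the learning rate from
    # lr down to 0.1 * lr over `epochs` following (1 + cos(pi * x / epochs)) / 2.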
lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - 0.1) + 0.1 # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# tensorboard
tb_writer = SummaryWriter(log_dir=log_dir)
init_img = torch.zeros((1, 3, 224, 224), device=device)
tb_writer.add_graph(net, init_img)
best_acc = 0.0
train_steps = len(train_loader)
for epoch in range(epochs):
# train
net.train()
running_loss = 0.0
train_bar = tqdm(train_loader)
for step, (X, y) in enumerate(train_bar):
X, y = X.to(device), y.to(device)
optimizer.zero_grad()
logits = net(X)
label = torch.softmax(logits, dim=1)
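            # `label` feeds the (currently commented-out) MSE term below; the
            # active loss is plain cross-entropy on the logits.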
# mse = F.mse_loss(label[:, 1].float(), y.float())
# loss = F.cross_entropy(logits, y) + mse
loss = F.cross_entropy(logits, y)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1, epochs, loss)
# print('epoch', epoch, 'loss:', loss)
# validate
net.eval()
acc = 0.0 # accumulate accurate number / epoch
with torch.no_grad():
val_bar = tqdm(val_loader)
for step, (Xv, yv) in enumerate(val_bar):
Xv, yv = Xv.to(device), yv.to(device)
outputs = net(Xv)
# loss = loss_function(outputs, test_labels)
predict_y = torch.max(outputs, dim=1)[1]
acc += torch.eq(predict_y, yv.to(device)).sum().item()
val_bar.desc = "valid epoch[{}/{}]".format(epoch + 1,
epochs)
val_accurate = acc / val_num
print('[epoch %d] train_loss: %.3f val_accuracy: %.3f' %
(epoch + 1, running_loss / train_steps, val_accurate))
if val_accurate > best_acc:
best_acc = val_accurate
torch.save(net.state_dict(), './pkl/net_ff_' + str(best_acc)[0:5] + '.pkl')
scheduler.step()
tags = ["train_loss", "accuracy", "learning_rate"]
tb_writer.add_scalar(tags[0], running_loss / train_steps, epoch)
tb_writer.add_scalar(tags[1], val_accurate, epoch)
tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)
print('Finished Training')
if __name__ == '__main__':
main()
|
the-stack_106_26481 | import math
import numpy as np
import pandas as pd
import statistics
import time
import concurrent
import copy
import random
from concurrent.futures import ProcessPoolExecutor
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from formulabot.mep import Solution, Population
from tqdm import tqdm
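# This script runs a simple random hyperparameter search: each scenario draws
# population/operator sizes and crossover/mutation/kill rates at random,
# evolves an MEP Population on the training split in a worker process, scores
# the best solution on the held-out test split, and appends a row to the
# results CSV.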
def main():
source_data_path = r"C:\Users\markr\Projects\Software\FormulaBot\data\hypotenuse_01.csv"
results_path = r"C:\Users\markr\Projects\Software\FormulaBot\data\hypotenuse_01_results.csv"
scenarios_to_run = 25
df = pd.read_csv(source_data_path)
X = df[['X','Y']].to_dict(orient='records')
Y = df['out'].tolist()
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33)
rng = np.arange(scenarios_to_run)
d = {}
results = []
d['parms'] = list(df.columns[:-1])
d['X_train'] = X_train
d['y_train'] = y_train
d['X_test'] = X_test
d['y_test'] = y_test
with ProcessPoolExecutor() as executor:
for _ in rng:
d['population_size'] = random.randint(100,501)
d['operations_size'] = random.randint(10,101)
d['operands_size'] = random.randint(30,51)
d['epochs'] = random.randint(400,1001)
d['crossover_rate'] = random.randint(20, 81)/100.
d['mutation_rate'] = random.randint(1, 31)/100.
d['kill_rate'] = random.randint(1, 31)/100.
results.append(executor.submit(run_scenario, copy.deepcopy(d)))
for r in concurrent.futures.as_completed(results):
write_to_log(r, results_path)
print(f"pop:{r.result()['population_size']} | ops:{r.result()['operations_size']} | opr:{r.result()['operands_size']}")
print(f"epochs: {r.result()['epochs']}, train: {r.result()['train_score']} | test: {r.result()['test_score']}")
def write_to_log(r, fp):
with open(fp, 'a') as f:
f.write(str(r.result()['population_size']))
f.write(',')
f.write(str(r.result()['operations_size']))
f.write(',')
f.write(str(r.result()['operands_size']))
f.write(',')
f.write(str(r.result()['epochs']))
f.write(',')
f.write(str(r.result()['crossover_rate']))
f.write(',')
f.write(str(r.result()['mutation_rate']))
f.write(',')
f.write(str(r.result()['kill_rate']))
f.write(',')
f.write(str(r.result()['train_score']))
f.write(',')
f.write(str(r.result()['test_score']))
f.write(',')
f.write(str(len(r.result()['X_train'])))
f.write(',')
f.write(str(len(r.result()['X_test'])))
f.write(',')
f.write(r.result()['latex_string'])
f.write(',')
f.write(str(r.result()['calc_time']))
f.write(',')
f.write(str(id(r.result)))
f.write('\n')
def run_scenario(d):
start = time.perf_counter()
p = Population(population_size=d['population_size'],
parameters=list(d['parms']),
operations_size=d['operations_size'],
operands_size=d['operands_size'],
epochs=d['epochs'],
crossover_rate=d['crossover_rate'],
mutation_rate=d['mutation_rate'],
kill_rate=d['kill_rate'],
error_calc=mean_squared_error,
inputs=d['X_train'],
outputs=d['y_train'])
# run epochs
p.run_epochs(plot_nb=False)
end = time.perf_counter()
d['train_score'] = p.get_best_score()
d['test_score'] = p.fitness_calc(d['y_test'],
[m for m in map(p.get_best_solution().compute, d['X_test'])])
d['latex_string'] = p.get_best_solution().to_latex_string()
d['calc_time'] = round(end - start, 2)
return(d)
if __name__ == "__main__":
main()
|
the-stack_106_26482 | # encoding: utf-8
from __future__ import annotations
import functools
import inspect
import importlib
from collections import defaultdict, OrderedDict
from logging import getLogger
from typing import Any, Callable, Collection, KeysView, Optional, Union
from types import ModuleType
from ckan.common import config
import ckan.plugins as p
import ckan.model as model
from ckan.common import _, g
import ckan.lib.maintain as maintain
from ckan.types import AuthResult, AuthFunction, DataDict, Context
log = getLogger(__name__)
def get_local_functions(module: ModuleType, include_private: bool = False):
"""Return list of (name, func) tuples.
Filters out all non-callables and all the items that were
imported.
"""
return inspect.getmembers(
module,
lambda func: (inspect.isfunction(func) and
inspect.getmodule(func) is module and
(include_private or not func.__name__.startswith('_'))))
class AuthFunctions:
''' This is a private cache used by get_auth_function() and should never be
    accessed directly; we will create an instance of it and then remove it.'''
_functions: dict[str, AuthFunction] = {}
def clear(self) -> None:
''' clear any stored auth functions. '''
self._functions.clear()
def keys(self) -> KeysView[str]:
''' Return a list of known auth functions.'''
if not self._functions:
self._build()
return self._functions.keys()
def get(self, function: str) -> Optional[AuthFunction]:
''' Return the requested auth function. '''
if not self._functions:
self._build()
return self._functions.get(function)
@staticmethod
def _is_chained_auth_function(func: AuthFunction) -> bool:
'''
Helper function to check if a function is a chained auth function, i.e.
it has been decorated with the chain auth function decorator.
'''
return getattr(func, 'chained_auth_function', False)
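    # Illustrative plugin-side usage (editorial sketch, not part of this
    # module): a chained override is typically declared with the toolkit
    # decorator, e.g.
    #
    #   @ckan.plugins.toolkit.chained_auth_function
    #   def package_show(next_auth, context, data_dict):
    #       return next_auth(context, data_dict)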
def _build(self) -> None:
'''Gather the auth functions.
First get the default ones in the ckan/logic/auth directory
Rather than writing them out in full will use
importlib.import_module to load anything from ckan.auth that
looks like it might be an authorisation function
'''
module_root = 'ckan.logic.auth'
for auth_module_name in ['get', 'create', 'update', 'delete', 'patch']:
module = importlib.import_module(
'.' + auth_module_name, module_root)
for key, v in get_local_functions(module):
# Whitelist all auth functions defined in
# logic/auth/get.py as not requiring an authorized user,
# as well as ensuring that the rest do. In both cases, do
# nothing if a decorator has already been used to define
# the behaviour
if not hasattr(v, 'auth_allow_anonymous_access'):
if auth_module_name == 'get':
v.auth_allow_anonymous_access = True
else:
v.auth_allow_anonymous_access = False
self._functions[key] = v
# Then overwrite them with any specific ones in the plugins:
resolved_auth_function_plugins: dict[str, str] = {}
fetched_auth_functions = {}
chained_auth_functions = defaultdict(list)
for plugin in p.PluginImplementations(p.IAuthFunctions):
for name, auth_function in plugin.get_auth_functions().items():
if self._is_chained_auth_function(auth_function):
chained_auth_functions[name].append(auth_function)
elif name in resolved_auth_function_plugins:
raise Exception(
'The auth function %r is already implemented in %r' % (
name,
resolved_auth_function_plugins[name]
)
)
else:
resolved_auth_function_plugins[name] = plugin.name
fetched_auth_functions[name] = auth_function
for name, func_list in chained_auth_functions.items():
if (name not in fetched_auth_functions and
name not in self._functions):
raise Exception('The auth %r is not found for chained auth' % (
name))
# create the chain of functions in the correct order
for func in reversed(func_list):
if name in fetched_auth_functions:
prev_func = fetched_auth_functions[name]
else:
# fallback to chaining off the builtin auth function
prev_func = self._functions[name]
new_func = (functools.partial(func, prev_func))
# persisting attributes to the new partial function
for attribute, value in func.__dict__.items():
setattr(new_func, attribute, value)
fetched_auth_functions[name] = new_func
# Use the updated ones in preference to the originals.
self._functions.update(fetched_auth_functions)
_AuthFunctions = AuthFunctions()
#remove the class
del AuthFunctions
def clear_auth_functions_cache() -> None:
_AuthFunctions.clear()
def auth_functions_list() -> KeysView[str]:
'''Returns a list of the names of the auth functions available. Currently
this is to allow the Auth Audit to know if an auth function is available
for a given action.'''
return _AuthFunctions.keys()
def is_sysadmin(username: Optional[str]) -> bool:
''' Returns True is username is a sysadmin '''
user = _get_user(username)
return bool(user and user.sysadmin)
def _get_user(username: Optional[str]) -> Optional['model.User']:
'''
Try to get the user from g, if possible.
If not fallback to using the DB
'''
if not username:
return None
# See if we can get the user without touching the DB
try:
if g.userobj and g.userobj.name == username:
return g.userobj
except AttributeError:
# g.userobj not set
pass
except TypeError:
# c is not available (py2)
pass
except RuntimeError:
# g is not available (py3)
pass
# Get user from the DB
return model.User.get(username)
def get_group_or_org_admin_ids(group_id: Optional[str]) -> list[str]:
if not group_id:
return []
group = model.Group.get(group_id)
if not group:
return []
q = model.Session.query(model.Member.table_id) \
.filter(model.Member.group_id == group.id) \
.filter(model.Member.table_name == 'user') \
.filter(model.Member.state == 'active') \
.filter(model.Member.capacity == 'admin')
    # type_ignore_reason: all stored memberships have table_id
return [a.table_id for a in q]
def is_authorized_boolean(action: str, context: Context,
                          data_dict: Optional[DataDict] = None) -> bool:
''' runs the auth function but just returns True if allowed else False
'''
outcome = is_authorized(action, context, data_dict=data_dict)
return outcome.get('success', False)
def is_authorized(action: str, context: Context,
data_dict: Optional[DataDict]=None) -> AuthResult:
if context.get('ignore_auth'):
return {'success': True}
auth_function = _AuthFunctions.get(action)
if auth_function:
username = context.get('user')
user = _get_user(username)
if user:
# deleted users are always unauthorized
if user.is_deleted():
return {'success': False}
# sysadmins can do anything unless the auth_sysadmins_check
# decorator was used in which case they are treated like all other
# users.
elif user.sysadmin:
if not getattr(auth_function, 'auth_sysadmins_check', False):
return {'success': True}
# If the auth function is flagged as not allowing anonymous access,
# and an existing user object is not provided in the context, deny
# access straight away
if not getattr(auth_function, 'auth_allow_anonymous_access', False) \
and not context.get('auth_user_obj'):
if isinstance(auth_function, functools.partial):
name = auth_function.func.__name__
else:
name = auth_function.__name__
return {
'success': False,
'msg': 'Action {0} requires an authenticated user'.format(name)
}
return auth_function(context, data_dict or {})
else:
raise ValueError(_('Authorization function not found: %s' % action))
# these are the permissions that roles have
ROLE_PERMISSIONS: dict[str, list[str]] = OrderedDict([
('admin', ['admin', 'membership']),
('editor', ['read', 'delete_dataset', 'create_dataset',
'update_dataset', 'manage_group']),
('member', ['read', 'manage_group']),
])
def get_collaborator_capacities() -> Collection[str]:
if check_config_permission('allow_admin_collaborators'):
return ('admin', 'editor', 'member')
else:
return ('editor', 'member')
_trans_functions: dict[str, Callable[[], str]] = {
'admin': lambda: _('Admin'),
'editor': lambda: _('Editor'),
'member': lambda: _('Member'),
}
def trans_role(role: str) -> str:
return _trans_functions[role]()
def roles_list() -> list[dict[str, str]]:
''' returns list of roles for forms '''
roles = []
for role in ROLE_PERMISSIONS:
roles.append(dict(text=trans_role(role), value=role))
return roles
def roles_trans() -> dict[str, str]:
''' return dict of roles with translation '''
roles = {}
for role in ROLE_PERMISSIONS:
roles[role] = trans_role(role)
return roles
def get_roles_with_permission(permission: str) -> list[str]:
''' returns the roles with the permission requested '''
roles = []
for role in ROLE_PERMISSIONS:
permissions = ROLE_PERMISSIONS[role]
if permission in permissions or 'admin' in permissions:
roles.append(role)
return roles
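# Illustrative example (editorial note): because the 'admin' role implicitly
# grants every permission, get_roles_with_permission('read') evaluates to
# ['admin', 'editor', 'member'] with the defaults defined above.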
def has_user_permission_for_group_or_org(group_id: Optional[str],
user_name: Optional[str],
permission: str) -> bool:
''' Check if the user has the given permissions for the group, allowing for
sysadmin rights and permission cascading down a group hierarchy.
'''
if not group_id:
return False
group = model.Group.get(group_id)
if not group:
return False
group_id = group.id
# Sys admins can do anything
if is_sysadmin(user_name):
return True
user_id = get_user_id_for_username(user_name, allow_none=True)
if not user_id:
return False
if _has_user_permission_for_groups(user_id, permission, [group_id]):
return True
capacities = check_config_permission('roles_that_cascade_to_sub_groups')
assert isinstance(capacities, list)
# Handle when permissions cascade. Check the user's roles on groups higher
# in the group hierarchy for permission.
for capacity in capacities:
parent_groups = group.get_parent_group_hierarchy(type=group.type)
group_ids = [group_.id for group_ in parent_groups]
if _has_user_permission_for_groups(user_id, permission, group_ids,
capacity=capacity):
return True
return False
def _has_user_permission_for_groups(
user_id: str, permission: str, group_ids: list[str],
capacity: Optional[str]=None) -> bool:
''' Check if the user has the given permissions for the particular
group (ignoring permissions cascading in a group hierarchy).
Can also be filtered by a particular capacity.
'''
if not group_ids:
return False
# get any roles the user has for the group
q: Any = (model.Session.query(model.Member.capacity)
# type_ignore_reason: attribute has no method
.filter(model.Member.group_id.in_(group_ids)) # type: ignore
.filter(model.Member.table_name == 'user')
.filter(model.Member.state == 'active')
.filter(model.Member.table_id == user_id))
if capacity:
q = q.filter(model.Member.capacity == capacity)
# see if any role has the required permission
# admin permission allows anything for the group
for row in q:
perms = ROLE_PERMISSIONS.get(row.capacity, [])
if 'admin' in perms or permission in perms:
return True
return False
def users_role_for_group_or_org(
group_id: Optional[str], user_name: Optional[str]) -> Optional[str]:
''' Returns the user's role for the group. (Ignores privileges that cascade
in a group hierarchy.)
'''
if not group_id:
return None
group = model.Group.get(group_id)
if not group:
return None
user_id = get_user_id_for_username(user_name, allow_none=True)
if not user_id:
return None
# get any roles the user has for the group
q: Any = model.Session.query(model.Member.capacity) \
.filter(model.Member.group_id == group.id) \
.filter(model.Member.table_name == 'user') \
.filter(model.Member.state == 'active') \
.filter(model.Member.table_id == user_id)
# return the first role we find
for row in q:
return row.capacity
return None
def has_user_permission_for_some_org(
user_name: Optional[str], permission: str) -> bool:
''' Check if the user has the given permission for any organization. '''
user_id = get_user_id_for_username(user_name, allow_none=True)
if not user_id:
return False
roles = get_roles_with_permission(permission)
if not roles:
return False
# get any groups the user has with the needed role
q: Any = (model.Session.query(model.Member.group_id)
.filter(model.Member.table_name == 'user')
.filter(model.Member.state == 'active')
# type_ignore_reason: attribute has no method
.filter(model.Member.capacity.in_(roles)) # type: ignore
.filter(model.Member.table_id == user_id))
group_ids = []
for row in q:
group_ids.append(row.group_id)
# if not in any groups has no permissions
if not group_ids:
return False
# see if any of the groups are orgs
permission_exists: bool = model.Session.query(
model.Session.query(model.Group)
.filter(model.Group.is_organization == True)
.filter(model.Group.state == 'active')
# type_ignore_reason: attribute has no method
.filter(model.Group.id.in_(group_ids)).exists() # type: ignore
).scalar()
return permission_exists
def get_user_id_for_username(
user_name: Optional[str], allow_none: bool = False) -> Optional[str]:
''' Helper function to get user id '''
# first check if we have the user object already and get from there
user = _get_user(user_name)
if user:
return user.id
if allow_none:
return None
raise Exception('Not logged in user')
def can_manage_collaborators(package_id: str, user_id: str) -> bool:
'''
Returns True if a user is allowed to manage the collaborators of a given
dataset.
Currently a user can manage collaborators if:
1. Is an administrator of the organization the dataset belongs to
2. Is a collaborator with role "admin" (
assuming :ref:`ckan.auth.allow_admin_collaborators` is set to True)
3. Is the creator of the dataset and the dataset does not belong to an
organization (
requires :ref:`ckan.auth.create_dataset_if_not_in_organization`
and :ref:`ckan.auth.create_unowned_dataset`)
'''
pkg = model.Package.get(package_id)
if not pkg:
return False
owner_org = pkg.owner_org
if (not owner_org
and check_config_permission('create_dataset_if_not_in_organization')
and check_config_permission('create_unowned_dataset')
and pkg.creator_user_id == user_id):
# User is the creator of this unowned dataset
return True
if has_user_permission_for_group_or_org(
owner_org, user_id, 'membership'):
# User is an administrator of the organization the dataset belongs to
return True
# Check if user is a collaborator with admin role
return user_is_collaborator_on_dataset(user_id, pkg.id, 'admin')
def user_is_collaborator_on_dataset(
user_id: str, dataset_id: str,
capacity: Optional[Union[str, list[str]]] = None) -> bool:
'''
Returns True if the provided user is a collaborator on the provided
dataset.
If capacity is provided it restricts the check to the capacity
provided (eg `admin` or `editor`). Multiple capacities can be
provided passing a list
'''
q = model.Session.query(model.PackageMember) \
.filter(model.PackageMember.user_id == user_id) \
.filter(model.PackageMember.package_id == dataset_id)
if capacity:
if isinstance(capacity, str):
capacity = [capacity]
# type_ignore_reason: attribute has no method
q = q.filter(model.PackageMember.capacity.in_(capacity)) # type: ignore
return model.Session.query(q.exists()).scalar()
CONFIG_PERMISSIONS_DEFAULTS: dict[str, Union[bool, str]] = {
# permission and default
# these are prefixed with ckan.auth. in config to override
'anon_create_dataset': False,
'create_dataset_if_not_in_organization': True,
'create_unowned_dataset': True,
'user_create_groups': True,
'user_create_organizations': True,
'user_delete_groups': True,
'user_delete_organizations': True,
'create_user_via_api': False,
'create_user_via_web': True,
'roles_that_cascade_to_sub_groups': 'admin',
'public_activity_stream_detail': False,
'allow_dataset_collaborators': False,
'allow_admin_collaborators': False,
'allow_collaborators_to_change_owner_org': False,
'create_default_api_keys': False,
}
def check_config_permission(permission: str) -> Union[list[str], bool]:
'''Returns the configuration value for the provided permission
    Permission is a string identifying the auth permission (eg
    `anon_create_dataset`), optionally prefixed with `ckan.auth.`.
    The possible values for `permission` are the keys of
    CONFIG_PERMISSIONS_DEFAULTS. These can be overridden in the config file
by prefixing them with `ckan.auth.`.
Returns the permission value, generally True or False, except on
`roles_that_cascade_to_sub_groups` which is a list of strings.
'''
key = permission.replace('ckan.auth.', '')
if key not in CONFIG_PERMISSIONS_DEFAULTS:
return False
config_key = 'ckan.auth.' + key
value = config.get_value(config_key)
return value
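# Example lookups (sketch; the 'ckan.auth.' prefix is optional and stripped first):
#
#   check_config_permission('anon_create_dataset')                # False by default
#   check_config_permission('ckan.auth.user_create_groups')       # True unless overridden
#   check_config_permission('roles_that_cascade_to_sub_groups')   # a list, e.g. ['admin']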
@maintain.deprecated('Use auth_is_loggedin_user instead', since="2.2.0")
def auth_is_registered_user() -> bool:
'''
This function is deprecated, please use the auth_is_loggedin_user instead
'''
return auth_is_loggedin_user()
def auth_is_loggedin_user() -> bool:
''' Do we have a logged in user '''
try:
context_user = g.user
except TypeError:
context_user = None
return bool(context_user)
def auth_is_anon_user(context: Context) -> bool:
''' Is this an anonymous user?
eg Not logged in if a web request and not user defined in context
if logic functions called directly
See ckan/lib/base.py:232 for pylons context object logic
'''
context_user = context.get('user')
is_anon_user = not bool(context_user)
return is_anon_user
|
the-stack_106_26483 | """
Platform for the garadget cover component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/garadget/
"""
import logging
import voluptuous as vol
import requests
from homeassistant.components.cover import CoverDevice, PLATFORM_SCHEMA
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.const import CONF_DEVICE, CONF_USERNAME, CONF_PASSWORD,\
CONF_ACCESS_TOKEN, CONF_NAME, STATE_UNKNOWN, STATE_CLOSED, STATE_OPEN,\
CONF_COVERS
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = 'Garadget'
ATTR_SIGNAL_STRENGTH = "wifi signal strength (dB)"
ATTR_TIME_IN_STATE = "time in state"
ATTR_SENSOR_STRENGTH = "sensor reflection rate"
ATTR_AVAILABLE = "available"
STATE_OPENING = "opening"
STATE_CLOSING = "closing"
STATE_STOPPED = "stopped"
STATE_OFFLINE = "offline"
STATES_MAP = {
"open": STATE_OPEN,
"opening": STATE_OPENING,
"closed": STATE_CLOSED,
"closing": STATE_CLOSING,
"stopped": STATE_STOPPED
}
# Validation of the user's configuration
COVER_SCHEMA = vol.Schema({
vol.Optional(CONF_DEVICE): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COVERS): vol.Schema({cv.slug: COVER_SCHEMA}),
})
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Demo covers."""
covers = []
devices = config.get(CONF_COVERS, {})
_LOGGER.debug(devices)
for device_id, device_config in devices.items():
args = {
"name": device_config.get(CONF_NAME),
"device_id": device_config.get(CONF_DEVICE, device_id),
"username": device_config.get(CONF_USERNAME),
"password": device_config.get(CONF_PASSWORD),
"access_token": device_config.get(CONF_ACCESS_TOKEN)
}
covers.append(GaradgetCover(hass, args))
add_devices(covers)
class GaradgetCover(CoverDevice):
"""Representation of a demo cover."""
# pylint: disable=no-self-use, too-many-instance-attributes
def __init__(self, hass, args):
"""Initialize the cover."""
self.particle_url = 'https://api.particle.io'
self.hass = hass
self._name = args['name']
self.device_id = args['device_id']
self.access_token = args['access_token']
        self._obtained_token = False
self._username = args['username']
self._password = args['password']
self._state = STATE_UNKNOWN
self.time_in_state = None
self.signal = None
self.sensor = None
self._unsub_listener_cover = None
self._available = True
if self.access_token is None:
self.access_token = self.get_token()
self._obtained_token = True
# Lets try to get the configured name if not provided.
try:
if self._name is None:
doorconfig = self._get_variable("doorConfig")
if doorconfig["nme"] is not None:
self._name = doorconfig["nme"]
self.update()
except requests.exceptions.ConnectionError as ex:
_LOGGER.error('Unable to connect to server: %(reason)s',
dict(reason=ex))
self._state = STATE_OFFLINE
self._available = False
self._name = DEFAULT_NAME
except KeyError as ex:
_LOGGER.warning('Garadget device %(device)s seems to be offline',
dict(device=self.device_id))
self._name = DEFAULT_NAME
self._state = STATE_OFFLINE
self._available = False
def __del__(self):
"""Try to remove token."""
if self._obtained_token is True:
if self.access_token is not None:
self.remove_token()
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return True
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the device state attributes."""
data = {}
if self.signal is not None:
data[ATTR_SIGNAL_STRENGTH] = self.signal
if self.time_in_state is not None:
data[ATTR_TIME_IN_STATE] = self.time_in_state
if self.sensor is not None:
data[ATTR_SENSOR_STRENGTH] = self.sensor
if self.access_token is not None:
data[CONF_ACCESS_TOKEN] = self.access_token
return data
@property
def is_closed(self):
"""Return if the cover is closed."""
if self._state == STATE_UNKNOWN:
return None
else:
return self._state == STATE_CLOSED
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return 'garage'
def get_token(self):
"""Get new token for usage during this session."""
args = {
'grant_type': 'password',
'username': self._username,
'password': self._password
}
url = '{}/oauth/token'.format(self.particle_url)
ret = requests.post(url,
auth=('particle', 'particle'),
data=args)
return ret.json()['access_token']
def remove_token(self):
"""Remove authorization token from API."""
ret = requests.delete('{}/v1/access_tokens/{}'.format(
self.particle_url,
self.access_token),
auth=(self._username, self._password))
return ret.text
def _start_watcher(self, command):
"""Start watcher."""
_LOGGER.debug("Starting Watcher for command: %s ", command)
if self._unsub_listener_cover is None:
self._unsub_listener_cover = track_utc_time_change(
self.hass, self._check_state)
def _check_state(self, now):
"""Check the state of the service during an operation."""
self.schedule_update_ha_state(True)
def close_cover(self):
"""Close the cover."""
if self._state not in ["close", "closing"]:
ret = self._put_command("setState", "close")
self._start_watcher('close')
return ret.get('return_value') == 1
def open_cover(self):
"""Open the cover."""
if self._state not in ["open", "opening"]:
ret = self._put_command("setState", "open")
self._start_watcher('open')
return ret.get('return_value') == 1
def stop_cover(self):
"""Stop the door where it is."""
if self._state not in ["stopped"]:
ret = self._put_command("setState", "stop")
self._start_watcher('stop')
return ret['return_value'] == 1
def update(self):
"""Get updated status from API."""
try:
status = self._get_variable("doorStatus")
_LOGGER.debug("Current Status: %s", status['status'])
self._state = STATES_MAP.get(status['status'], STATE_UNKNOWN)
self.time_in_state = status['time']
self.signal = status['signal']
self.sensor = status['sensor']
            self._available = True
except requests.exceptions.ConnectionError as ex:
_LOGGER.error('Unable to connect to server: %(reason)s',
dict(reason=ex))
self._state = STATE_OFFLINE
except KeyError as ex:
_LOGGER.warning('Garadget device %(device)s seems to be offline',
dict(device=self.device_id))
self._state = STATE_OFFLINE
if self._state not in [STATE_CLOSING, STATE_OPENING]:
if self._unsub_listener_cover is not None:
self._unsub_listener_cover()
self._unsub_listener_cover = None
def _get_variable(self, var):
"""Get latest status."""
url = '{}/v1/devices/{}/{}?access_token={}'.format(
self.particle_url,
self.device_id,
var,
self.access_token,
)
ret = requests.get(url)
result = {}
for pairs in ret.json()['result'].split('|'):
key = pairs.split('=')
result[key[0]] = key[1]
return result
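    # Example of the parsing above (the reply format is an assumption inferred
    # from this code, not from Garadget documentation): a result string such as
    #   "status=open|time=3m|sensor=92|signal=-54"
    # yields {'status': 'open', 'time': '3m', 'sensor': '92', 'signal': '-54'}.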
def _put_command(self, func, arg=None):
"""Send commands to API."""
params = {'access_token': self.access_token}
if arg:
params['command'] = arg
url = '{}/v1/devices/{}/{}'.format(
self.particle_url,
self.device_id,
func)
ret = requests.post(url, data=params)
return ret.json()
|
the-stack_106_26484 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import MinicoinTestFramework
from test_framework.messages import CTransaction, FromHex, ToHex
from test_framework.util import (
assert_equal,
)
def assert_approx(v, vexp, vspan=0.00001):
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
class WalletGroupTest(MinicoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
self.rpc_timewait = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Mine some coins
self.nodes[0].generate(110)
# Get some addresses from the two nodes
addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
addrs = addr1 + addr2
# Send 1 + 0.5 coin to each address
[self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
[self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
self.nodes[0].generate(1)
self.sync_all()
# For each node, send 0.2 coins back to 0;
# - node[1] should pick one 0.5 UTXO and leave the rest
# - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
# given address, and leave the rest
txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx1 = self.nodes[1].getrawtransaction(txid1, True)
# txid1 should have 1 input and 2 outputs
assert_equal(1, len(tx1["vin"]))
assert_equal(2, len(tx1["vout"]))
# one output should be 0.2, the other should be ~0.3
v = [vout["value"] for vout in tx1["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 0.3, 0.0001)
txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx2 = self.nodes[2].getrawtransaction(txid2, True)
# txid2 should have 2 inputs and 2 outputs
assert_equal(2, len(tx2["vin"]))
assert_equal(2, len(tx2["vout"]))
# one output should be 0.2, the other should be ~1.3
v = [vout["value"] for vout in tx2["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 1.3, 0.0001)
# Empty out node2's wallet
self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
# Fill node2's wallet with 10000 outputs corresponding to the same
# scriptPubKey
for i in range(5):
raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
tx = FromHex(CTransaction(), raw_tx)
tx.vin = []
tx.vout = [tx.vout[0]] * 2000
funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx))
signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
self.nodes[0].sendrawtransaction(signed_tx['hex'])
self.nodes[0].generate(1)
self.sync_all()
# Check that we can create a transaction that only requires ~100 of our
# utxos, without pulling in all outputs and creating a transaction that
# is way too big.
assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
if __name__ == '__main__':
    WalletGroupTest().main()
|
the-stack_106_26485 | import os
import re
import pandas as pd
from extractor import extract
from voluptuous import (Schema, Required, All, Optional, Length, Any,
MultipleInvalid, Match, Coerce)
# Lookups
iso_country = pd.read_csv('./Lookups/ISO_COUNTRY.csv', dtype='str',
encoding='latin', keep_default_na=False)
cpv = pd.read_csv('./Lookups/CPV.csv', dtype='str')
ma = pd.read_csv('./Lookups/MA_MAIN_ACTIVITY.csv', dtype='str')
td = pd.read_csv('./Lookups/TD_DOCUMENT_TYPE.csv', dtype='str')
nc = pd.read_csv('./Lookups/NC_CONTRACT_NATURE.csv', dtype='str')
aa = pd.read_csv('./Lookups/AA_AUTHORITY_TYPE.csv', dtype='str')
pr = pd.read_csv('./Lookups/PR_PROC.csv', dtype='str')
ty = pd.read_csv('./Lookups/TY_TYPE_BID.csv', dtype='str')
ac = pd.read_csv('./Lookups/AC_AWARD_CRIT.csv', dtype='str')
rp = pd.read_csv('./Lookups/RP_REGULATION.csv', dtype='str')
# Allowed Currencies
currencies = ['EUR', 'BGN', 'CHF', 'USD', 'HRK', 'CZK', 'DKK', 'HUF', 'SEK',
'NOK', 'LTL', 'TRY', 'PLN', 'MKD', 'RON', 'JPY', 'ISK', 'SKK',
'LVL', 'GBP', 'MTL', 'CYP', 'EEK']
def number(s):
n = re.sub(r'\s', '', s.replace(',', '.').replace('%', ''))
return float(n)
def concatenate(lst):
return ' '.join(lst)
def flat(lst):
return lst[0]
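# Illustrative behaviour of the helpers above (sample values, not taken from a
# real TED notice):
#   number('1 234,56 %')      -> 1234.56
#   concatenate(['a', 'b'])   -> 'a b'
#   flat(['YES'])             -> 'YES'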
# Sub Schemas
value = Schema({
Optional('CURRENCY'): All(str, Any(*currencies)),
Optional(str): Any([], All(Coerce(flat), Coerce(number)),
All(Coerce(flat), str)) # Let it pass
})
contract_value = Schema({
Optional(str): value,
Optional('NUMBER_OF_YEARS'): Any([], All(Coerce(flat), Coerce(number)),
All(Coerce(flat), str)), # Let it pass
Optional('NUMBER_OF_MONTHS'): Any([], All(Coerce(flat), Coerce(number)),
All(Coerce(flat), str)) # Let it pass
})
contractor = Schema({
Optional(str): Any([], All(Coerce(flat), str)),
Optional('COUNTRY'): Any([], All(Coerce(flat), str, Length(2),
Any(*iso_country.Code)))
})
match_nuts = Match('^(' + '|'.join(iso_country.Code) + ')')
match_cpv = Match('^(' + '|'.join(cpv.CODE) + ')')
# Document Schema
schema = Schema({
Required('DOC_ID'): str,
Required('CODED_DATA'): {
Required('NOTICE_DATA'): {
Required('NO_DOC_OJS'): All(Coerce(flat), str),
Required('ORIGINAL_NUTS'): [All(str, match_nuts)],
Required('ORIGINAL_CPV'): [All(str, match_cpv)],
Required('ISO_COUNTRY'): All(Coerce(flat), str, Length(2),
Any(*iso_country.Code)),
Required('IA_URL_GENERAL'): Any([], All(Coerce(flat), str)),
Required('REF_NOTICE'): [str],
Required('VALUES_LIST'): {
Optional('GLOBAL_VALUE'): value,
Optional('CONTRACTS_VALUE'): [value]
}
},
Required('CODIF_DATA'): {
Required('DS_DATE_DISPATCH'): All(Coerce(flat), str),
Required('TD_DOCUMENT_TYPE'): All(Coerce(flat), str,
Any(*td.CODE)),
Required('AA_AUTHORITY_TYPE'): All(Coerce(flat), str,
Any(*aa.CODE)),
Required('NC_CONTRACT_NATURE'): All(Coerce(flat), str,
Any(*nc.CODE)),
Required('PR_PROC'): All(Coerce(flat), str, Any(*pr.CODE)),
Required('RP_REGULATION'): All(Coerce(flat), str, Any(*rp.CODE)),
Required('TY_TYPE_BID'): All(Coerce(flat), str, Any(*ty.CODE)),
Required('AC_AWARD_CRIT'): All(Coerce(flat), str, Any(*ac.CODE)),
Required('MA_MAIN_ACTIVITIES'): [All(str, Any(*ma.CODE))]
}
},
Required('CONTRACT'): {
Required('OTH_NOT'): All(Coerce(flat), str, Any('YES', 'NO')),
Optional('CONTRACTING_AUTHORITY'): All(Coerce(flat), str),
Optional('CONTRACT_OBJECT'): {
Optional('NUTS'): [All(str, match_nuts)],
Optional('NUTS_EXTRA'): All(Coerce(concatenate), str),
Optional('CPV_MAIN'): Any([], All(Coerce(flat), str, match_cpv)),
Optional('CONTRACT_VALUE'): contract_value,
Optional(str): Any([], All(Coerce(flat), str, Any('YES', 'NO'))),
},
Optional('AWARDS_OF_CONTRACT'): [{
Optional('CONTRACTOR'): contractor,
Optional('CONTRACT_VALUE'): contract_value,
}]
}
})
def prune(node):
if isinstance(node, list):
for n in node:
prune(n)
elif isinstance(node, dict):
for k in list(node.keys()):
if node[k]:
prune(node[k])
else:
del node[k]
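# Example (sketch): prune() strips empty values in place, e.g.
#   d = {'A': [], 'B': {'C': '', 'D': 'x'}}
#   prune(d)   # d is now {'B': {'D': 'x'}}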
if __name__ == "__main__":
Y = '2013'
M = '01'
years = ['2013', '2014', '2015', '2016']
months = ['01', '02', '03', '04', '05', '06', '07', '08',
'09', '10', '11', '12']
collection = []
for Y in years:
print(Y)
for M in months:
print(M)
# Folder containing xml files
DIR = os.path.join('/Volumes/WD/S8', Y + '-' + M)
# List xml files
files = os.listdir(DIR)
for f in files:
# Extract data from xml file
file_path = os.path.join(DIR, f)
data = extract(file_path)
try:
data = schema(data)
except MultipleInvalid as e:
print(str(e) + ' ---- file: ' + file_path)
prune(data)
collection.append(data)
print(collection)
break
|
the-stack_106_26486 | import logging
def setup_logging(debug=False):
root_logger = logging.getLogger()
    debug_formatter = logging.Formatter(
        fmt="%(asctime)s.%(msecs)03d %(levelname).4s [%(name)s] %(message)s",
        datefmt="%H:%M:%S",
    )
    logger_handle = logging.StreamHandler()
    logger_handle.setFormatter(debug_formatter)
if debug:
logger_handle.setLevel(logging.DEBUG)
else:
logger_handle.setLevel(logging.WARNING)
root_logger.addHandler(logger_handle)
root_logger.setLevel(0)
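# Example usage (sketch):
#
#   setup_logging(debug=True)
#   logging.getLogger("myapp").debug("visible because the handler level is DEBUG")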
|
the-stack_106_26487 | import numpy as np
try:
from scipy.sparse.linalg import spsolve
from scipy.sparse import coo_matrix, eye
except ImportError:
pass
from . import triangles
from .util import unitize
from .geometry import index_sparse
from .triangles import mass_properties
def filter_laplacian(mesh,
lamb=0.5,
iterations=10,
implicit_time_integration=False,
volume_constraint=True,
laplacian_operator=None):
"""
Smooth a mesh in-place using laplacian smoothing.
Articles
1 - "Improved Laplacian Smoothing of Noisy Surface Meshes"
J. Vollmer, R. Mencl, and H. Muller
2 - "Implicit Fairing of Irregular Meshes using Diffusion
and Curvature Flow". M. Desbrun, M. Meyer,
P. Schroder, A.H.B. Caltech
Parameters
------------
mesh : trimesh.Trimesh
Mesh to be smoothed in place
lamb : float
Diffusion speed constant
If 0.0, no diffusion
If > 0.0, diffusion occurs
implicit_time_integration: boolean
if False: explicit time integration
-lamb <= 1.0 - Stability Limit (Article 1)
if True: implicit time integration
-lamb no limit (Article 2)
iterations : int
Number of passes to run filter
laplacian_operator : None or scipy.sparse.coo.coo_matrix
Sparse matrix laplacian operator
Will be autogenerated if None
"""
# if the laplacian operator was not passed create it here
if laplacian_operator is None:
laplacian_operator = laplacian_calculation(mesh)
# save initial volume
if volume_constraint:
vol_ini = mesh.volume
# get mesh vertices and faces as vanilla numpy array
vertices = mesh.vertices.copy().view(np.ndarray)
faces = mesh.faces.copy().view(np.ndarray)
# Set matrix for linear system of equations
if implicit_time_integration:
dlap = laplacian_operator.shape[0]
AA = eye(dlap) + lamb * (eye(dlap) - laplacian_operator)
# Number of passes
for _index in range(iterations):
# Classic Explicit Time Integration - Article 1
if not implicit_time_integration:
dot = laplacian_operator.dot(vertices) - vertices
vertices += lamb * dot
# Implicit Time Integration - Article 2
else:
vertices = spsolve(AA, vertices)
# volume constraint
if volume_constraint:
# find the volume with new vertex positions
vol_new = triangles.mass_properties(
vertices[faces], skip_inertia=True)["volume"]
# scale by volume ratio
vertices *= ((vol_ini / vol_new) ** (1.0 / 3.0))
# assign modified vertices back to mesh
mesh.vertices = vertices
return mesh
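# Minimal usage sketch (assumes a closed `trimesh.Trimesh`; the icosphere helper
# is only used here as a convenient test shape):
#
#   import trimesh
#   mesh = trimesh.creation.icosphere(subdivisions=3)
#   filter_laplacian(mesh, lamb=0.5, iterations=10)
#   # mesh.vertices is smoothed in place; the volume constraint rescales the
#   # result back to the original volume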
def filter_humphrey(mesh,
alpha=0.1,
beta=0.5,
iterations=10,
laplacian_operator=None):
"""
Smooth a mesh in-place using laplacian smoothing
and Humphrey filtering.
Articles
"Improved Laplacian Smoothing of Noisy Surface Meshes"
J. Vollmer, R. Mencl, and H. Muller
Parameters
------------
mesh : trimesh.Trimesh
Mesh to be smoothed in place
alpha : float
Controls shrinkage, range is 0.0 - 1.0
If 0.0, not considered
If 1.0, no smoothing
beta : float
Controls how aggressive smoothing is
If 0.0, no smoothing
If 1.0, full aggressiveness
iterations : int
Number of passes to run filter
laplacian_operator : None or scipy.sparse.coo.coo_matrix
Sparse matrix laplacian operator
Will be autogenerated if None
"""
# if the laplacian operator was not passed create it here
if laplacian_operator is None:
laplacian_operator = laplacian_calculation(mesh)
# get mesh vertices as vanilla numpy array
vertices = mesh.vertices.copy().view(np.ndarray)
# save original unmodified vertices
original = vertices.copy()
# run through iterations of filter
for _index in range(iterations):
vert_q = vertices.copy()
vertices = laplacian_operator.dot(vertices)
vert_b = vertices - (alpha * original + (1.0 - alpha) * vert_q)
vertices -= (beta * vert_b + (1.0 - beta) *
laplacian_operator.dot(vert_b))
# assign modified vertices back to mesh
mesh.vertices = vertices
return mesh
def filter_taubin(mesh,
lamb=0.5,
nu=0.5,
iterations=10,
laplacian_operator=None):
"""
Smooth a mesh in-place using laplacian smoothing
and taubin filtering.
Articles
"Improved Laplacian Smoothing of Noisy Surface Meshes"
J. Vollmer, R. Mencl, and H. Muller
Parameters
------------
mesh : trimesh.Trimesh
Mesh to be smoothed in place.
lamb : float
Controls shrinkage, range is 0.0 - 1.0
nu : float
Controls dilation, range is 0.0 - 1.0
Nu shall be between 0.0 < 1.0/lambda - 1.0/nu < 0.1
iterations : int
Number of passes to run the filter
laplacian_operator : None or scipy.sparse.coo.coo_matrix
Sparse matrix laplacian operator
Will be autogenerated if None
"""
# if the laplacian operator was not passed create it here
if laplacian_operator is None:
laplacian_operator = laplacian_calculation(mesh)
# get mesh vertices as vanilla numpy array
vertices = mesh.vertices.copy().view(np.ndarray)
# run through multiple passes of the filter
for index in range(iterations):
# do a sparse dot product on the vertices
dot = laplacian_operator.dot(vertices) - vertices
# alternate shrinkage and dilation
if index % 2 == 0:
vertices += lamb * dot
else:
vertices -= nu * dot
# assign updated vertices back to mesh
mesh.vertices = vertices
return mesh
def filter_mut_dif_laplacian(mesh,
lamb=0.5,
iterations=10,
volume_constraint=True,
laplacian_operator=None):
"""
    Smooth a mesh in-place using Laplacian smoothing with a
    mutable diffusion Laplacian.
Articles
Barroqueiro, B., Andrade-Campos, A., Dias-de-Oliveira,
J., and Valente, R. (January 21, 2021).
"Bridging between topology optimization and additive
manufacturing via Laplacian smoothing." ASME. J. Mech. Des.
Parameters
------------
mesh : trimesh.Trimesh
Mesh to be smoothed in place
lamb : float
Diffusion speed constant
If 0.0, no diffusion
If > 0.0, diffusion occurs
iterations : int
Number of passes to run filter
laplacian_operator : None or scipy.sparse.coo.coo_matrix
Sparse matrix laplacian operator
Will be autogenerated if None
"""
# if the laplacian operator was not passed create it here
if laplacian_operator is None:
laplacian_operator = laplacian_calculation(mesh)
# Set volume constraint
if volume_constraint:
v_ini = mesh.volume
# get mesh vertices as vanilla numpy array
vertices = mesh.vertices.copy().view(np.ndarray)
faces = mesh.faces.copy().view(np.ndarray)
eps = 0.01 * (np.max(mesh.area_faces)**0.5)
# Number of passes
for _index in range(iterations):
        # Mutable diffusion
normals = get_vertices_normals(mesh)
qi = laplacian_operator.dot(vertices)
pi_qi = vertices - qi
adil = np.abs((normals * pi_qi).dot(np.ones((3, 1))))
adil = 1.0 / np.maximum(1e-12, adil)
lamber = np.maximum(
0.2 * lamb, np.minimum(1.0, lamb * adil / np.mean(adil)))
# Filter
dot = laplacian_operator.dot(vertices) - vertices
vertices += lamber * dot
# Volume constraint
if volume_constraint:
vol = mass_properties(vertices[faces], skip_inertia=True)["volume"]
if _index == 0:
slope = dilate_slope(vertices, faces, normals, vol, eps)
vertices += normals * slope * (v_ini - vol)
# assign modified vertices back to mesh
mesh.vertices = vertices
return mesh
def laplacian_calculation(mesh, equal_weight=True, pinned_vertices=[]):
"""
Calculate a sparse matrix for laplacian operations.
Parameters
-------------
mesh : trimesh.Trimesh
Input geometry
equal_weight : bool
If True, all neighbors will be considered equally
If False, all neighbors will be weighted by inverse distance
Returns
----------
laplacian : scipy.sparse.coo.coo_matrix
Laplacian operator
"""
# get the vertex neighbors from the cache
neighbors = mesh.vertex_neighbors
# if a node is pinned, it will average his coordinates by himself
# in practice it will not move
for i in pinned_vertices:
neighbors[i] = [i]
# avoid hitting crc checks in loops
vertices = mesh.vertices.view(np.ndarray)
# stack neighbors to 1D arrays
col = np.concatenate(neighbors)
row = np.concatenate([[i] * len(n)
for i, n in enumerate(neighbors)])
if equal_weight:
# equal weights for each neighbor
data = np.concatenate([[1.0 / len(n)] * len(n)
for n in neighbors])
else:
# umbrella weights, distance-weighted
# use dot product of ones to replace array.sum(axis=1)
ones = np.ones(3)
        # the inverse distance from each vertex to its neighbors
norms = [
1.0 / np.maximum(1e-6, np.sqrt(np.dot(
(vertices[i] - vertices[n]) ** 2, ones)))
for i, n in enumerate(neighbors)]
# normalize group and stack into single array
data = np.concatenate([i / i.sum() for i in norms])
# create the sparse matrix
matrix = coo_matrix((data, (row, col)),
shape=[len(vertices)] * 2)
return matrix
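# The operator can be built once and shared between the filters above (sketch):
#
#   lap = laplacian_calculation(mesh, equal_weight=False)
#   filter_taubin(mesh, lamb=0.5, nu=0.52, iterations=10, laplacian_operator=lap)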
def get_vertices_normals(mesh):
"""
Compute Vertex normals using equal weighting of neighbors faces.
Parameters
-------------
mesh : trimesh.Trimesh
Input geometry
Returns
----------
vertices_normals: array
Vertices normals
"""
# get mesh vertices and faces
vertices = mesh.vertices
faces = mesh.faces
# get face normals
face_normals = mesh.face_normals
# Compute Vert normals
vert_normals = index_sparse(len(vertices), faces).dot(face_normals)
return unitize(vert_normals)
def dilate_slope(vertices, faces, normals, v, eps):
"""
    Get the derivative of the dilation scalar with respect to the volume variation by finite differences.
    Thus, Vertices += vertex_normals*dilate_slope*(Initial_Volume - Shrunk_Volume)
Parameters
-------------
mesh : trimesh.Trimesh
Input geometry
vertices: mesh.vertices
faces: mesh.faces
normals: array
vertices normals
Returns
----------
dilate_slope: float
derivative
"""
    # finite difference derivative
vertices2 = vertices + normals * eps
v2 = mass_properties(vertices2[faces], skip_inertia=True)["volume"]
return (eps) / (v2 - v)
|
the-stack_106_26490 | #!/usr/bin/env python3
"""The setup script."""
from setuptools import find_packages, setup
with open('requirements.txt') as f:
requirements = f.read().strip().split('\n')
with open('README.md') as f:
long_description = f.read()
setup(
maintainer='Xdev',
maintainer_email='[email protected]',
python_requires='>=3.7',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
],
description='A high-level mapping of name/key to Xarray.Datasets.',
install_requires=requirements,
license='Apache Software License 2.0',
long_description_content_type='text/markdown',
long_description=long_description,
include_package_data=True,
keywords='xarray',
name='xcollection',
packages=find_packages(),
entry_points={},
url='https://github.com/NCAR/xcollection',
project_urls={
'Documentation': 'https://github.com/NCAR/xcollection',
'Source': 'https://github.com/NCAR/xcollection',
'Tracker': 'https://github.com/NCAR/xcollection/issues',
},
use_scm_version={
'version_scheme': 'post-release',
'local_scheme': 'dirty-tag',
},
setup_requires=['setuptools_scm', 'setuptools>=30.3.0'],
zip_safe=False,
)
|
the-stack_106_26491 | import json
import re
import sys
from collections import defaultdict
from datetime import date, datetime
from functools import wraps
import click
from .cve_api import CveApi, CveApiError
from . import __version__
CVE_RE = re.compile(r"^CVE-[12]\d{3}-\d{4,}$")
CONTEXT_SETTINGS = {
"help_option_names": ["-h", "--help"],
"max_content_width": 100,
}
def validate_cve(ctx, param, value):
if value is None:
return
if not CVE_RE.match(value):
raise click.BadParameter("invalid CVE ID.")
return value
def validate_year(ctx, param, value):
if value is None:
return
# Hopefully this code won't be around in year 10,000.
if not re.match(r"^[1-9]\d{3}$", value):
raise click.BadParameter("invalid year.")
return value
def human_ts(ts):
return datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%c")
def print_cve(cve):
click.secho(cve["cve_id"], bold=True)
click.echo(f"├─ State:\t{cve['state']}")
# CVEs reserved by other CNAs do not include information on who requested them and when.
if "requested_by" in cve:
click.echo(f"├─ Owning CNA:\t{cve['owning_cna']}")
click.echo(f"├─ Reserved by:\t{cve['requested_by']['user']} ({cve['requested_by']['cna']})")
click.echo(f"└─ Reserved on:\t{human_ts(cve['reserved'])}")
else:
click.echo(f"└─ Owning CNA:\t{cve['owning_cna']}")
def print_table(lines):
"""Print tabulated data based on the widths of the longest values in each column."""
col_widths = []
for item_index in range(len(lines[0])):
max_len_value = max(lines, key=lambda x: len(x[item_index]))
col_widths.append(len(max_len_value[item_index]))
for idx, line in enumerate(lines):
text = "".join(f"{value:<{width + 3}}" for value, width in zip(line, col_widths)).strip()
if idx == 0:
click.secho(text, bold=True)
else:
click.echo(text)
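# Example (sketch; the rows are made up): the first tuple is rendered as the
# bold header row and each column is padded to its longest value.
#
#   print_table([("CVE ID", "STATE"), ("CVE-2021-0001", "RESERVED")])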
def print_json_data(data):
click.echo(json.dumps(data, indent=4, sort_keys=True))
def print_user(user):
name = get_full_name(user)
if name:
click.echo(f"{name} — ", nl=False)
click.echo(user["username"])
click.echo(f"├─ Active:\t{bool_to_text(user['active'])}")
click.echo(f"├─ Roles:\t{', '.join(user['authority']['active_roles']) or 'None'}")
# Time values are not returned when creating users; secrets (API tokens) are however.
if "time" in user:
click.echo(f"├─ Created:\t{human_ts(user['time']['created'])}")
click.echo(f"└─ Modified:\t{human_ts(user['time']['modified'])}")
elif "secret" in user:
click.echo(f"└─ API token:\t{user['secret']}")
def get_full_name(user_data):
# If no name values are defined on a user, the entire `name` object is not returned in the
# user data response; see https://github.com/CVEProject/cve-services/issues/436.
name = user_data.get("name", {})
if name:
return f"{name.get('first', '')} {name.get('last', '')}".strip() or None
def bool_to_text(value):
if value is None:
return "N/A"
return "Yes" if value else "No"
def natural_cve_sort(cve):
if not cve:
return []
return [int(x) for x in cve.split("-")[1:]]
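# Example: "CVE-2021-900" sorts before "CVE-2021-10000" (numeric, not lexicographic):
#
#   sorted(["CVE-2021-10000", "CVE-2021-900"], key=natural_cve_sort)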
def handle_cve_api_error(func):
"""Decorator for catching CVE API exceptions and formatting the error message."""
@wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except CveApiError as exc:
error, _, details = str(exc).partition("; returned error: ")
click.secho("ERROR: ", bold=True, nl=False)
click.echo(error)
if details:
click.secho("DETAILS: ", bold=True, nl=False)
click.echo(details)
sys.exit(1)
return wrapped
class Config:
def __init__(self, username, org, api_key, env, api_url, interactive):
self.username = username
self.org = org
self.api_key = api_key
self.env = env
self.api_url = api_url
self.interactive = interactive
self.cve_api = self.init_cve_api()
def init_cve_api(self):
return CveApi(
username=self.username,
org=self.org,
api_key=self.api_key,
env=self.env,
url=self.api_url,
)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option(
"-u", "--username", envvar="CVE_USER", required=True, help="Your username (env var: CVE_USER)"
)
@click.option(
"-o",
"--org",
envvar="CVE_ORG",
required=True,
help="Your CNA organization short name (env var: CVE_ORG)",
)
@click.option(
"-a",
"--api-key",
envvar="CVE_API_KEY",
required=True,
help="Your API key (env var: CVE_API_KEY)",
)
@click.option(
"-e",
"--env",
envvar="CVE_ENVIRONMENT",
default="prod",
type=click.Choice(["prod", "dev"]),
help="Select deployment environment to query (env var: CVE_ENVIRONMENT)",
)
@click.option(
"--api-url",
envvar="CVE_API_URL",
help="Provide arbitrary URL for the CVE API (env var: CVE_API_URL)",
)
@click.option(
"-i",
"--interactive",
envvar="CVE_INTERACTIVE",
default=False,
is_flag=True,
help="Confirm create/update actions before execution (env var: CVE_INTERACTIVE)",
)
@click.version_option(
__version__, "-V", "--version", prog_name="cvelib", message="%(prog)s %(version)s"
)
@click.pass_context
def cli(ctx, username, org, api_key, env, api_url, interactive):
"""A CLI interface for the CVE Services API."""
ctx.obj = Config(username, org, api_key, env, api_url, interactive)
@cli.command()
@click.option(
"-r",
"--random",
default=False,
show_default=True,
is_flag=True,
help="Reserve multiple CVE IDs non-sequentially.",
)
@click.option(
"-y",
"--year",
default=lambda: str(date.today().year),
callback=validate_year,
help="Reserve CVE ID(s) for a given year.",
show_default="current year",
)
@click.option("--raw", "print_raw", default=False, is_flag=True, help="Print response JSON.")
@click.argument("count", default=1, type=click.IntRange(min=1))
@click.pass_context
@handle_cve_api_error
def reserve(ctx, random, year, count, print_raw):
"""Reserve one or more CVE IDs. COUNT is the number of CVEs to reserve; defaults to 1.
CVE IDs can be reserved one by one (the lowest IDs are reserved first) or in batches of
multiple IDs per single request. When reserving multiple IDs, you can request those IDs to be
generated sequentially (default) or non-sequentially (random IDs are selected from your CVE ID
range).
For more information, see: "Developer Guide to CVE Services API" (https://git.io/JLcmZ)
"""
if random and count > 10:
raise click.BadParameter("requesting non-sequential CVE IDs is limited to 10 per request.")
if ctx.obj.interactive:
click.echo("You are about to reserve ", nl=False)
if count > 1:
click.secho(
f"{count} {'non-sequential' if random else 'sequential'} ", bold=True, nl=False
)
click.echo("CVE IDs for year ", nl=False)
else:
click.secho("1 ", bold=True, nl=False)
click.echo("CVE ID for year ", nl=False)
click.secho(year, bold=True, nl=False)
click.echo(" that will be owned by the ", nl=False)
click.secho(ctx.obj.org, bold=True, nl=False)
click.echo(" CNA org.")
if not click.confirm("This operation cannot be reversed; do you want to continue?"):
click.echo("Exiting...")
sys.exit(0)
click.echo()
cve_api = ctx.obj.cve_api
cve_data, remaining_quota = cve_api.reserve(count, random, year)
if print_raw:
print_json_data(cve_data)
return
click.echo("Reserved the following CVE ID(s):\n")
for cve in cve_data["cve_ids"]:
print_cve(cve)
click.echo(f"\nRemaining quota: {remaining_quota}")
@cli.command(name="show")
@click.option("--raw", "print_raw", default=False, is_flag=True, help="Print response JSON.")
@click.argument("cve_id", callback=validate_cve)
@click.pass_context
@handle_cve_api_error
def show_cve(ctx, print_raw, cve_id):
"""Display a specific CVE ID owned by your CNA."""
cve_api = ctx.obj.cve_api
cve = cve_api.show_cve(cve_id=cve_id)
if print_raw:
print_json_data(cve)
else:
print_cve(cve)
@cli.command(name="list")
@click.option("--raw", "print_raw", default=False, is_flag=True, help="Print response JSON.")
@click.option(
"--sort-by",
type=click.Choice(["cve_id", "state", "user", "reserved"], case_sensitive=False),
default="cve_id",
help="Sort output.",
)
@click.option("--year", callback=validate_year, help="Filter by year.")
@click.option(
"--state",
type=click.Choice(["reserved", "public", "reject"], case_sensitive=False),
help="Filter by reservation state.",
)
@click.option(
"--reserved-lt", type=click.DateTime(), help="Filter by reservation time before timestamp."
)
@click.option(
"--reserved-gt", type=click.DateTime(), help="Filter by reservation time after timestamp."
)
@click.pass_context
@handle_cve_api_error
def list_cves(ctx, print_raw, sort_by, **query):
"""Filter and list reserved CVE IDs owned by your CNA."""
cve_api = ctx.obj.cve_api
cves = list(cve_api.list_cves(**query))
if print_raw:
print_json_data(cves)
return
if not cves:
click.echo("No CVEs found...")
return
if sort_by:
key = sort_by.lower()
if key == "user":
cves.sort(key=lambda x: x["requested_by"]["user"])
elif key == "cve_id":
cves.sort(key=lambda x: natural_cve_sort(x["cve_id"]))
elif key == "reserved_asc":
cves.sort(key=lambda x: x["reserved"])
elif key == "state":
cves.sort(key=lambda x: x["state"])
lines = [("CVE ID", "STATE", "OWNING CNA", "REQUESTED BY", "RESERVED")]
for cve in cves:
lines.append(
(
cve["cve_id"],
cve["state"],
cve["owning_cna"],
f"{cve['requested_by']['user']} ({cve['requested_by']['cna']})",
human_ts(cve["reserved"]),
)
)
print_table(lines)
@cli.command()
@click.option("--raw", "print_raw", default=False, is_flag=True, help="Print response JSON.")
@click.pass_context
@handle_cve_api_error
def quota(ctx, print_raw):
"""Display the available CVE ID quota for your CNA.
\b
- "Limit": how many CVE IDs your organization can have in the RESERVED state at once.
- "Reserved": the number of CVE IDs that are in the RESERVED state across all years.
- "Available": the number of CVE IDs that can be reserved (that is, "Limit" - "Reserved")
"""
cve_api = ctx.obj.cve_api
cve_quota = cve_api.quota()
if print_raw:
print_json_data(cve_quota)
return
click.echo("CNA quota for ", nl=False)
click.secho(f"{ctx.obj.org}", bold=True, nl=False)
click.echo(f":")
click.echo(f"├─ Limit:\t{cve_quota['id_quota']}")
click.echo(f"├─ Reserved:\t{cve_quota['total_reserved']}")
click.echo(f"└─ Available:\t{cve_quota['available']}")
@cli.group(name="user", invoke_without_command=True)
@click.option(
"-u",
"--username",
help="Specify the user to show.",
show_default="Current user specified in -u/--username/CVE_USER",
)
@click.option("--raw", "print_raw", default=False, is_flag=True, help="Print response JSON.")
@click.pass_context
@handle_cve_api_error
def show_user(ctx, username, print_raw):
"""Show information about a user."""
if ctx.invoked_subcommand is not None:
return
cve_api = ctx.obj.cve_api
if not username:
username = cve_api.username
user = cve_api.show_user(username)
if print_raw:
print_json_data(user)
else:
print_user(user)
@show_user.command()
@click.option(
"-u",
"--username",
help="User whose API token should be reset (only ADMIN role users can update other users).",
show_default="Current user specified in global -u/--username/CVE_USER",
)
@click.option("--raw", "print_raw", default=False, is_flag=True, help="Print response JSON.")
@click.pass_context
@handle_cve_api_error
def reset_token(ctx, username, print_raw):
"""Reset a user's personal access token (API token).
This token is used to authenticate each request to the CVE API.
"""
cve_api = ctx.obj.cve_api
if not username:
username = cve_api.username
api_key = cve_api.reset_api_token(username)
if print_raw:
print_json_data(api_key)
return
click.echo(f"New API token for ", nl=False)
click.secho(username, bold=True, nl=False)
click.echo(":\n")
click.secho(api_key["API-secret"], bold=True)
click.echo("\nMake sure to copy your new API token; you won't be able to access it again!")
@show_user.command(name="update")
@click.option(
"-u",
"--username",
help="Username of the user being updated (only ADMIN role users can update other users).",
show_default="Current user specified in global -u/--username/CVE_USER",
)
@click.option(
"--mark-active/--mark-inactive", "active", default=None, help="Mark user as active or inactive."
)
@click.option("--new-username", help="Update username.")
@click.option("--name-first", help="Update first name.")
@click.option("--name-last", help="Update last name.")
@click.option("--add-role", help="Add role.", type=click.Choice(CveApi.USER_ROLES))
@click.option("--remove-role", help="Remove role.", type=click.Choice(CveApi.USER_ROLES))
@click.option("--raw", "print_raw", default=False, is_flag=True, help="Print response JSON.")
@click.pass_context
@handle_cve_api_error
def update_user(ctx, username, **opts_data):
"""Update a user.
To reset a user's API token, use `cve user reset-token`.
"""
print_raw = opts_data.pop("print_raw")
cve_api = ctx.obj.cve_api
if not username:
username = cve_api.username
user_updates = {}
for opt, value in opts_data.items():
if value is not None:
if opt.startswith("name"):
opt = opt.replace("_", ".")
elif opt.endswith("role"):
opt = "active_roles." + opt.replace("_role", "")
elif opt == "active":
# Convert boolean to string since this data is passed as query params
value = str(value).lower()
user_updates[opt] = value
if not user_updates:
raise click.UsageError("No updates were provided.")
if ctx.obj.interactive:
click.echo("You are about to update the ", nl=False)
click.secho(username, bold=True, nl=False)
click.echo(" user with the following changes:\n")
for key, value in user_updates.items():
click.echo(f"- {key}: ", nl=False)
click.secho(str(value), bold=True)
if not click.confirm("\nDo you want to continue?"):
click.echo("Exiting...")
sys.exit(0)
click.echo()
updated_user = cve_api.update_user(username, **user_updates)
if print_raw:
print_json_data(updated_user)
else:
click.echo("User updated.")
@show_user.command(name="create")
@click.option("-u", "--username", default="", required=True, help="Set username.")
@click.option("--name-first", default="", help="Set first name.")
@click.option("--name-last", default="", help="Set last name.")
@click.option(
"--role", "roles", help="Set role.", multiple=True, type=click.Choice(CveApi.USER_ROLES)
)
@click.option("--raw", "print_raw", default=False, is_flag=True, help="Print response JSON.")
@click.pass_context
@handle_cve_api_error
def create_user(ctx, username, name_first, name_last, roles, print_raw):
"""Create a user in your organization.
This action is restricted to users with the ADMIN role.
Note: Once a user is created, they cannot be removed, only marked as inactive. Only create
users when you really need them.
"""
user_data = defaultdict(dict)
if username:
user_data["username"] = username
if name_first:
user_data["name"]["first"] = name_first
if name_last:
user_data["name"]["last"] = name_last
if roles:
user_data["authority"]["active_roles"] = list(roles)
if ctx.obj.interactive:
click.echo("You are about to create the following user under your ", nl=False)
click.secho(ctx.obj.org, bold=True, nl=False)
click.echo(f" org:\n\nUsername:\t", nl=False)
click.secho(username, bold=True)
click.echo("Full name:\t", nl=False)
        click.secho(f"{name_first} {name_last}".strip() or "None", bold=True)
click.echo(f"Roles:\t\t", nl=False)
click.secho(", ".join(roles) or "None", bold=True)
click.echo("\nThis action cannot be undone; created users can only be marked as inactive.")
if not click.confirm("Do you want to continue?"):
click.echo("Exiting...")
sys.exit(0)
click.echo()
cve_api = ctx.obj.cve_api
created_user = cve_api.create_user(**user_data)["created"]
if print_raw:
print_json_data(created_user)
return
click.echo("Created user:\n")
print_user(created_user)
click.echo("\nMake sure to copy the returned API token; you won't be able to access it again!")
@cli.group(name="org", invoke_without_command=True)
@click.option("--raw", "print_raw", default=False, is_flag=True, help="Print response JSON.")
@click.pass_context
@handle_cve_api_error
def show_org(ctx, print_raw):
"""Show information about your organization."""
if ctx.invoked_subcommand is not None:
return
cve_api = ctx.obj.cve_api
org_data = cve_api.show_org()
if print_raw:
print_json_data(org_data)
return
click.echo(f"{org_data['name']} — {org_data['short_name']}")
click.echo(f"├─ Roles:\t{', '.join(org_data['authority']['active_roles']) or 'None'}")
click.echo(f"├─ Created:\t{human_ts(org_data['time']['created'])}")
click.echo(f"└─ Modified:\t{human_ts(org_data['time']['modified'])}")
@show_org.command()
@click.option("--raw", "print_raw", default=False, is_flag=True, help="Print response JSON.")
@click.pass_context
@handle_cve_api_error
def users(ctx, print_raw):
"""List all users in your organization."""
cve_api = ctx.obj.cve_api
org_users = list(cve_api.list_users())
if print_raw:
print_json_data(org_users)
return
lines = []
for user in org_users:
lines.append(
(
user["username"],
str(get_full_name(user)),
", ".join(user["authority"]["active_roles"]) or "None",
bool_to_text(user["active"]),
human_ts(user["time"]["created"]),
human_ts(user["time"]["modified"]),
)
)
lines.sort(key=lambda x: x[0]) # Sort by username
# Add header after sorting
lines.insert(0, ("USERNAME", "NAME", "ROLES", "ACTIVE", "CREATED", "MODIFIED"))
print_table(lines)
@cli.command()
@click.pass_context
def ping(ctx):
"""Ping the CVE Services API to see if it is up."""
cve_api = ctx.obj.cve_api
ok, error_msg = cve_api.ping()
click.echo(f"CVE API Status — {cve_api.url}\n└─ ", nl=False)
if ok:
click.secho(f"OK", fg="green")
else:
click.secho("ERROR:", bold=True, nl=False)
click.echo(f" {error_msg}")
|
the-stack_106_26492 | from pandac.PandaModules import *
from toontown.toonbase.ToontownGlobals import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase import DirectObject
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import types
from toontown.toon import NPCToons
from toontown.toon import NPCFriendPanel
from toontown.toonbase import ToontownBattleGlobals
class TownBattleSOSPanel(DirectFrame, StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('TownBattleSOSPanel')
def __init__(self, doneEvent):
DirectFrame.__init__(self, relief=None)
self.initialiseoptions(TownBattleSOSPanel)
StateData.StateData.__init__(self, doneEvent)
self.friends = {}
self.NPCFriends = {}
self.textRolloverColor = Vec4(1, 1, 0, 1)
self.textDownColor = Vec4(0.5, 0.9, 1, 1)
self.textDisabledColor = Vec4(0.4, 0.8, 0.4, 1)
self.bldg = 0
self.chosenNPCToons = []
return
def load(self):
if self.isLoaded == 1:
return None
self.isLoaded = 1
bgd = loader.loadModel('phase_3.5/models/gui/frame')
gui = loader.loadModel('phase_3.5/models/gui/frame4names')
scrollGui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
backGui = loader.loadModel('phase_3.5/models/gui/battle_gui')
self['image'] = bgd
self['image_pos'] = (0.0, 0.1, -0.08)
self.setScale(0.3)
self.title = DirectLabel(parent=self, relief=None, text=TTLocalizer.TownBattleSOSNoFriends, text_scale=0.4, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), pos=(0.0, 0.0, 1.5))
self.NPCFriendPanel = NPCFriendPanel.NPCFriendPanel(parent=self, doneEvent=self.doneEvent)
self.NPCFriendPanel.setPos(-0.75, 0, -0.15)
self.NPCFriendPanel.setScale(0.325)
self.NPCFriendsLabel = DirectLabel(parent=self, relief=None, text=TTLocalizer.TownBattleSOSNPCFriends, text_scale=0.3, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), pos=(-0.75, 0.0, -2.0))
self.scrollList = DirectScrolledList(parent=self, relief=None, image=gui.find('**/frame4names'), image_scale=(0.11, 1, 0.1), text=TTLocalizer.FriendsListPanelOnlineFriends, text_scale=0.04, text_pos=(-0.02, 0.275), text_fg=(0, 0, 0, 1), incButton_image=(scrollGui.find('**/FndsLst_ScrollUp'),
scrollGui.find('**/FndsLst_ScrollDN'),
scrollGui.find('**/FndsLst_ScrollUp_Rllvr'),
scrollGui.find('**/FndsLst_ScrollUp')), incButton_relief=None, incButton_pos=(0.0, 0.0, -0.3), incButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), incButton_scale=(1.0, 1.0, -1.0), decButton_image=(scrollGui.find('**/FndsLst_ScrollUp'),
scrollGui.find('**/FndsLst_ScrollDN'),
scrollGui.find('**/FndsLst_ScrollUp_Rllvr'),
scrollGui.find('**/FndsLst_ScrollUp')), decButton_relief=None, decButton_pos=(0.0, 0.0, 0.175), decButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), itemFrame_pos=(-0.17, 0.0, 0.11), itemFrame_relief=None, numItemsVisible=9, items=[], pos=(2.4, 0.0, 0.025), scale=3.5)
clipper = PlaneNode('clipper')
clipper.setPlane(Plane(Vec3(-1, 0, 0), Point3(0.32, 0, 0)))
clipNP = self.scrollList.component('itemFrame').attachNewNode(clipper)
self.scrollList.component('itemFrame').setClipPlane(clipNP)
self.close = DirectButton(parent=self, relief=None, image=(backGui.find('**/PckMn_BackBtn'), backGui.find('**/PckMn_BackBtn_Dn'), backGui.find('**/PckMn_BackBtn_Rlvr')), pos=(2.3, 0.0, -1.65), scale=3, text=TTLocalizer.TownBattleSOSBack, text_scale=0.05, text_pos=(0.01, -0.012), text_fg=Vec4(0, 0, 0.8, 1), command=self.__close)
gui.removeNode()
scrollGui.removeNode()
backGui.removeNode()
bgd.removeNode()
self.hide()
return
def unload(self):
if self.isLoaded == 0:
return None
self.isLoaded = 0
self.exit()
del self.title
del self.scrollList
del self.close
del self.friends
del self.NPCFriends
DirectFrame.destroy(self)
return None
def makeFriendButton(self, friendPair):
friendId, flags = friendPair
handle = base.cr.playerFriendsManager.identifyFriend(friendId)
        if handle is None:
base.cr.fillUpFriendsMap()
return
friendName = handle.getName()
fg = Vec4(0.0, 0.0, 0.0, 1.0)
if handle.isPet():
com = self.__chosePet
else:
com = self.__choseFriend
return DirectButton(relief=None, text=friendName, text_scale=0.04, text_align=TextNode.ALeft, text_fg=fg, text1_bg=self.textDownColor, text2_bg=self.textRolloverColor, text3_fg=self.textDisabledColor, command=com, extraArgs=[friendId, friendName])
def makeNPCFriendButton(self, NPCFriendId, numCalls):
if NPCFriendId not in TTLocalizer.NPCToonNames:
return None
friendName = TTLocalizer.NPCToonNames[NPCFriendId]
friendName += ' %d' % numCalls
fg = Vec4(0.0, 0.0, 0.0, 1.0)
return DirectButton(relief=None, text=friendName, text_scale=0.04, text_align=TextNode.ALeft, text_fg=fg, text1_bg=self.textDownColor, text2_bg=self.textRolloverColor, text3_fg=self.textDisabledColor, command=self.__choseNPCFriend, extraArgs=[NPCFriendId])
def enter(self, canLure = 1, canTrap = 1):
if self.isEntered == 1:
return None
self.isEntered = 1
if self.isLoaded == 0:
self.load()
self.canLure = canLure
self.canTrap = canTrap
self.factoryToonIdList = None
messenger.send('SOSPanelEnter', [self])
self.__updateScrollList()
self.__updateNPCFriendsPanel()
self.__updateTitleText()
self.show()
self.accept('friendOnline', self.__friendOnline)
self.accept('friendOffline', self.__friendOffline)
self.accept('friendsListChanged', self.__friendsListChanged)
self.accept('friendsMapComplete', self.__friendsListChanged)
return
def exit(self):
if self.isEntered == 0:
return None
self.isEntered = 0
self.hide()
self.ignore('friendOnline')
self.ignore('friendOffline')
self.ignore('friendsListChanged')
self.ignore('friendsMapComplete')
messenger.send(self.doneEvent)
return None
def __close(self):
doneStatus = {}
doneStatus['mode'] = 'Back'
messenger.send(self.doneEvent, [doneStatus])
def __choseFriend(self, friendId, friendName):
doneStatus = {}
doneStatus['mode'] = 'Friend'
doneStatus['friend'] = friendId
messenger.send(self.doneEvent, [doneStatus])
def __chosePet(self, petId, petName):
doneStatus = {}
doneStatus['mode'] = 'Pet'
doneStatus['petId'] = petId
doneStatus['petName'] = petName
messenger.send(self.doneEvent, [doneStatus])
def __choseNPCFriend(self, friendId):
doneStatus = {}
doneStatus['mode'] = 'NPCFriend'
doneStatus['friend'] = friendId
self.chosenNPCToons.append(friendId)
messenger.send(self.doneEvent, [doneStatus])
def setFactoryToonIdList(self, toonIdList):
self.factoryToonIdList = toonIdList[:]
def __updateScrollList(self):
newFriends = []
battlePets = base.config.GetBool('want-pets-in-battle', 1)
if base.wantPets and battlePets == 1 and base.localAvatar.hasPet():
newFriends.append((base.localAvatar.getPetId(), 0))
if not self.bldg or self.factoryToonIdList is not None:
for friendPair in base.localAvatar.friendsList:
if base.cr.isFriendOnline(friendPair[0]):
if self.factoryToonIdList is None or friendPair[0] in self.factoryToonIdList:
newFriends.append(friendPair)
if hasattr(base.cr, 'playerFriendsManager'):
for avatarId in base.cr.playerFriendsManager.getAllOnlinePlayerAvatars():
if not base.cr.playerFriendsManager.askAvatarKnownElseWhere(avatarId):
newFriends.append((avatarId, 0))
for friendPair in self.friends.keys():
if friendPair not in newFriends:
friendButton = self.friends[friendPair]
self.scrollList.removeItem(friendButton)
if not friendButton.isEmpty():
friendButton.destroy()
del self.friends[friendPair]
for friendPair in newFriends:
if friendPair not in self.friends:
friendButton = self.makeFriendButton(friendPair)
if friendButton:
self.scrollList.addItem(friendButton)
self.friends[friendPair] = friendButton
return
def __updateNPCFriendsPanel(self):
self.NPCFriends = {}
for friend, count in base.localAvatar.NPCFriendsDict.items():
track = NPCToons.getNPCTrack(friend)
if track == ToontownBattleGlobals.LURE_TRACK and self.canLure == 0 or track == ToontownBattleGlobals.TRAP_TRACK and self.canTrap == 0:
self.NPCFriends[friend] = 0
else:
self.NPCFriends[friend] = count
self.NPCFriendPanel.update(self.NPCFriends, fCallable=1)
def __updateTitleText(self):
isEmpty = (len(self.friends) == 0 and len(self.NPCFriends) == 0)
if isEmpty:
self.title['text'] = TTLocalizer.TownBattleSOSNoFriends
else:
self.title['text'] = TTLocalizer.TownBattleSOSWhichFriend
def __friendOnline(self, doId, commonChatFlags, whitelistChatFlags):
self.__updateScrollList()
self.__updateTitleText()
def __friendOffline(self, doId):
self.__updateScrollList()
self.__updateTitleText()
def __friendsListChanged(self):
self.__updateScrollList()
self.__updateTitleText()
|
the-stack_106_26493 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''Test generateblock rpc.
'''
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class GenerateBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
self.log.info('Generate an empty block to address')
address = node.getnewaddress()
hash = node.generateblock(output=address, transactions=[])['hash']
block = node.getblock(blockhash=hash, verbose=2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][1]
['scriptPubKey']['addresses'][0], address)
self.log.info('Generate an empty block to a descriptor')
hash = node.generateblock('addr(' + address + ')', [])['hash']
block = node.getblock(blockhash=hash, verbosity=2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][1]
['scriptPubKey']['addresses'][0], address)
self.log.info(
'Generate an empty block to a combo descriptor with compressed pubkey')
combo_key = '0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'
combo_address = 'bchreg:qp63uahgrxged4z5jswyt5dn5v3lzsem6c6mz8vuwd'
hash = node.generateblock('combo(' + combo_key + ')', [])['hash']
block = node.getblock(hash, 2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][1]['scriptPubKey']
['addresses'][0], combo_address)
self.log.info(
'Generate an empty block to a combo descriptor with uncompressed pubkey')
combo_key = '0408ef68c46d20596cc3f6ddf7c8794f71913add807f1dc55949fa805d764d191c0b7ce6894c126fce0babc6663042f3dde9b0cf76467ea315514e5a6731149c67'
combo_address = 'bchreg:qqmagqc48ln8p7zk6ez2h64amcamr86qwqezwt52uy'
hash = node.generateblock('combo(' + combo_key + ')', [])['hash']
block = node.getblock(hash, 2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][1]['scriptPubKey']
['addresses'][0], combo_address)
# Generate 110 blocks to spend
node.generatetoaddress(110, address)
# Generate some extra mempool transactions to verify they don't get
# mined
for i in range(10):
node.sendtoaddress(address, 0.1)
self.log.info('Generate block with txid')
txid = node.sendtoaddress(address, 100)
hash = node.generateblock(address, [txid])['hash']
block = node.getblock(hash, 1)
assert_equal(len(block['tx']), 2)
assert_equal(block['tx'][1], txid)
self.log.info('Generate block with raw tx')
utxos = node.listunspent(addresses=[address])
raw = node.createrawtransaction(
[{'txid': utxos[0]['txid'], 'vout':utxos[0]['vout']}], [{address: 100}])
signed_raw = node.signrawtransactionwithwallet(raw)['hex']
hash = node.generateblock(address, [signed_raw])['hash']
block = node.getblock(hash, 1)
assert_equal(len(block['tx']), 2)
txid = block['tx'][1]
assert_equal(node.gettransaction(txid)['hex'], signed_raw)
self.log.info('Fail to generate block with out of order txs')
raw1 = node.createrawtransaction(
[{'txid': txid, 'vout': 0}], [{address: 99.99}])
signed_raw1 = node.signrawtransactionwithwallet(raw1)['hex']
txid1 = node.sendrawtransaction(signed_raw1)
raw2 = node.createrawtransaction(
[{'txid': txid1, 'vout': 0}], [{address: 99.9}])
signed_raw2 = node.signrawtransactionwithwallet(raw2)['hex']
txid2 = node.sendrawtransaction(signed_raw2)
# Reversed CTOR
txids = sorted([txid1, txid2], reverse=True)
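        # Illustrative note: CTOR expects the non-coinbase transactions in
        # ascending txid order, so reverse-sorting the two txids guarantees the
        # 'tx-ordering' rejection asserted below.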
assert_raises_rpc_error(-25,
'TestBlockValidity failed: tx-ordering',
node.generateblock,
address,
txids)
self.log.info('Fail to generate block with txid not in mempool')
missing_txid = '0000000000000000000000000000000000000000000000000000000000000000'
assert_raises_rpc_error(-5,
'Transaction ' + missing_txid + ' not in mempool.',
node.generateblock,
address,
[missing_txid])
self.log.info('Fail to generate block with invalid raw tx')
invalid_raw_tx = '0000'
assert_raises_rpc_error(-22,
'Transaction decode failed for ' + invalid_raw_tx,
node.generateblock,
address,
[invalid_raw_tx])
self.log.info('Fail to generate block with invalid address/descriptor')
assert_raises_rpc_error(-5,
'Invalid address or descriptor',
node.generateblock,
'1234',
[])
self.log.info('Fail to generate block with a ranged descriptor')
ranged_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0/*)'
assert_raises_rpc_error(
-8,
'Ranged descriptor not accepted. Maybe pass through deriveaddresses first?',
node.generateblock,
ranged_descriptor,
[])
self.log.info(
'Fail to generate block with a descriptor missing a private key')
child_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0\'/0)'
assert_raises_rpc_error(-5,
'Cannot derive script without private keys',
node.generateblock,
child_descriptor,
[])
if __name__ == '__main__':
GenerateBlockTest().main()
|
the-stack_106_26495 | # Author: Acer Zhang
# Datetime: 2021/10/27
# Copyright belongs to the author.
# Please indicate the source for reprinting.
from setuptools import setup
from setuptools import find_packages
__version__ = "0.1"
setup(
name='AgentEnc',
version=__version__,
packages=['agentenc', 'agentenc.ops', 'agentenc.encryptors'],
url='https://github.com/AgentMaker/AgentEncryption',
license='Apache2',
author='GT-ZhangAcer',
author_email='[email protected]',
    description='PaddlePaddle model encryption library',
install_requires=["ppqi",
"pycryptodome"],
python_requires='>3.5',
include_package_data=True,
)
|
the-stack_106_26496 | import logging
from modules.location import Location, gen_plr
import const
class_name = "Outside"
class Outside(Location):
prefix = "o"
def __init__(self, server):
super().__init__(server)
self.commands.update({"r": self.room, "gr": self.get_room})
def get_room(self, msg, client):
online = self.server.online.copy()
num = 1
while True:
i = 0
for tmp in online:
if tmp.room == f"{msg[2]['lid']}_{msg[2]['gid']}_{num}":
i += 1
if i >= const.ROOM_LIMIT:
num += 1
else:
break
room = f"{msg[2]['lid']}_{msg[2]['gid']}_{num}"
if client.room:
logging.error(f"Already in room, uid - {client.uid}")
return
client.room = room
client.position = (-1.0, -1.0)
client.action_tag = ""
client.state = 0
client.dimension = 4
plr = gen_plr(client, self.server)
for tmp in self.server.online.copy():
if tmp.room != client.room:
continue
tmp.send(["o.r.jn", {"plr": plr}])
tmp.send([client.room, client.uid], type_=16)
client.send(["o.gr", {"rid": client.room}])
def room(self, msg, client):
subcommand = msg[1].split(".")[2]
if subcommand == "info":
rmmb = []
room = msg[0]
for tmp in self.server.online.copy():
if tmp.room != room:
continue
rmmb.append(gen_plr(tmp, self.server))
client.send(["o.r.info", {"rmmb": rmmb, "evn": None}])
else:
super().room(msg, client)
|
the-stack_106_26498 | #!/usr/bin/env python
# coding: utf-8
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
Trainer,
TrainingArguments,
TrainerCallback,
EarlyStoppingCallback
)
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from transformers.trainer_callback import TrainerControl
from datasets import load_dataset, load_metric, load_from_disk
import os
import sys
import time
import random
import shutil
import torch
import pandas as pd
from torch.utils.data import DataLoader
from sibyl import *
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
data_dir = "../../data1/fabricehc/prepared_datasets/"
save_dir = "../../data1/fabricehc/results_a100/"
local_save_dir = "./results/"
def tokenize_fn(batch):
if sentence2_key is None:
return tokenizer(batch[sentence1_key], padding=True, truncation=True, max_length=250)
return tokenizer(batch[sentence1_key], batch[sentence2_key], padding=True, truncation=True, max_length=250)
def sibyl_tokenize_fn(text1, text2):
if text2 is None:
return tokenizer(text1, padding=True, truncation=True, max_length=250, return_tensors='pt')
return tokenizer(text1, text2, padding=True, truncation=True, max_length=250, return_tensors='pt')
early_stopping_patience = 5
num_valid_per_class = 2000
num_runs = 3
num_train_examples = [10]
MODEL_NAMES = ['bert-base-uncased'] # ['bert-base-uncased', 'roberta-base', 'xlnet-base-cased']
ts = ['ORIG'] + [t.__name__ for t in TRANSFORMATIONS]
tasks = ['ag_news', 'imdb', 'dbpedia_14', 'yahoo_answers_topics', 'yelp_polarity', 'amazon_polarity']
run_args = []
for run_num in range(num_runs):
for num_train_per_class in num_train_examples:
for task in tasks:
for MODEL_NAME in MODEL_NAMES:
for t in ts:
run_args.append({
"run_num":run_num,
"num_train_per_class":num_train_per_class,
"task":task,
"MODEL_NAME":MODEL_NAME,
"t":t
})
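# Illustrative arithmetic for the grid above: 3 runs x 1 training size x
# 6 tasks x 1 model x len(ts) transforms, i.e. run_args holds 18 * len(ts)
# configurations that the loop below works through sequentially.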
results = []
save_file = 'transform_eval_all_10.csv'
if os.path.exists(save_file):
results.extend(pd.read_csv(save_file).to_dict("records"))
start_position = len(results)
else:
start_position = 0
print('starting at position {}'.format(start_position))
for run_arg in run_args[start_position:]:
run_num = run_arg['run_num']
num_train_per_class = run_arg['num_train_per_class']
task = run_arg['task']
MODEL_NAME = run_arg['MODEL_NAME']
t = run_arg['t']
print(pd.DataFrame([run_arg]))
task_to_keys = {
"ag_news": {"keys": ("text", None), "num_classes": 4, "task_type": "topic"},
"dbpedia_14": {"keys": ("text", None), "num_classes": 14, "task_type": "topic"},
"yahoo_answers_topics": {"keys": ("text", None), "num_classes": 10, "task_type": "topic"},
"imdb": {"keys": ("text", None), "num_classes": 2, "task_type": "sentiment"},
"amazon_polarity": {"keys": ("text", None), "num_classes": 2, "task_type": "sentiment"},
"yelp_polarity": {"keys": ("text", None), "num_classes": 2, "task_type": "sentiment"}
}
sentence1_key, sentence2_key = task_to_keys[task]["keys"]
num_classes = task_to_keys[task]["num_classes"]
task_type = task_to_keys[task]["task_type"]
#############################################################
## Model + Tokenizer ########################################
#############################################################
checkpoint = local_save_dir + MODEL_NAME + '-' + task + '-' + t
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=num_classes).to(device)
#############################################################
## Dataset Preparation ######################################
#############################################################
train_data_path = os.path.join(data_dir, task, 'ORIG', task + '_train_' + str(num_train_per_class))
valid_data_path = os.path.join(data_dir, task, 'ORIG', task + '_valid_' + str(num_valid_per_class))
train_dataset = load_from_disk(train_data_path)
eval_dataset = load_from_disk(valid_data_path)
test_dataset = load_dataset(task, split='test')
# train_dataset = train_dataset.rename_column("label", "labels")
# special handling for yahoo and dbpedia_14 datasets
if task in ["dbpedia_14", "amazon_polarity"]:
test_dataset = test_dataset.rename_column("content", "text")
if task == "yahoo_answers_topics":
test_dataset = test_dataset.map(lambda example : {'text' : example['question_title'] + " " +
example['question_content'] + " " +
example['best_answer'],
'label': example['topic']})
print('Length of train_dataset', num_classes * num_train_per_class, len(train_dataset))
print('Length of valid_dataset', num_classes * num_valid_per_class, len(eval_dataset))
assert num_classes * num_train_per_class == len(train_dataset)
assert num_classes * num_valid_per_class == len(eval_dataset)
eval_dataset = eval_dataset.map(tokenize_fn, batched=True, batch_size=len(eval_dataset))
eval_dataset = eval_dataset.rename_column("label", "labels")
eval_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])
test_dataset = test_dataset.map(tokenize_fn, batched=True, batch_size=len(test_dataset))
test_dataset = test_dataset.rename_column("label", "labels")
test_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])
#############################################################
## Callbacks + Collator #####################################
#############################################################
callbacks = []
tmcb = None
if early_stopping_patience > 0:
escb = EarlyStoppingCallback(
early_stopping_patience=early_stopping_patience
)
callbacks.append(escb)
transform = None
num_sampled_INV = 0
num_sampled_SIB = 0
label_type = "soft"
if t == "ORIG":
label_type = "hard"
elif t == "INV":
num_sampled_INV = 2
label_type = "hard"
elif t == "SIB":
num_sampled_SIB = 2
label_type = "soft"
elif t == 'INVSIB':
num_sampled_INV = 1
num_sampled_SIB = 1
        label_type = "soft"
else:
transform = getattr(sys.modules[__name__], t)
if hasattr(transform, 'uses_dataset'):
transform = transform(dataset=task)
else:
transform = transform()
if t in ['TextMix', 'SentMix', 'WordMix', 'ConceptMix']:
tmcb = TargetedMixturesCallback(
dataloader=DataLoader(eval_dataset, batch_size=4),
device=device,
sentence1_key=sentence1_key,
sentence2_key=sentence2_key,
num_classes=num_classes
)
callbacks.append(tmcb)
label_type = "soft"
if sentence2_key is not None:
continue # skip Mixture mutations for 2-input tasks
collator = SibylCollator(
sentence1_key=sentence1_key,
sentence2_key=sentence2_key,
tokenize_fn=sibyl_tokenize_fn,
transform=transform,
num_sampled_INV=num_sampled_INV,
num_sampled_SIB=num_sampled_SIB,
dataset=task,
        num_outputs=3,
        keep_original=True,
task_type=task_type,
tran_type=None,
label_type=None,
one_hot=label_type != "hard",
transform_prob=1.0,
target_pairs=[],
target_prob=0.,
reduce_mixed=False,
num_classes=num_classes,
return_tensors='pt',
return_text=False
)
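    # Note (sketch of intent, derived from the settings above): label_type only
    # feeds the one_hot flag here, so ORIG and INV runs train on hard labels
    # while every other transform setting trains on one-hot "soft" labels.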
#############################################################
## Trainer Setup ############################################
#############################################################
train_batch_size = 16
eval_batch_size = 64
num_epoch = 30
gradient_accumulation_steps = 1
max_steps = int((len(train_dataset) * num_epoch / gradient_accumulation_steps) / train_batch_size)
logging_steps = max_steps // (num_epoch / 2)
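    # Illustrative arithmetic, assuming the ag_news task with 10 examples per
    # class (len(train_dataset) == 40): max_steps = int(40 * 30 / 16) = 75 and
    # logging_steps = 75 // 15 = 5, i.e. evaluation and saving every 5 steps.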
training_args = TrainingArguments(
output_dir=checkpoint,
overwrite_output_dir=True,
max_steps=max_steps,
save_steps=logging_steps,
save_total_limit=1,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=eval_batch_size,
gradient_accumulation_steps=gradient_accumulation_steps,
warmup_steps=int(max_steps / 10),
weight_decay=0.01,
logging_dir='./logs',
logging_steps=logging_steps,
logging_first_step=True,
load_best_model_at_end=True,
metric_for_best_model="accuracy",
greater_is_better=True,
evaluation_strategy="steps",
remove_unused_columns=False
)
trainer = SibylTrainer(
model=model,
tokenizer=tokenizer,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
data_collator=collator,
callbacks=callbacks
)
start_time = time.time()
trainer.train()
run_time = time.time() - start_time
# test with ORIG data
trainer.eval_dataset = test_dataset
trainer.data_collator = DefaultCollator()
if tmcb:
trainer.remove_callback(tmcb)
out = trainer.evaluate()
out['run_num'] = run_num
out['num_train_per_class'] = num_train_per_class
out['task'] = task
out['transform'] = t
out['run'] = checkpoint
out['model_name'] = MODEL_NAME
out['transform'] = t
out['test'] = "ORIG"
    out['run_time'] = run_time
print('ORIG for {}\n{}'.format(checkpoint, out))
results.append(out)
# save results
df = pd.DataFrame(results)
df.to_csv(save_file)
shutil.rmtree(checkpoint) |
the-stack_106_26499 | """
weasyprint.formatting_structure.build
-------------------------------------
Turn an element tree with associated CSS style (computed values)
into a "before layout" formatting structure / box tree.
This includes creating anonymous boxes and processing whitespace
as necessary.
"""
import copy
import re
import unicodedata
import tinycss2.color3
from .. import html
from ..css import computed_values, properties
from ..logger import LOGGER
from . import boxes
# Maps values of the ``display`` CSS property to box types.
BOX_TYPE_FROM_DISPLAY = {
'block': boxes.BlockBox,
'list-item': boxes.BlockBox,
'inline': boxes.InlineBox,
'inline-block': boxes.InlineBlockBox,
'table': boxes.TableBox,
'inline-table': boxes.InlineTableBox,
'table-row': boxes.TableRowBox,
'table-row-group': boxes.TableRowGroupBox,
'table-header-group': boxes.TableRowGroupBox,
'table-footer-group': boxes.TableRowGroupBox,
'table-column': boxes.TableColumnBox,
'table-column-group': boxes.TableColumnGroupBox,
'table-cell': boxes.TableCellBox,
'table-caption': boxes.TableCaptionBox,
'flex': boxes.FlexBox,
'inline-flex': boxes.InlineFlexBox,
}
def build_formatting_structure(element_tree, style_for, get_image_from_uri,
base_url, target_collector, counter_style):
"""Build a formatting structure (box tree) from an element tree."""
box_list = element_to_box(
element_tree, style_for, get_image_from_uri, base_url,
target_collector, counter_style)
if box_list:
box, = box_list
else:
# No root element
def root_style_for(element, pseudo_type=None):
style = style_for(element, pseudo_type)
if style:
if element == element_tree:
style['display'] = 'block'
else:
style['display'] = 'none'
return style
box, = element_to_box(
element_tree, root_style_for, get_image_from_uri, base_url,
target_collector, counter_style)
target_collector.check_pending_targets()
box.is_for_root_element = True
# If this is changed, maybe update weasy.layout.pages.make_margin_boxes()
process_whitespace(box)
box = anonymous_table_boxes(box)
box = flex_boxes(box)
box = inline_in_block(box)
box = block_in_inline(box)
box = set_viewport_overflow(box)
return box
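# Illustrative sketch, not a prescribed entry point: assuming `style_for`,
# `get_image_from_uri`, `target_collector` and `counter_style` come from the
# css/document layers, a direct call would look roughly like
#     root_box = build_formatting_structure(
#         element_tree, style_for, get_image_from_uri, base_url,
#         target_collector, counter_style)
# after which `root_box` is the fully wrapped box tree handed to layout.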
def make_box(element_tag, style, content, element):
box = BOX_TYPE_FROM_DISPLAY[style['display']](
element_tag, style, element, content)
return box
def element_to_box(element, style_for, get_image_from_uri, base_url,
target_collector, counter_style, state=None):
"""Convert an element and its children into a box with children.
Return a list of boxes. Most of the time the list will have one item but
may have zero or more than one.
Eg.::
<p>Some <em>emphasised</em> text.</p>
gives (not actual syntax)::
BlockBox[
TextBox['Some '],
InlineBox[
TextBox['emphasised'],
],
TextBox[' text.'],
]
``TextBox``es are anonymous inline boxes:
See http://www.w3.org/TR/CSS21/visuren.html#anonymous
"""
if not isinstance(element.tag, str):
# We ignore comments and XML processing instructions.
return []
style = style_for(element)
# TODO: should be the used value. When does the used value for `display`
# differ from the computer value?
display = style['display']
if display == 'none':
return []
box = make_box(element.tag, style, [], element)
if state is None:
# use a list to have a shared mutable object
state = (
# Shared mutable objects:
[0], # quote_depth: single integer
{}, # counter_values: name -> stacked/scoped values
[set()] # counter_scopes: element tree depths -> counter names
)
_quote_depth, counter_values, counter_scopes = state
update_counters(state, style)
children = []
# If this element’s direct children create new scopes, the counter
# names will be in this new list
counter_scopes.append(set())
box.first_letter_style = style_for(element, 'first-letter')
box.first_line_style = style_for(element, 'first-line')
marker_boxes = []
if style['display'] == 'list-item':
marker_boxes = list(marker_to_box(
element, state, style, style_for, get_image_from_uri,
target_collector, counter_style))
children.extend(marker_boxes)
children.extend(before_after_to_box(
element, 'before', state, style_for, get_image_from_uri,
target_collector, counter_style))
# collect anchor's counter_values, maybe it's a target.
# to get the spec-conform counter_values we must do it here,
# after the ::before is parsed and before the ::after is
if style['anchor']:
target_collector.store_target(style['anchor'], counter_values, box)
text = element.text
if text:
children.append(boxes.TextBox.anonymous_from(box, text))
for child_element in element:
children.extend(element_to_box(
child_element, style_for, get_image_from_uri, base_url,
target_collector, counter_style, state))
text = child_element.tail
if text:
text_box = boxes.TextBox.anonymous_from(box, text)
if children and isinstance(children[-1], boxes.TextBox):
children[-1].text += text_box.text
else:
children.append(text_box)
children.extend(before_after_to_box(
element, 'after', state, style_for, get_image_from_uri,
target_collector, counter_style))
# Scopes created by this element’s children stop here.
for name in counter_scopes.pop():
counter_values[name].pop()
if not counter_values[name]:
counter_values.pop(name)
box.children = children
# calculate string-set and bookmark-label
set_content_lists(
element, box, style, counter_values, target_collector, counter_style)
if marker_boxes and len(box.children) == 1:
# See https://www.w3.org/TR/css-lists-3/#list-style-position-outside
#
# "The size or contents of the marker box may affect the height of the
# principal block box and/or the height of its first line box, and in
# some cases may cause the creation of a new line box; this
# interaction is also not defined."
#
# We decide here to add a zero-width space to have a minimum
# height. Adding text boxes is not the best idea, but it's not a good
# moment to add an empty line box, and the specification lets us do
# almost what we want, so…
if style['list_style_position'] == 'outside':
box.children.append(boxes.TextBox.anonymous_from(box, ''))
# Specific handling for the element. (eg. replaced element)
return html.handle_element(element, box, get_image_from_uri, base_url)
def before_after_to_box(element, pseudo_type, state, style_for,
get_image_from_uri, target_collector, counter_style):
"""Return the boxes for ::before or ::after pseudo-element."""
style = style_for(element, pseudo_type)
if pseudo_type and style is None:
# Pseudo-elements with no style at all do not get a style dict.
# Their initial content property computes to 'none'.
return []
# TODO: should be the computed value. When does the used value for
# `display` differ from the computer value? It's at least wrong for
# `content` where 'normal' computes as 'inhibit' for pseudo elements.
display = style['display']
content = style['content']
if 'none' in (display, content) or content in ('normal', 'inhibit'):
return []
box = make_box('%s::%s' % (element.tag, pseudo_type), style, [], element)
quote_depth, counter_values, _counter_scopes = state
update_counters(state, style)
children = []
if display == 'list-item':
marker_boxes = list(marker_to_box(
element, state, style, style_for, get_image_from_uri,
target_collector, counter_style))
children.extend(marker_boxes)
children.extend(content_to_boxes(
style, box, quote_depth, counter_values, get_image_from_uri,
target_collector, counter_style))
box.children = children
return [box]
def marker_to_box(element, state, parent_style, style_for, get_image_from_uri,
target_collector, counter_style):
"""Yield the box for ::marker pseudo-element if there is one.
https://drafts.csswg.org/css-lists-3/#marker-pseudo
"""
style = style_for(element, 'marker')
children = []
# TODO: should be the computed value. When does the used value for
# `display` differ from the computer value? It's at least wrong for
# `content` where 'normal' computes as 'inhibit' for pseudo elements.
quote_depth, counter_values, _counter_scopes = state
box = make_box('%s::marker' % element.tag, style, children, element)
if style['display'] == 'none':
return
image_type, image = style['list_style_image']
if style['content'] not in ('normal', 'inhibit'):
children.extend(content_to_boxes(
style, box, quote_depth, counter_values, get_image_from_uri,
target_collector, counter_style))
else:
if image_type == 'url':
# image may be None here too, in case the image is not available.
image = get_image_from_uri(image)
if image is not None:
box = boxes.InlineReplacedBox.anonymous_from(box, image)
children.append(box)
if not children and style['list_style_type'] != 'none':
counter_value = counter_values.get('list-item', [0])[-1]
counter_type = style['list_style_type']
# TODO: rtl numbered list has the dot on the left
marker_text = counter_style.render_marker(
counter_type, counter_value)
box = boxes.TextBox.anonymous_from(box, marker_text)
box.style['white_space'] = 'pre-wrap'
children.append(box)
if not children:
return
if parent_style['list_style_position'] == 'outside':
marker_box = boxes.BlockBox.anonymous_from(box, children)
# We can safely edit everything that can't be changed by user style
# See https://drafts.csswg.org/css-pseudo-4/#marker-pseudo
marker_box.style['position'] = 'absolute'
if parent_style['direction'] == 'ltr':
translate_x = properties.Dimension(-100, '%')
else:
translate_x = properties.Dimension(100, '%')
translate_y = computed_values.ZERO_PIXELS
marker_box.style['transform'] = (
('translate', (translate_x, translate_y)),)
else:
marker_box = boxes.InlineBox.anonymous_from(box, children)
yield marker_box
def _collect_missing_counter(counter_name, counter_values, missing_counters):
"""Collect missing counters."""
if counter_name not in list(counter_values) + missing_counters:
missing_counters.append(counter_name)
def _collect_missing_target_counter(counter_name, lookup_counter_values,
anchor_name, missing_target_counters):
"""Collect missing target counters.
The corresponding TargetLookupItem caches the target's page based
counter values during pagination.
"""
if counter_name not in lookup_counter_values:
missing_counters = missing_target_counters.setdefault(anchor_name, [])
if counter_name not in missing_counters:
missing_counters.append(counter_name)
def compute_content_list(content_list, parent_box, counter_values, css_token,
parse_again, target_collector, counter_style,
get_image_from_uri=None, quote_depth=None,
quote_style=None, context=None, page=None,
element=None):
"""Compute and return the boxes corresponding to the ``content_list``.
``parse_again`` is called to compute the ``content_list`` again when
``target_collector.lookup_target()`` detected a pending target.
``build_formatting_structure`` calls
``target_collector.check_pending_targets()`` after the first pass to do
required reparsing.
"""
# TODO: Some computation done here may be done in computed_values
# instead. We currently miss at least style_for, counters and quotes
# context in computer. Some work will still need to be done here though,
# like box creation for URIs.
boxlist = []
texts = []
missing_counters = []
missing_target_counters = {}
in_page_context = context is not None and page is not None
# Collect missing counters during build_formatting_structure.
# Pointless to collect missing target counters in MarginBoxes.
need_collect_missing = target_collector.collecting and not in_page_context
# TODO: remove attribute or set a default value in Box class
if not hasattr(parent_box, 'cached_counter_values'):
# Store the counter_values in the parent_box to make them accessible
# in @page context. Obsoletes the parse_again function's deepcopy.
        # TODO: is probably superfluous when in_page_context is true.
parent_box.cached_counter_values = copy.deepcopy(counter_values)
for type_, value in content_list:
if type_ == 'string':
texts.append(value)
elif type_ == 'url' and get_image_from_uri is not None:
origin, uri = value
if origin != 'external':
# Embedding internal references is impossible
continue
image = get_image_from_uri(uri)
if image is not None:
text = ''.join(texts)
if text:
boxlist.append(
boxes.TextBox.anonymous_from(parent_box, text))
texts = []
boxlist.append(
boxes.InlineReplacedBox.anonymous_from(parent_box, image))
elif type_ == 'content()':
added_text = TEXT_CONTENT_EXTRACTORS[value](parent_box)
# Simulate the step of white space processing
# (normally done during the layout)
added_text = added_text.strip()
texts.append(added_text)
elif type_ == 'counter()':
counter_name, counter_type = value
if need_collect_missing:
_collect_missing_counter(
counter_name, counter_values, missing_counters)
if counter_type != 'none':
counter_value = counter_values.get(counter_name, [0])[-1]
texts.append(counter_style.render_value(
counter_value, counter_type))
elif type_ == 'counters()':
counter_name, separator, counter_type = value
if need_collect_missing:
_collect_missing_counter(
counter_name, counter_values, missing_counters)
if counter_type != 'none':
texts.append(separator.join(
counter_style.render_value(counter_value, counter_type)
for counter_value
in counter_values.get(counter_name, [0])))
elif type_ == 'string()':
if not in_page_context:
# string() is currently only valid in @page context
# See https://github.com/Kozea/WeasyPrint/issues/723
LOGGER.warning(
'"string(%s)" is only allowed in page margins' %
(' '.join(value)))
continue
texts.append(context.get_string_set_for(page, *value) or '')
elif type_ == 'target-counter()':
anchor_token, counter_name, counter_type = value
lookup_target = target_collector.lookup_target(
anchor_token, parent_box, css_token, parse_again)
if lookup_target.state == 'up-to-date':
target_values = lookup_target.target_box.cached_counter_values
if need_collect_missing:
_collect_missing_target_counter(
counter_name, target_values,
target_collector.anchor_name_from_token(anchor_token),
missing_target_counters)
# Mixin target's cached page counters.
# cached_page_counter_values are empty during layout.
local_counters = (
lookup_target.cached_page_counter_values.copy())
local_counters.update(target_values)
if counter_type != 'none':
counter_value = local_counters.get(counter_name, [0])[-1]
texts.append(counter_style.render_value(
counter_value, counter_type))
else:
texts = []
break
elif type_ == 'target-counters()':
anchor_token, counter_name, separator, counter_type = value
lookup_target = target_collector.lookup_target(
anchor_token, parent_box, css_token, parse_again)
if lookup_target.state == 'up-to-date':
if separator[0] != 'string':
break
separator_string = separator[1]
target_values = lookup_target.target_box.cached_counter_values
if need_collect_missing:
_collect_missing_target_counter(
counter_name, target_values,
target_collector.anchor_name_from_token(anchor_token),
missing_target_counters)
# Mixin target's cached page counters.
# cached_page_counter_values are empty during layout.
local_counters = (
lookup_target.cached_page_counter_values.copy())
local_counters.update(target_values)
if counter_type != 'none':
texts.append(separator_string.join(
counter_style.render_value(counter_value, counter_type)
for counter_value
in local_counters.get(counter_name, [0])))
else:
texts = []
break
elif type_ == 'target-text()':
anchor_token, text_style = value
lookup_target = target_collector.lookup_target(
anchor_token, parent_box, css_token, parse_again)
if lookup_target.state == 'up-to-date':
target_box = lookup_target.target_box
# TODO: 'before'- and 'after'- content referring missing
# counters are not properly set.
text = TEXT_CONTENT_EXTRACTORS[text_style](target_box)
# Simulate the step of white space processing
# (normally done during the layout)
texts.append(text.strip())
else:
texts = []
break
elif (type_ == 'quote' and
quote_depth is not None and
quote_style is not None):
is_open = 'open' in value
insert = not value.startswith('no-')
if not is_open:
quote_depth[0] = max(0, quote_depth[0] - 1)
if insert:
open_quotes, close_quotes = quote_style
quotes = open_quotes if is_open else close_quotes
texts.append(quotes[min(quote_depth[0], len(quotes) - 1)])
if is_open:
quote_depth[0] += 1
elif type_ == 'element()':
if not in_page_context:
LOGGER.warning(
'"element(%s)" is only allowed in page margins' %
(' '.join(value)))
continue
new_box = context.get_running_element_for(page, *value)
if new_box is None:
continue
new_box = new_box.deepcopy()
new_box.style['position'] = 'static'
for child in new_box.descendants():
if child.style['content'] in ('normal', 'none'):
continue
child.children = content_to_boxes(
child.style, child, quote_depth, counter_values,
get_image_from_uri, target_collector, counter_style,
context=context, page=page)
boxlist.append(new_box)
text = ''.join(texts)
if text:
boxlist.append(boxes.TextBox.anonymous_from(parent_box, text))
# Only add CounterLookupItem if the content_list actually produced text
target_collector.collect_missing_counters(
parent_box, css_token, parse_again, missing_counters,
missing_target_counters)
return boxlist if (texts or boxlist) else None
def content_to_boxes(style, parent_box, quote_depth, counter_values,
get_image_from_uri, target_collector, counter_style,
context=None, page=None):
"""Take the value of a ``content`` property and return boxes."""
def parse_again(mixin_pagebased_counters=None):
"""Closure to parse the ``parent_boxes`` children all again."""
# Neither alters the mixed-in nor the cached counter values, no
# need to deepcopy here
if mixin_pagebased_counters is None:
local_counters = {}
else:
local_counters = mixin_pagebased_counters.copy()
local_counters.update(parent_box.cached_counter_values)
local_children = []
local_children.extend(content_to_boxes(
style, parent_box, orig_quote_depth, local_counters,
get_image_from_uri, target_collector, counter_style))
# TODO: do we need to add markers here?
# TODO: redo the formatting structure of the parent instead of hacking
# the already formatted structure. Find why inline_in_blocks has
# sometimes already been called, and sometimes not.
if (len(parent_box.children) == 1 and
isinstance(parent_box.children[0], boxes.LineBox)):
parent_box.children[0].children = local_children
else:
parent_box.children = local_children
if style['content'] == 'inhibit':
return []
orig_quote_depth = quote_depth[:]
css_token = 'content'
box_list = compute_content_list(
style['content'], parent_box, counter_values, css_token, parse_again,
target_collector, counter_style, get_image_from_uri, quote_depth,
style['quotes'], context, page)
return box_list or []
def compute_string_set(element, box, string_name, content_list,
counter_values, target_collector, counter_style):
"""Parse the content-list value of ``string_name`` for ``string-set``."""
def parse_again(mixin_pagebased_counters=None):
"""Closure to parse the string-set string value all again."""
# Neither alters the mixed-in nor the cached counter values, no
# need to deepcopy here
if mixin_pagebased_counters is None:
local_counters = {}
else:
local_counters = mixin_pagebased_counters.copy()
local_counters.update(box.cached_counter_values)
compute_string_set(
element, box, string_name, content_list, local_counters,
target_collector, counter_style)
css_token = 'string-set::%s' % string_name
box_list = compute_content_list(
content_list, box, counter_values, css_token, parse_again,
target_collector, counter_style, element=element)
if box_list is not None:
string = ''.join(
box.text for box in box_list if isinstance(box, boxes.TextBox))
# Avoid duplicates, care for parse_again and missing counters, don't
# change the pointer
for string_set_tuple in box.string_set:
if string_set_tuple[0] == string_name:
box.string_set.remove(string_set_tuple)
break
box.string_set.append((string_name, string))
def compute_bookmark_label(element, box, content_list, counter_values,
target_collector, counter_style):
"""Parses the content-list value for ``bookmark-label``."""
    def parse_again(mixin_pagebased_counters=None):
"""Closure to parse the bookmark-label all again."""
# Neither alters the mixed-in nor the cached counter values, no
# need to deepcopy here
if mixin_pagebased_counters is None:
local_counters = {}
else:
local_counters = mixin_pagebased_counters.copy()
local_counters.update(box.cached_counter_values)
compute_bookmark_label(
element, box, content_list, local_counters, target_collector,
counter_style)
css_token = 'bookmark-label'
box_list = compute_content_list(
content_list, box, counter_values, css_token, parse_again,
target_collector, counter_style, element=element)
if box_list is None:
box.bookmark_label = ''
else:
box.bookmark_label = ''.join(
box.text for box in box_list if isinstance(box, boxes.TextBox))
def set_content_lists(element, box, style, counter_values, target_collector,
counter_style):
"""Set the content-lists values.
These content-lists are used in GCPM properties like ``string-set`` and
``bookmark-label``.
"""
box.string_set = []
if style['string_set'] != 'none':
for i, (string_name, string_values) in enumerate(style['string_set']):
compute_string_set(
element, box, string_name, string_values, counter_values,
target_collector, counter_style)
if style['bookmark_label'] == 'none':
box.bookmark_label = ''
else:
compute_bookmark_label(
element, box, style['bookmark_label'], counter_values,
target_collector, counter_style)
def update_counters(state, style):
"""Handle the ``counter-*`` properties."""
_quote_depth, counter_values, counter_scopes = state
sibling_scopes = counter_scopes[-1]
for name, value in style['counter_reset']:
if name in sibling_scopes:
counter_values[name].pop()
else:
sibling_scopes.add(name)
counter_values.setdefault(name, []).append(value)
for name, value in style['counter_set']:
values = counter_values.setdefault(name, [])
if not values:
assert name not in sibling_scopes
sibling_scopes.add(name)
values.append(0)
values[-1] = value
counter_increment = style['counter_increment']
if counter_increment == 'auto':
# 'auto' is the initial value but is not valid in stylesheet:
# there was no counter-increment declaration for this element.
# (Or the winning value was 'initial'.)
# http://dev.w3.org/csswg/css3-lists/#declaring-a-list-item
if style['display'] == 'list-item':
counter_increment = [('list-item', 1)]
else:
counter_increment = []
for name, value in counter_increment:
values = counter_values.setdefault(name, [])
if not values:
assert name not in sibling_scopes
sibling_scopes.add(name)
values.append(0)
values[-1] += value
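# Illustrative sketch, assuming an empty counter state: a style carrying
# ``counter-reset: section 0`` and ``counter-increment: section`` leaves
# ``counter_values == {'section': [1]}``; the reset pushes 0 onto a new scope,
# then the increment bumps the innermost value by 1.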
def is_whitespace(box, _has_non_whitespace=re.compile('\\S').search):
"""Return True if ``box`` is a TextBox with only whitespace."""
return isinstance(box, boxes.TextBox) and not _has_non_whitespace(box.text)
def wrap_improper(box, children, wrapper_type, test=None):
"""
Wrap consecutive children that do not pass ``test`` in a box of type
``wrapper_type``.
``test`` defaults to children being of the same type as ``wrapper_type``.
"""
if test is None:
def test(child):
return isinstance(child, wrapper_type)
improper = []
for child in children:
if test(child):
if improper:
wrapper = wrapper_type.anonymous_from(box, children=[])
# Apply the rules again on the new wrapper
yield table_boxes_children(wrapper, improper)
improper = []
yield child
else:
# Whitespace either fail the test or were removed earlier,
# so there is no need to take special care with the definition
# of "consecutive".
if isinstance(box, boxes.FlexContainerBox):
# The display value of a flex item must be "blockified", see
# https://www.w3.org/TR/css-flexbox-1/#flex-items
# TODO: These blocks are currently ignored, we should
# "blockify" them and their children.
pass
else:
improper.append(child)
if improper:
wrapper = wrapper_type.anonymous_from(box, children=[])
# Apply the rules again on the new wrapper
yield table_boxes_children(wrapper, improper)
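# Illustrative sketch of the wrapping, assuming a TableRowBox whose children
# are a stray TextBox followed by a proper TableCellBox: the TextBox fails the
# isinstance test and is wrapped in an anonymous TableCellBox, while the
# existing cell is yielded unchanged (see rule 2.3 in table_boxes_children).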
def anonymous_table_boxes(box):
"""Remove and add boxes according to the table model.
Take and return a ``Box`` object.
See http://www.w3.org/TR/CSS21/tables.html#anonymous-boxes
"""
if not isinstance(box, boxes.ParentBox) or box.is_running():
return box
# Do recursion.
children = [anonymous_table_boxes(child) for child in box.children]
return table_boxes_children(box, children)
def table_boxes_children(box, children):
"""Internal implementation of anonymous_table_boxes()."""
if isinstance(box, boxes.TableColumnBox): # rule 1.1
# Remove all children.
children = []
elif isinstance(box, boxes.TableColumnGroupBox): # rule 1.2
# Remove children other than table-column.
children = [
child for child in children
if isinstance(child, boxes.TableColumnBox)
]
# Rule XXX (not in the spec): column groups have at least
# one column child.
if not children:
children = [boxes.TableColumnBox.anonymous_from(box, [])
for _i in range(box.span)]
# rule 1.3
if box.tabular_container and len(children) >= 2:
# TODO: Maybe only remove text if internal is also
# a proper table descendant of box.
# This is what the spec says, but maybe not what browsers do:
# http://lists.w3.org/Archives/Public/www-style/2011Oct/0567
# Last child
internal, text = children[-2:]
if (internal.internal_table_or_caption and is_whitespace(text)):
children.pop()
# First child
if len(children) >= 2:
text, internal = children[:2]
if (internal.internal_table_or_caption and is_whitespace(text)):
children.pop(0)
# Children other than first and last that would be removed by
# rule 1.3 are also removed by rule 1.4 below.
children = [
child
for prev_child, child, next_child in zip(
[None] + children[:-1],
children,
children[1:] + [None]
)
if not (
# Ignore some whitespace: rule 1.4
prev_child and prev_child.internal_table_or_caption and
next_child and next_child.internal_table_or_caption and
is_whitespace(child)
)
]
if isinstance(box, boxes.TableBox):
# Rule 2.1
children = wrap_improper(
box, children, boxes.TableRowBox,
lambda child: child.proper_table_child)
elif isinstance(box, boxes.TableRowGroupBox):
# Rule 2.2
children = wrap_improper(box, children, boxes.TableRowBox)
if isinstance(box, boxes.TableRowBox):
# Rule 2.3
children = wrap_improper(box, children, boxes.TableCellBox)
else:
# Rule 3.1
children = wrap_improper(
box, children, boxes.TableRowBox,
lambda child: not isinstance(child, boxes.TableCellBox))
# Rule 3.2
if isinstance(box, boxes.InlineBox):
children = wrap_improper(
box, children, boxes.InlineTableBox,
lambda child: not child.proper_table_child)
else:
parent_type = type(box)
children = wrap_improper(
box, children, boxes.TableBox,
lambda child: (not child.proper_table_child or
parent_type in child.proper_parents))
if isinstance(box, boxes.TableBox):
return wrap_table(box, children)
else:
box.children = list(children)
return box
def wrap_table(box, children):
"""Take a table box and return it in its table wrapper box.
Also re-order children and assign grid positions to each column and cell.
Because of colspan/rowspan works, grid_y is implicitly the index of a row,
but grid_x is an explicit attribute on cells, columns and column group.
http://www.w3.org/TR/CSS21/tables.html#model
http://www.w3.org/TR/CSS21/tables.html#table-layout
"""
# Group table children by type
columns = []
rows = []
all_captions = []
by_type = {
boxes.TableColumnBox: columns,
boxes.TableColumnGroupBox: columns,
boxes.TableRowBox: rows,
boxes.TableRowGroupBox: rows,
boxes.TableCaptionBox: all_captions,
}
for child in children:
by_type[type(child)].append(child)
# Split top and bottom captions
captions = {'top': [], 'bottom': []}
for caption in all_captions:
captions[caption.style['caption_side']].append(caption)
# Assign X positions on the grid to column boxes
column_groups = list(wrap_improper(
box, columns, boxes.TableColumnGroupBox))
grid_x = 0
for group in column_groups:
group.grid_x = grid_x
if group.children:
for column in group.children:
# There's no need to take care of group's span, as "span=x"
# already generates x TableColumnBox children
column.grid_x = grid_x
grid_x += 1
group.span = len(group.children)
else:
grid_x += group.span
grid_width = grid_x
row_groups = wrap_improper(box, rows, boxes.TableRowGroupBox)
# Extract the optional header and footer groups.
body_row_groups = []
header = None
footer = None
for group in row_groups:
display = group.style['display']
if display == 'table-header-group' and header is None:
group.is_header = True
header = group
elif display == 'table-footer-group' and footer is None:
group.is_footer = True
footer = group
else:
body_row_groups.append(group)
row_groups = (
([header] if header is not None else []) +
body_row_groups +
([footer] if footer is not None else []))
# Assign a (x,y) position in the grid to each cell.
# rowspan can not extend beyond a row group, so each row group
# is independent.
# http://www.w3.org/TR/CSS21/tables.html#table-layout
# Column 0 is on the left if direction is ltr, right if rtl.
# This algorithm does not change.
grid_height = 0
for group in row_groups:
# Indexes: row number in the group.
# Values: set of cells already occupied by row-spanning cells.
occupied_cells_by_row = [set() for row in group.children]
for row in group.children:
occupied_cells_in_this_row = occupied_cells_by_row.pop(0)
# The list is now about rows after this one.
grid_x = 0
for cell in row.children:
# Make sure that the first grid cell is free.
while grid_x in occupied_cells_in_this_row:
grid_x += 1
cell.grid_x = grid_x
new_grid_x = grid_x + cell.colspan
# http://www.w3.org/TR/html401/struct/tables.html#adef-rowspan
if cell.rowspan != 1:
max_rowspan = len(occupied_cells_by_row) + 1
if cell.rowspan == 0:
# All rows until the end of the group
spanned_rows = occupied_cells_by_row
cell.rowspan = max_rowspan
else:
cell.rowspan = min(cell.rowspan, max_rowspan)
spanned_rows = occupied_cells_by_row[:cell.rowspan - 1]
spanned_columns = range(grid_x, new_grid_x)
for occupied_cells in spanned_rows:
occupied_cells.update(spanned_columns)
grid_x = new_grid_x
grid_width = max(grid_width, grid_x)
grid_height += len(group.children)
table = box.copy_with_children(row_groups)
table.column_groups = tuple(column_groups)
if table.style['border_collapse'] == 'collapse':
table.collapsed_border_grid = collapse_table_borders(
table, grid_width, grid_height)
if isinstance(box, boxes.InlineTableBox):
wrapper_type = boxes.InlineBlockBox
else:
wrapper_type = boxes.BlockBox
wrapper = wrapper_type.anonymous_from(
box, captions['top'] + [table] + captions['bottom'])
wrapper.style = wrapper.style.copy()
wrapper.is_table_wrapper = True
# Non-inherited properties of the table element apply to one
# of the wrapper and the table. The other get the initial value.
# TODO: put this in a method of the table object
for name in properties.TABLE_WRAPPER_BOX_PROPERTIES:
wrapper.style[name] = table.style[name]
table.style[name] = properties.INITIAL_VALUES[name]
return wrapper
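# Illustrative sketch of the grid assignment, assuming a cell with colspan=2
# placed at grid_x == 1: it occupies columns 1 and 2, so the next cell in the
# same row starts at grid_x == 3 (or later, if a rowspan from a previous row
# already occupies that slot).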
TRANSPARENT = tinycss2.color3.parse_color('transparent')
def collapse_table_borders(table, grid_width, grid_height):
"""Resolve border conflicts for a table in the collapsing border model.
Take a :class:`TableBox`; set appropriate border widths on the table,
column group, column, row group, row, and cell boxes; and return
a data structure for the resolved collapsed border grid.
"""
if not (grid_width and grid_height):
# Don’t bother with empty tables
return [], []
style_scores = dict((v, i) for i, v in enumerate(reversed([
'hidden', 'double', 'solid', 'dashed', 'dotted', 'ridge',
'outset', 'groove', 'inset', 'none'])))
style_map = {'inset': 'ridge', 'outset': 'groove'}
transparent = TRANSPARENT
weak_null_border = (
(0, 0, style_scores['none']), ('none', 0, transparent))
vertical_borders = [[weak_null_border for x in range(grid_width + 1)]
for y in range(grid_height)]
horizontal_borders = [[weak_null_border for x in range(grid_width)]
for y in range(grid_height + 1)]
def set_one_border(border_grid, box_style, side, grid_x, grid_y):
from ..draw import get_color
style = box_style['border_%s_style' % side]
width = box_style['border_%s_width' % side]
color = get_color(box_style, 'border_%s_color' % side)
# http://www.w3.org/TR/CSS21/tables.html#border-conflict-resolution
score = ((1 if style == 'hidden' else 0), width, style_scores[style])
style = style_map.get(style, style)
previous_score, _ = border_grid[grid_y][grid_x]
# Strict < so that the earlier call wins in case of a tie.
if previous_score < score:
border_grid[grid_y][grid_x] = (score, (style, width, color))
def set_borders(box, x, y, w, h):
style = box.style
for yy in range(y, y + h):
set_one_border(vertical_borders, style, 'left', x, yy)
set_one_border(vertical_borders, style, 'right', x + w, yy)
for xx in range(x, x + w):
set_one_border(horizontal_borders, style, 'top', xx, y)
set_one_border(horizontal_borders, style, 'bottom', xx, y + h)
# The order is important here:
# "A style set on a cell wins over one on a row, which wins over a
# row group, column, column group and, lastly, table"
# See http://www.w3.org/TR/CSS21/tables.html#border-conflict-resolution
strong_null_border = (
(1, 0, style_scores['hidden']), ('hidden', 0, transparent))
grid_y = 0
for row_group in table.children:
for row in row_group.children:
for cell in row.children:
# No border inside of a cell with rowspan or colspan
for xx in range(cell.grid_x + 1, cell.grid_x + cell.colspan):
for yy in range(grid_y, grid_y + cell.rowspan):
vertical_borders[yy][xx] = strong_null_border
for xx in range(cell.grid_x, cell.grid_x + cell.colspan):
for yy in range(grid_y + 1, grid_y + cell.rowspan):
horizontal_borders[yy][xx] = strong_null_border
# The cell’s own borders
set_borders(cell, x=cell.grid_x, y=grid_y,
w=cell.colspan, h=cell.rowspan)
grid_y += 1
grid_y = 0
for row_group in table.children:
for row in row_group.children:
set_borders(row, x=0, y=grid_y, w=grid_width, h=1)
grid_y += 1
grid_y = 0
for row_group in table.children:
rowspan = len(row_group.children)
set_borders(row_group, x=0, y=grid_y, w=grid_width, h=rowspan)
grid_y += rowspan
for column_group in table.column_groups:
for column in column_group.children:
set_borders(column, x=column.grid_x, y=0, w=1, h=grid_height)
for column_group in table.column_groups:
set_borders(column_group, x=column_group.grid_x, y=0,
w=column_group.span, h=grid_height)
set_borders(table, x=0, y=0, w=grid_width, h=grid_height)
# Now that all conflicts are resolved, set transparent borders of
# the correct widths on each box. The actual border grid will be
# painted separately.
def set_transparent_border(box, side, twice_width):
box.style['border_%s_style' % side] = 'solid'
box.style['border_%s_width' % side] = twice_width / 2
box.style['border_%s_color' % side] = transparent
def remove_borders(box):
set_transparent_border(box, 'top', 0)
set_transparent_border(box, 'right', 0)
set_transparent_border(box, 'bottom', 0)
set_transparent_border(box, 'left', 0)
def max_vertical_width(x, y, h):
return max(
width for grid_row in vertical_borders[y:y + h]
for _, (_, width, _) in [grid_row[x]])
def max_horizontal_width(x, y, w):
return max(
width for _, (_, width, _) in horizontal_borders[y][x:x + w])
grid_y = 0
for row_group in table.children:
remove_borders(row_group)
for row in row_group.children:
remove_borders(row)
for cell in row.children:
set_transparent_border(cell, 'top', max_horizontal_width(
x=cell.grid_x, y=grid_y, w=cell.colspan))
set_transparent_border(cell, 'bottom', max_horizontal_width(
x=cell.grid_x, y=grid_y + cell.rowspan, w=cell.colspan))
set_transparent_border(cell, 'left', max_vertical_width(
x=cell.grid_x, y=grid_y, h=cell.rowspan))
set_transparent_border(cell, 'right', max_vertical_width(
x=cell.grid_x + cell.colspan, y=grid_y, h=cell.rowspan))
grid_y += 1
for column_group in table.column_groups:
remove_borders(column_group)
for column in column_group.children:
remove_borders(column)
set_transparent_border(table, 'top', max_horizontal_width(
x=0, y=0, w=grid_width))
set_transparent_border(table, 'bottom', max_horizontal_width(
x=0, y=grid_height, w=grid_width))
# "UAs must compute an initial left and right border width for the table
# by examining the first and last cells in the first row of the table."
# http://www.w3.org/TR/CSS21/tables.html#collapsing-borders
# ... so h=1, not grid_height:
set_transparent_border(table, 'left', max_vertical_width(
x=0, y=0, h=1))
set_transparent_border(table, 'right', max_vertical_width(
x=grid_width, y=0, h=1))
return vertical_borders, horizontal_borders
def flex_boxes(box):
"""Remove and add boxes according to the flex model.
Take and return a ``Box`` object.
See http://www.w3.org/TR/css-flexbox-1/#flex-items
"""
if not isinstance(box, boxes.ParentBox) or box.is_running():
return box
# Do recursion.
children = [flex_boxes(child) for child in box.children]
box.children = flex_children(box, children)
return box
def flex_children(box, children):
if isinstance(box, boxes.FlexContainerBox):
flex_children = []
for child in children:
if not child.is_absolutely_positioned():
child.is_flex_item = True
if isinstance(child, boxes.TextBox) and not child.text.strip(' '):
# TODO: ignore texts only containing "characters that can be
# affected by the white-space property"
# https://www.w3.org/TR/css-flexbox-1/#flex-items
continue
if isinstance(child, boxes.InlineLevelBox):
# TODO: Only create block boxes for text runs, not for other
# inline level boxes. This is false but currently needed
# because block_level_width and block_level_layout are called
# in layout.flex.
if isinstance(child, boxes.ParentBox):
anonymous = boxes.BlockBox.anonymous_from(
box, child.children)
anonymous.style = child.style
else:
anonymous = boxes.BlockBox.anonymous_from(box, [child])
anonymous.is_flex_item = True
flex_children.append(anonymous)
else:
flex_children.append(child)
return flex_children
else:
return children
def process_whitespace(box, following_collapsible_space=False):
"""First part of "The 'white-space' processing model".
See http://www.w3.org/TR/CSS21/text.html#white-space-model
http://dev.w3.org/csswg/css3-text/#white-space-rules
"""
if isinstance(box, boxes.TextBox):
text = box.text
if not text:
return following_collapsible_space
# Normalize line feeds
text = re.sub('\r\n?', '\n', text)
new_line_collapse = box.style['white_space'] in ('normal', 'nowrap')
space_collapse = box.style['white_space'] in (
'normal', 'nowrap', 'pre-line')
if space_collapse:
# \r characters were removed/converted earlier
text = re.sub('[\t ]*\n[\t ]*', '\n', text)
if new_line_collapse:
# TODO: this should be language-specific
# Could also replace with a zero width space character (U+200B),
# or no character
# CSS3: http://www.w3.org/TR/css3-text/#line-break-transform
text = text.replace('\n', ' ')
if space_collapse:
text = text.replace('\t', ' ')
text = re.sub(' +', ' ', text)
previous_text = text
if following_collapsible_space and text.startswith(' '):
text = text[1:]
box.leading_collapsible_space = True
following_collapsible_space = previous_text.endswith(' ')
else:
following_collapsible_space = False
box.text = text
return following_collapsible_space
if isinstance(box, boxes.ParentBox) and not box.is_running():
for child in box.children:
if isinstance(child, (boxes.TextBox, boxes.InlineBox)):
following_collapsible_space = process_whitespace(
child, following_collapsible_space)
else:
process_whitespace(child)
if child.is_in_normal_flow():
following_collapsible_space = False
return following_collapsible_space
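# Illustrative sketch, assuming a TextBox with white-space: normal: the
# substitutions above turn '  a \t\n b ' into ' a b ' (before the
# leading-space handling); spaces around the newline are dropped, the newline
# becomes a space, tabs become spaces and runs of spaces collapse to one.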
def inline_in_block(box):
"""Build the structure of lines inside blocks and return a new box tree.
Consecutive inline-level boxes in a block container box are wrapped into a
line box, itself wrapped into an anonymous block box.
This line box will be broken into multiple lines later.
This is the first case in
http://www.w3.org/TR/CSS21/visuren.html#anonymous-block-level
Eg.::
BlockBox[
TextBox['Some '],
InlineBox[TextBox['text']],
BlockBox[
TextBox['More text'],
]
]
is turned into::
BlockBox[
AnonymousBlockBox[
LineBox[
TextBox['Some '],
InlineBox[TextBox['text']],
]
]
BlockBox[
LineBox[
TextBox['More text'],
]
]
]
"""
if not isinstance(box, boxes.ParentBox) or box.is_running():
return box
box_children = list(box.children)
if box_children and box.leading_collapsible_space is False:
box.leading_collapsible_space = (
box_children[0].leading_collapsible_space)
children = []
trailing_collapsible_space = False
for child in box_children:
# Keep track of removed collapsing spaces for wrap opportunities, and
# remove empty text boxes.
# (They may have been emptied by process_whitespace().)
if trailing_collapsible_space:
child.leading_collapsible_space = True
if isinstance(child, boxes.TextBox) and not child.text:
trailing_collapsible_space = child.leading_collapsible_space
else:
trailing_collapsible_space = False
children.append(inline_in_block(child))
if box.trailing_collapsible_space is False:
box.trailing_collapsible_space = trailing_collapsible_space
if not isinstance(box, boxes.BlockContainerBox):
box.children = children
return box
new_line_children = []
new_children = []
for child_box in children:
assert not isinstance(child_box, boxes.LineBox)
if new_line_children and child_box.is_absolutely_positioned():
new_line_children.append(child_box)
elif isinstance(child_box, boxes.InlineLevelBox) or (
new_line_children and child_box.is_floated()):
# Do not append white space at the start of a line:
# It would be removed during layout.
if new_line_children or not (
isinstance(child_box, boxes.TextBox) and
# Sequence of white-space was collapsed to a single
# space by process_whitespace().
child_box.text == ' ' and
child_box.style['white_space'] in (
'normal', 'nowrap', 'pre-line')):
new_line_children.append(child_box)
else:
if new_line_children:
# Inlines are consecutive no more: add this line box
# and create a new one.
line_box = boxes.LineBox.anonymous_from(box, new_line_children)
anonymous = boxes.BlockBox.anonymous_from(box, [line_box])
new_children.append(anonymous)
new_line_children = []
new_children.append(child_box)
if new_line_children:
# There were inlines at the end
line_box = boxes.LineBox.anonymous_from(box, new_line_children)
if new_children:
anonymous = boxes.BlockBox.anonymous_from(box, [line_box])
new_children.append(anonymous)
else:
# Only inline-level children: one line box
new_children.append(line_box)
box.children = new_children
return box
def block_in_inline(box):
"""Build the structure of blocks inside lines.
    Inline boxes containing block-level boxes are split around those
    block-level boxes, and each resulting side is wrapped
    in an anonymous block-level box.
This is the second case in
http://www.w3.org/TR/CSS21/visuren.html#anonymous-block-level
Eg. if this is given::
BlockBox[
LineBox[
InlineBox[
TextBox['Hello.'],
],
InlineBox[
TextBox['Some '],
InlineBox[
TextBox['text']
BlockBox[LineBox[TextBox['More text']]],
BlockBox[LineBox[TextBox['More text again']]],
],
BlockBox[LineBox[TextBox['And again.']]],
]
]
]
this is returned::
BlockBox[
AnonymousBlockBox[
LineBox[
InlineBox[
TextBox['Hello.'],
],
InlineBox[
TextBox['Some '],
InlineBox[TextBox['text']],
]
]
],
BlockBox[LineBox[TextBox['More text']]],
BlockBox[LineBox[TextBox['More text again']]],
AnonymousBlockBox[
LineBox[
InlineBox[
]
]
],
BlockBox[LineBox[TextBox['And again.']]],
AnonymousBlockBox[
LineBox[
InlineBox[
]
]
],
]
"""
if not isinstance(box, boxes.ParentBox) or box.is_running():
return box
new_children = []
changed = False
for child in box.children:
if isinstance(child, boxes.LineBox):
assert len(box.children) == 1, (
'Line boxes should have no '
'siblings at this stage, got %r.' % box.children)
stack = None
while 1:
new_line, block, stack = _inner_block_in_inline(
child, skip_stack=stack)
if block is None:
break
anon = boxes.BlockBox.anonymous_from(box, [new_line])
new_children.append(anon)
new_children.append(block_in_inline(block))
# Loop with the same child and the new stack.
if new_children:
# Some children were already added, this became a block
# context.
new_child = boxes.BlockBox.anonymous_from(box, [new_line])
else:
# Keep the single line box as-is, without anonymous blocks.
new_child = new_line
else:
# Not in an inline formatting context.
new_child = block_in_inline(child)
if new_child is not child:
changed = True
new_children.append(new_child)
if changed:
box.children = new_children
return box
def _inner_block_in_inline(box, skip_stack=None):
"""Find a block-level box in an inline formatting context.
If one is found, return ``(new_box, block_level_box, resume_at)``.
``new_box`` contains all of ``box`` content before the block-level box.
``resume_at`` can be passed as ``skip_stack`` in a new call to
this function to resume the search just after the block-level box.
If no block-level box is found after the position marked by
``skip_stack``, return ``(new_box, None, None)``
"""
new_children = []
block_level_box = None
resume_at = None
changed = False
is_start = skip_stack is None
if is_start:
skip = 0
else:
skip, skip_stack = skip_stack
for i, child in enumerate(box.children[skip:]):
index = i + skip
if isinstance(child, boxes.BlockLevelBox) and \
child.is_in_normal_flow():
assert skip_stack is None # Should not skip here
block_level_box = child
index += 1 # Resume *after* the block
else:
if isinstance(child, boxes.InlineBox):
recursion = _inner_block_in_inline(child, skip_stack)
skip_stack = None
new_child, block_level_box, resume_at = recursion
else:
assert skip_stack is None # Should not skip here
new_child = block_in_inline(child)
# block_level_box is still None.
if new_child is not child:
changed = True
new_children.append(new_child)
if block_level_box is not None:
resume_at = (index, resume_at)
box = box.copy_with_children(new_children)
break
else:
if changed or skip:
box = box.copy_with_children(new_children)
return box, block_level_box, resume_at
def set_viewport_overflow(root_box):
"""
Set a ``viewport_overflow`` attribute on the box for the root element.
Like backgrounds, ``overflow`` on the root element must be propagated
to the viewport.
See http://www.w3.org/TR/CSS21/visufx.html#overflow
"""
chosen_box = root_box
if (root_box.element_tag.lower() == 'html' and
root_box.style['overflow'] == 'visible'):
for child in root_box.children:
if child.element_tag.lower() == 'body':
chosen_box = child
break
root_box.viewport_overflow = chosen_box.style['overflow']
chosen_box.style['overflow'] = 'visible'
return root_box
def box_text(box):
if isinstance(box, boxes.TextBox):
return box.text
elif isinstance(box, boxes.ParentBox):
return ''.join(
child.text for child in box.descendants()
if not child.element_tag.endswith('::before') and
not child.element_tag.endswith('::after') and
not child.element_tag.endswith('::marker') and
isinstance(child, boxes.TextBox))
else:
return ''
def box_text_first_letter(box):
# TODO: use the same code as in inlines.first_letter_to_box
character_found = False
first_letter = ''
text = box_text(box)
while text:
next_letter = text[0]
category = unicodedata.category(next_letter)
if category not in ('Ps', 'Pe', 'Pi', 'Pf', 'Po'):
if character_found:
break
character_found = True
first_letter += next_letter
text = text[1:]
return first_letter
def box_text_before(box):
if isinstance(box, boxes.ParentBox):
return ''.join(
box_text(child) for child in box.descendants()
if child.element_tag.endswith('::before') and
not isinstance(child, boxes.ParentBox))
else:
return ''
def box_text_after(box):
if isinstance(box, boxes.ParentBox):
return ''.join(
box_text(child) for child in box.descendants()
if child.element_tag.endswith('::after') and
not isinstance(child, boxes.ParentBox))
else:
return ''
TEXT_CONTENT_EXTRACTORS = {
'text': box_text,
'content': box_text,
'before': box_text_before,
'after': box_text_after,
'first-letter': box_text_first_letter}
|
the-stack_106_26500 | from pudzu.charts import *
from pudzu.sandbox.bamboo import *
from PIL import ImageEnhance
df = pd.read_csv("datasets/eumothers.csv").set_index("country")
FONT = sans
PALETTE = {
"IWD": VegaPalette10.RED,
"SE": VegaPalette10.GREEN,
"FSL": VegaPalette10.ORANGE,
"FSM": VegaPalette10.LIGHTBLUE,
"SSM": VegaPalette10.BLUE,
"LSM": VegaPalette10.PURPLE,
"other": VegaPalette10.GREY,
}
DESCRIPTIONS = [
"**International Women's Day** (8 March)",
"**Spring Equinox** (21 March)",
"**Fourth Sunday in Lent** (Mothering Sunday)",
"**First Sunday of May**",
"**Second Sunday of May**",
"**Last Sunday of May**\n(may be postponed in France for Pentecost)",
"""**On a different day**
Norway: Second Sunday of February
Israel: 30 Shevat (~February)
Georgia: 3 March
Slovenia: 25 March
Armenia: 7 April
Iran: 21 Ordibehesht (~11 May)
Poland: 26 May
Luxembourg: Second Sunday of June
Belarus: 14 October
Serbia: Second Sunday before Christmas""",
]
FOOTER = None
def colorfn(c):
if c in ['Sea', 'Borders']: return "white"
elif c not in df.index or non(df.group.get(c)): return "grey"
elif "&" in df.group.get(c):
colors = [PALETTE[i] for i in df.group[c].split("&")]
return Stripe(20, *colors)
elif "|" in df.group.get(c):
return VegaPalette10.BROWN
else: return PALETTE[df.group.get(c)]
map = map_chart("maps/Europe2.png", colorfn, None)
legend = generate_legend(list(PALETTE.values()), DESCRIPTIONS, box_sizes=(40,...), header="Mother's Day mainly celebrated on...".upper(), footer=FOOTER, font_family=partial(FONT, 16))
chart = map.place(legend, align=(1,0), padding=50)
title = Image.from_column([
Image.from_text("MOTHER'S DAYS", FONT(96, bold=True)),
Image.from_text("date of main Mother's Day celebrations (according to Wikipedia)", FONT(36))
],
bg="white", padding=2)
img = Image.from_column([title, chart], bg="white", padding=2)
img = ImageEnhance.Color(img).enhance(0.8)
img.place(Image.from_text("/u/Udzu", FONT(16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/eumothers.png")
|
the-stack_106_26502 |
# import the definition of the steps and input files:
from Configuration.PyReleaseValidation.relval_steps import *
# here only define the workflows as a combination of the steps defined above:
workflows = Matrix()
# each workflow defines a name and a list of steps to be done.
# if no explicit name/label given for the workflow (first arg),
# the name of step1 will be used
from Configuration.PyReleaseValidation.relval_upgrade import workflows as _upgrade_workflows
#just define all of them
#WFs to run in IB:
# mc 2018 (Patatrack pixel-only quadruplets: ZMM - on GPU, both CPU and GPU, auto)
# (Patatrack pixel-only triplets: ZMM - on GPU, both CPU and GPU, auto)
# (Patatrack pixel-only quadruplets: TTbar - on GPU, both CPU and GPU, auto)
# (Patatrack pixel-only triplets: TTbar - on GPU, both CPU and GPU, auto)
# (Patatrack ECAL-only: TTbar - on GPU, both CPU and GPU, auto)
# (Patatrack HCAL-only: TTbar - on GPU, both CPU and GPU, auto)
# (Patatrack full reco with pixel quadruplets: TTbar - on GPU, both CPU and GPU, auto)
# (Patatrack full reco with pixel triplets: TTbar - on GPU, both CPU and GPU, auto)
# mc 2021 (Patatrack pixel-only quadruplets: ZMM - on GPU, both CPU and GPU, auto)
# (Patatrack pixel-only triplets: ZMM - on GPU, both CPU and GPU, auto)
# (Patatrack pixel-only quadruplets: TTbar - on GPU, both CPU and GPU, auto)
# (Patatrack pixel-only triplets: TTbar - on GPU, both CPU and GPU, auto)
# (Patatrack ECAL-only: TTbar - on GPU, both CPU and GPU, auto)
# (Patatrack HCAL-only: TTbar - on GPU, both CPU and GPU, auto)
# (Patatrack full reco with pixel quadruplets: TTbar - on GPU, both CPU and GPU, auto)
# (Patatrack full reco with pixel triplets: TTbar - on GPU, both CPU and GPU, auto)
numWFIB = [
10842.502, # 10842.503,10842.504,
10842.506, # 10842.507,10842.508,
10824.502, # 10824.503,10824.504,
10824.506, # 10824.507,10824.508,
10824.512, # 10824.513,10824.514,
10824.522, # 10824.523,10824.524,
10824.592, # 10824.593,10824.594,
10824.596, # 10824.597,10824.598,
11650.502, # 11650.503,11650.504,
11650.506, # 11650.507,11650.508,
11634.502, # 11634.503,11634.504,
11634.506, # 11634.507,11634.508,
11634.512, # 11634.513,11634.514,
11634.522, # 11634.523,11634.524
11634.592, # 11634.593,11634.594,
11634.596, # 11634.597,11634.598,
]
for numWF in numWFIB:
if not numWF in _upgrade_workflows: continue
workflows[numWF] = _upgrade_workflows[numWF]
# data 2018 (Patatrack pixel-only: RunHLTPhy2018D, RunJetHT2018D on GPU)
# (Patatrack ECAL-only: RunHLTPhy2018D, RunJetHT2018D on GPU)
# (Patatrack HCAL-only: RunHLTPhy2018D, RunJetHT2018D on GPU)
workflows[136.885502] = ['',['RunHLTPhy2018D','HLTDR2_2018','RECODR2_2018reHLT_Patatrack_PixelOnlyGPU','HARVEST2018_pixelTrackingOnly']]
workflows[136.888502] = ['',['RunJetHT2018D','HLTDR2_2018','RECODR2_2018reHLT_Patatrack_PixelOnlyGPU','HARVEST2018_pixelTrackingOnly']]
workflows[136.885512] = ['',['RunHLTPhy2018D','HLTDR2_2018','RECODR2_2018reHLT_ECALOnlyGPU','HARVEST2018_ECALOnly']]
workflows[136.888512] = ['',['RunJetHT2018D','HLTDR2_2018','RECODR2_2018reHLT_ECALOnlyGPU','HARVEST2018_ECALOnly']]
workflows[136.885522] = ['',['RunHLTPhy2018D','HLTDR2_2018','RECODR2_2018reHLT_HCALOnlyGPU','HARVEST2018_HCALOnly']]
workflows[136.888522] = ['',['RunJetHT2018D','HLTDR2_2018','RECODR2_2018reHLT_HCALOnlyGPU','HARVEST2018_HCALOnly']]
|
the-stack_106_26503 | '''
uses code from
https://pypi.org/project/combalg-py/
https://pythonhosted.org/combalg-py/
License: MIT License (MIT)
Author: Sam Stump
'''
def all(n, k):
'''
A generator that returns all of the compositions of n into k parts.
:param n: integer to compose
:type n: int
:param k: number of parts to compose
:type k: int
    :return: a generator yielding k-element tuples which sum to n
    Compositions are an expression of n as a sum of k parts, including zero
    terms, and order is important. For example, the compositions of 2
    into 2 parts:
    >>> list(all(2, 2))
    [(2, 0), (1, 1), (0, 2)]
NOTE: There are C(n+k-1,n) partitions of n into k parts. See:
`Stars and Bars <https://en.wikipedia.org/wiki/Stars_and_bars_(combinatorics)>`_.
'''
t = n
h = 0
a = [0]*k
a[0] = n
yield tuple(a)
while a[k-1] != n:
if t != 1:
h = 0
t = a[h]
a[h] = 0
a[0] = t-1
a[h+1] += 1
h += 1
yield tuple(a)
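# Hedged usage sketch (added example): enumerate the compositions of 3 into 2 parts
# with the generator above; there are C(3+2-1, 3) = 4 of them.
if __name__ == '__main__':
    for parts in all(3, 2):
        print(parts)  # (3, 0), (2, 1), (1, 2), (0, 3)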
|
the-stack_106_26504 | from moduleUsefulFunctions_20180215 import *
import scikits.bootstrap as boot
plt.close()
def updateDictData(dictionaryToUpdate,sequence,counts):
if sequence in dictionaryToUpdate:
dictionaryToUpdate[sequence]+=counts
else:
dictionaryToUpdate[sequence]=counts
def listToDictConverter(listOfDimers):
dictToReturn={}
for eachEl in listOfDimers:
if eachEl in dictToReturn:
dictToReturn[eachEl]+=1
else:
dictToReturn[eachEl]=1
return dictToReturn
def bootStrapDictReturn(indexList,dimerList):
dictToReturn={}
for eachEl in indexList:
if dimerList[eachEl] in dictToReturn:
dictToReturn[dimerList[eachEl]]+=1
else:
dictToReturn[dimerList[eachEl]]=1
return dictToReturn
def getHomoDict(dictData):
dictToReturn={}
for eachEl in dictData:
if eachEl[0]==eachEl[1]:
dictToReturn[eachEl]=dictData[eachEl]
return dictToReturn
def getHeteroDict(dictData):
dictToReturn={}
for eachEl in dictData:
if eachEl[0]!=eachEl[1]:
dictToReturn[eachEl]=dictData[eachEl]
return dictToReturn
def getFirstHalf(dictData):
dictToReturn={}
for eachEl in dictData:
if eachEl[0] in dictToReturn:
dictToReturn[eachEl[0]]+=dictData[eachEl]
else:
dictToReturn[eachEl[0]]=dictData[eachEl]
return dictToReturn
def getSecondHalf(dictData):
dictToReturn={}
for eachEl in dictData:
if eachEl[1] in dictToReturn:
dictToReturn[eachEl[1]]+=dictData[eachEl]
else:
dictToReturn[eachEl[1]]=dictData[eachEl]
return dictToReturn
def getTotal(refName,dictData):
total=0
for eachEl in dictData:
if eachEl[0][0]+eachEl[1][0]==refName:
total+=dictData[eachEl]
return total
def getCorrection(refName,firstHalfDict,secondHalfDict,totalsDict):
#correction required because the total of hetero dicts does not include homodimers produced because of intermolecular template switching
correction=1.0
for eachEl in firstHalfDict:
if eachEl in secondHalfDict:
if eachEl[0]==refName[0]:
correction=correction-(float(firstHalfDict[eachEl])/float(totalsDict[refName]))*(float(secondHalfDict[eachEl])/float(totalsDict[refName]))
return correction
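# In symbols (added note): for reference r, getCorrection() returns
#     1 - sum_s (firstHalfDict[s] / totals[r]) * (secondHalfDict[s] / totals[r]),
# i.e. the probability that randomly re-pairing heterodimer halves does NOT
# recreate a homodimer; the expected heterodimer counts are later divided by it.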
def getXRange(dataVector,expPower):
xToUse=0.0
xVector=[]
for counter, eachEl in enumerate(dataVector):
if counter==0:
pass
else:
xToUse+=np.power(eachEl,expPower)
xVector.append(xToUse)
return xVector
def numberDiffHalves(seq1,seq2):
totalDiff=0
for counter, eachEl in enumerate(seq1):
if eachEl!=seq2[counter]:
totalDiff+=1
return totalDiff
with open(sys.argv[1],'rb') as f:
dimerCounts=cPickle.load(f) #dimerDump_ver2.pckl file
def bootstrapFunction(data):
#data is an indices list
dictSimData=bootStrapDictReturn(data,listDimerNames)
homoSimData=getHomoDict(dictSimData)
heteroSimData=getHeteroDict(dictSimData)
firstHalfWholeSimData=getFirstHalf(dictSimData)
secondHalfWholeSimData=getSecondHalf(dictSimData)
firstHalfHeteroSimData=getFirstHalf(heteroSimData)
secondHalfHeteroSimData=getSecondHalf(heteroSimData)
totalsWholeSim={}
totalsHeteroSim={}
totalsHeteroSubtractSim={}
listToReturn=[]
for eachEl in refsToUse:
totalsWholeSim[eachEl]=getTotal(eachEl,dictSimData)
totalsHeteroSim[eachEl]=getTotal(eachEl,heteroSimData)
totalsHeteroSubtractSim[eachEl]=getCorrection(eachEl,firstHalfHeteroSimData,secondHalfHeteroSimData,totalsHeteroSim)
for counter, eachEl in enumerate(dictSorter(homoOriginalExpDict)):
refName=eachEl[0][0]+eachEl[1][0]
if eachEl[0] in firstHalfWholeSimData and eachEl[1] in secondHalfWholeSimData:
listToReturn.append((float(firstHalfWholeSimData[eachEl[0]])*float(secondHalfWholeSimData[eachEl[1]]))/float(totalsWholeSim[refName]))
else:
listToReturn.append(0.0)
for counter, eachEl in enumerate(dictSorter(heteroOriginalExpDict)):
refName=eachEl[0][0]+eachEl[1][0]
if eachEl[0] in firstHalfHeteroSimData and eachEl[1] in secondHalfHeteroSimData:
listToReturn.append(((float(firstHalfHeteroSimData[eachEl[0]])*float(secondHalfHeteroSimData[eachEl[1]]))/float(totalsHeteroSim[refName]))/float(totalsHeteroSubtractSim[refName]))
else:
listToReturn.append(0.0)
return np.array(listToReturn)
sampleName=sys.argv[1]
sampleName=sampleName[:sampleName.rfind('_dimerDump_ver2.pckl')]
#first generate sample to bootstrap on, also restrict to GG and CC dimers
refsToUse=['GG','CC']
pcrDuplicates=0
listDimerNames=[]
for eachComb in dimerCounts:
refType=eachComb[0][0]+eachComb[1][0]
if refType in refsToUse:
for i in range(0,dimerCounts[eachComb][pcrDuplicates]):
listDimerNames.append(eachComb)
dictOriginalData=listToDictConverter(listDimerNames)
homoOriginalData=getHomoDict(dictOriginalData)
heteroOriginalData=getHeteroDict(dictOriginalData)
firstHalfWholeOriginalData=getFirstHalf(dictOriginalData)
secondHalfWholeOriginalData=getSecondHalf(dictOriginalData)
firstHalfHeteroOriginalData=getFirstHalf(heteroOriginalData)
secondHalfHeteroOriginalData=getSecondHalf(heteroOriginalData)
totalsWholeOriginal={}
totalsHeteroOriginal={}
totalsHeteroSubtractOriginal={}
for eachEl in refsToUse:
totalsWholeOriginal[eachEl]=getTotal(eachEl,dictOriginalData)
totalsHeteroOriginal[eachEl]=getTotal(eachEl,heteroOriginalData)
totalsHeteroSubtractOriginal[eachEl]=getCorrection(eachEl,firstHalfHeteroOriginalData,secondHalfHeteroOriginalData,totalsHeteroOriginal)
f,ax=plt.subplots(nrows=1,ncols=2,figsize=(24,12))
#plot data and expected counts
obsHomoCounts=[]
expHomoCounts=[]
homoOriginalExpDict={}
for eachEl in homoOriginalData:
refName=eachEl[0][0]+eachEl[1][0]
homoOriginalExpDict[eachEl]=(float(firstHalfWholeOriginalData[eachEl[0]])*float(secondHalfWholeOriginalData[eachEl[1]]))/float(totalsWholeOriginal[refName])
for eachEl in dictSorter(homoOriginalExpDict):
obsHomoCounts.append(homoOriginalData[eachEl])
expHomoCounts.append(homoOriginalExpDict[eachEl])
obsHeteroCounts=[]
expHeteroCounts=[]
heteroOriginalExpDict={}
for eachEl in heteroOriginalData:
refName=eachEl[0][0]+eachEl[1][0]
heteroOriginalExpDict[eachEl]=((float(firstHalfHeteroOriginalData[eachEl[0]])*float(secondHalfHeteroOriginalData[eachEl[1]]))/float(totalsHeteroOriginal[refName]))/float(totalsHeteroSubtractOriginal[refName])
for eachEl in dictSorter(heteroOriginalExpDict):
obsHeteroCounts.append(heteroOriginalData[eachEl])
expHeteroCounts.append(heteroOriginalExpDict[eachEl])
#generate bootstrap data
confidenceIntervals=boot.ci(np.arange(len(listDimerNames)), statfunction=bootstrapFunction, alpha=0.05, n_samples=100000, method='bca', output='lowhigh')
#first row of confidenceIntervals is lowPercentile
#second row of confidenceIntervals is highPercentile
lowPercentile=confidenceIntervals[0,:]
highPercentile=confidenceIntervals[1,:]
obsHomoCounts=np.array(obsHomoCounts)
expHomoCounts=np.array(expHomoCounts)
obsHeteroCounts=np.array(obsHeteroCounts)
expHeteroCounts=np.array(expHeteroCounts)
#re-sort based on high percentile values
lowPercentileHomo=lowPercentile[:len(homoOriginalExpDict)]
lowPercentileHetero=lowPercentile[len(homoOriginalExpDict):]
highPercentileHomo=highPercentile[:len(homoOriginalExpDict)]
highPercentileHetero=highPercentile[len(homoOriginalExpDict):]
#homo sort
newIndexArray=np.argsort(highPercentileHomo)
newIndexArray=newIndexArray[::-1]
lowPercentileHomo=lowPercentileHomo[newIndexArray]
highPercentileHomo=highPercentileHomo[newIndexArray]
obsHomoCounts=obsHomoCounts[newIndexArray]
expHomoCounts=expHomoCounts[newIndexArray]
#hetero sort
newIndexArray=np.argsort(highPercentileHetero)
newIndexArray=newIndexArray[::-1]
lowPercentileHetero=lowPercentileHetero[newIndexArray]
highPercentileHetero=highPercentileHetero[newIndexArray]
obsHeteroCounts=obsHeteroCounts[newIndexArray]
expHeteroCounts=expHeteroCounts[newIndexArray]
firstXRange=getXRange(highPercentileHomo,0.25)
secondXRange=getXRange(highPercentileHetero,0.7)
ax[0].plot(firstXRange,obsHomoCounts,'bo',mec='b')
ax[0].fill_between(firstXRange,lowPercentileHomo,highPercentileHomo,color=(230.0/255.0,230.0/255.0,0.0/255.0))
ax[0].set_yscale('symlog',linthreshy=1.0)
ax[0].set_xlim([-1*(firstXRange[2]-firstXRange[1]),None])
ax[0].set_ylim([0,10**5])
ax[0].set_xlabel('Homodimer species')
ax[0].set_ylabel('Counts')
ax[1].plot(secondXRange,obsHeteroCounts,'bo',mec='b')
ax[1].fill_between(secondXRange,lowPercentileHetero,highPercentileHetero,color=(230.0/255.0,230.0/255.0,0.0/255.0))
ax[1].set_yscale('symlog',linthreshy=1.0)
ax[1].set_xlim([-1*(secondXRange[2]-secondXRange[1]),None])
ax[1].set_ylim([0,10**5])
ax[1].set_xlabel('Heterodimer species')
ax[1].set_ylabel('Counts')
#plt.show()
plt.savefig(sampleName+'_agreementByVariant_20180426_ver4.pdf',dpi=300)
|
the-stack_106_26507 | from Abstract.Expression import Expression
from Environment.Environment import Environment
from Environment.Value import Value
from Enum.typeExpression import typeExpression
class Multiply(Expression):
def __init__(self, left: Expression, right: Expression) -> None:
super().__init__()
self.leftExpression = left
self.rightExpression = right
def compile(self, environment: Environment) -> Value:
self.leftExpression.generator = self.generator
self.rightExpression.generator = self.generator
leftValue: Value = self.leftExpression.compile(environment)
rightValue: Value = self.rightExpression.compile(environment)
newTemp = self.generator.newTemp()
if(leftValue.type == typeExpression.INTEGER):
if(rightValue.type == typeExpression.INTEGER or rightValue.type == typeExpression.FLOAT):
self.generator.addExpression(newTemp,leftValue.getValue(),rightValue.getValue(),"*")
return Value(newTemp,True,rightValue.type)
else:
                print("Error in multiplication")
return Value("0",False,typeExpression.INTEGER)
elif(leftValue.type == typeExpression.FLOAT):
if(rightValue.type == typeExpression.INTEGER or rightValue.type == typeExpression.FLOAT):
self.generator.addExpression(newTemp,leftValue.getValue(),rightValue.getValue(),"*")
return Value(newTemp,True,typeExpression.FLOAT)
else:
                print("Error in multiplication")
return Value("0",False,typeExpression.INTEGER)
else:
            print("Error in multiplication")
return Value("0",False,typeExpression.INTEGER) |
the-stack_106_26509 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Royal Cuevas
#2285562
#[email protected]
#PHYS220 Fall 2018
#CW 09
import numpy as np
import matplotlib.pyplot as plt
def gradient(x, o=1):
    """Requires a 1-dimensional array sampled on a unit grid.
    Computes the finite-difference derivative of the
    corresponding function.
    For higher-order derivatives pass the
    order as an int in the 2nd argument;
    defaults to 1."""
D = np.zeros((len(x), len(x)))
ones = np.ones(len(D)-1)
np.fill_diagonal(D[1:], -ones)
np.fill_diagonal(D[:, 1:], ones)
D[0][0] = -2
D[0][1] = 2
D[len(D)-1][len(D)-1] = 2
D[len(D)-1][len(D)-2] = -2
    # apply the first-derivative stencil o times; each application carries a
    # factor of 1/2, so the result is scaled by 0.5**o
    Dn = D
    for i in range(o-1):
        Dn = Dn@D
    return Dn@x*(.5**o)
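# Hedged self-check (added example, not part of the original assignment): for
# f(x) = x**2 sampled at x = 0..4, gradient() recovers f'(x) = 2x exactly in the
# interior, while the endpoints use one-sided differences.
def _gradient_demo():
    """Return gradient(x**2) for x = 0..4; expected [1., 2., 4., 6., 7.]."""
    x = np.arange(5)
    return gradient(x**2)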
def xx():
"""Plots x^2 and its derivative"""
x = np.arange(0, 7)
f = x**2
fp = gradient(f)
a = plt.axes()
a.plot(x, f, label="f(x)")
a.plot(x, fp, color="Red", label="f'(x)")
a.set(xlabel="x", ylabel="y", title="$x^2$")
a.legend()
plt.show()
def second():
"""Plots x^2 and its 2nd derivative"""
x = np.arange(0, 7)
f = x**2
fpp = gradient(f, 2)
a = plt.axes()
a.plot(x, f, label="f(x)")
a.plot(x, fpp, color="Red", label="f''(x)")
a.set(xlabel="x", ylabel="y", title="$x^2$")
a.legend()
plt.show()
def xxgrad():
x = np.arange(0, 7)
f = x**2
fp = np.gradient(f)
a = plt.axes()
a.plot(x, f, label="f(x)")
a.plot(x, fp, color="Red", label="f'(x)")
a.set(xlabel="x", ylabel="y", title="$x^2$")
a.legend()
plt.show()
def sinx():
"""Plots sin(x) and its derivative"""
x = np.arange(0, 8)
s = np.sin(x)
sp = gradient(s)
a = plt.axes()
a.plot(x, s, label="s(x)")
a.plot(x, sp, color="Red", label="s'(x)")
a.set(xlabel="x", ylabel="y", title="sin(x)")
a.legend()
plt.show()
def sin2():
"""Plots sin(x) and its 2nd derivative"""
x = np.arange(0, 8)
s = np.sin(x)
spp = gradient(s, 2)
a = plt.axes()
a.plot(x, s, label="s(x)")
a.plot(x, spp, color="Red", label="s''(x)")
a.set(xlabel="x", ylabel="y", title="sin(x)")
a.legend()
plt.show()
def sinxgrad():
x = np.arange(0, 8)
s = np.sin(x)
sp = np.gradient(s)
a = plt.axes()
a.plot(x, s, label="s(x)")
a.plot(x, sp, color="Red", label="s'(x)")
a.set(xlabel="x", ylabel="y", title="$sin(x)$")
a.legend()
plt.show()
def exp():
"""Plots e^(-x^2/2)/sqrt(2pi) and its derivative"""
x = np.arange(0, 6)
g = np.exp(-x**2/2)/np.sqrt(2*np.pi)
gp = gradient(g)
a = plt.axes()
a.plot(x, g, label="g(x)")
a.plot(x, gp, color="Red", label="g'(x)")
a.set(xlabel="x", ylabel="y", title="$e^{-x^2/2}/\sqrt{2\pi}$")
a.legend()
plt.show()
def expgrad():
x = np.arange(0, 6)
g = np.exp(-x**2/2)/np.sqrt(2*np.pi)
gp = np.gradient(g)
a = plt.axes()
a.plot(x, g, label="g(x)")
a.plot(x, gp, color="Red", label="g'(x)")
a.set(xlabel="x", ylabel="y", title="$e^{-x^2/2}/\sqrt{2\pi}$")
a.legend()
plt.show() |
the-stack_106_26512 | # Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''result.py'''
import abc
import sys
from enum import Enum
from heron.common.src.python.utils import proc
from heron.common.src.python.utils.log import Log
# Meaning of exit status code:
# - status code = 0:
# program exits without error
# - 0 < status code < 100:
# program fails to execute before program execution. For example,
# JVM cannot find or load main class
# - 100 <= status code < 200:
# program fails to launch after program execution. For example,
# topology definition file fails to be loaded
# - status code == 200:
# program sends out dry-run response
# Definition corresponds to definition in com.twitter.heron.scheduler.AbstractMain
# pylint: disable=no-init
class Status(Enum):
"""Status code enum"""
Ok = 0
InvocationError = 1
HeronError = 100
DryRun = 200
def status_type(status_code):
if status_code == 0:
return Status.Ok
elif status_code < 100:
return Status.InvocationError
elif status_code == 200:
return Status.DryRun
else:
return Status.HeronError
class Result(object):
"""Result class"""
def __init__(self, status=None, err_context=None, succ_context=None):
self.status = status
self.err_context = err_context
self.succ_context = succ_context
@staticmethod
def _do_log(log_f, msg):
if msg:
if msg[-1] == '\n':
msg = msg[:-1]
log_f(msg)
@staticmethod
def _do_print(f, msg):
if msg:
if msg[-1] == '\n':
msg = msg[:-1]
print >> f, msg
def _log_context(self):
# render context only after process exits
assert self.status is not None
if self.status in [Status.Ok, Status.DryRun]:
self._do_log(Log.info, self.succ_context)
elif self.status in [Status.HeronError, Status.InvocationError]:
self._do_log(Log.error, self.err_context)
else:
raise RuntimeError(
"Unknown status type of value %d. Expected value: %s", self.status.value, list(Status))
def add_context(self, err_context, succ_context=None):
""" Prepend msg to add some context information
:param pmsg: context info
:return: None
"""
self.err_context = err_context
self.succ_context = succ_context
@abc.abstractmethod
def render(self):
pass
class SimpleResult(Result):
"""Simple result: result that already and only
contains status of the result"""
def __init__(self, *args):
super(SimpleResult, self).__init__(*args)
def render(self):
self._log_context()
class ProcessResult(Result):
"""Process result: a wrapper of result class"""
def __init__(self, process):
super(ProcessResult, self).__init__()
self.process = process
self.stdout_builder = proc.async_stdout_builder(process)
# start redirect stderr in initialization, before render() gets called
proc.async_stream_process_stderr(self.process, self.renderProcessStdErr)
def renderProcessStdErr(self, stderr_line):
""" render stderr of shelled-out process
stderr could be error message of failure of invoking process or
normal stderr output from successfully shelled-out process.
In the first case, ``Popen'' should fail fast and we should be able to
get return code immediately. We then render the failure message.
In the second case, we simply print stderr line in stderr.
The way to handle the first case is shaky but should be the best we can
do since we have conflicts of design goals here.
:param stderr_line: one line from shelled-out process
:return:
"""
retcode = self.process.poll()
if retcode is not None and status_type(retcode) == Status.InvocationError:
self._do_log(Log.error, stderr_line)
else:
self._do_print(sys.stderr, stderr_line)
def renderProcessStdOut(self, stdout):
""" render stdout of shelled-out process
stdout always contains information Java process wants to
propagate back to cli, so we do special rendering here
:param stdout: all lines from shelled-out process
:return:
"""
# since we render stdout line based on Java process return code,
# ``status'' has to be already set
assert self.status is not None
# remove pending newline
if self.status == Status.Ok:
self._do_log(Log.info, stdout)
elif self.status == Status.HeronError:
# remove last newline since logging will append newline
self._do_log(Log.error, stdout)
# No need to prefix [INFO] here. We want to display dry-run response in a clean way
elif self.status == Status.DryRun:
self._do_print(sys.stdout, stdout)
elif self.status == Status.InvocationError:
self._do_print(sys.stdout, stdout)
else:
raise RuntimeError(
"Unknown status type of value %d. Expected value: %s" % \
(self.status.value, list(Status)))
def render(self):
self.process.wait()
self.status = status_type(self.process.returncode)
self.renderProcessStdOut(self.stdout_builder.result())
self._log_context()
def render(results):
if isinstance(results, Result):
results.render()
elif isinstance(results, list):
for r in results:
r.render()
else:
raise RuntimeError("Unknown result instance: %s", str(results.__class__))
# check if all results are successful
def is_successful(results):
if isinstance(results, list):
return all([result.status == Status.Ok for result in results])
elif isinstance(results, Result):
return results.status == Status.Ok
else:
raise RuntimeError("Unknown result instance: %s", str(results.__class__))
|
the-stack_106_26513 | #!/usr/bin/env python3
import ufarc
class Iterate(ufarc.Ahsm):
def __init__(self,):
super().__init__(Iterate.initial)
ufarc.SIGNAL.register("ITERATE")
def initial(me, event):
print("initial")
me.iter_evt = (ufarc.SIGNAL.ITERATE, None)
return me.tran(me, Iterate.iterating)
def iterating(me, event):
sig = event[ufarc.Event.SIG_IDX]
if sig == ufarc.SIGNAL.ENTRY:
print("iterating")
me.count = 10
me.postFIFO(me.iter_evt)
return me.handled(me, event)
elif sig == ufarc.SIGNAL.ITERATE:
print(me.count)
if me.count == 0:
return me.tran(me, Iterate.done)
else:
# do work
me.count -= 1
me.postFIFO(me.iter_evt)
return me.handled(me, event)
return me.super(me, me.top)
def done(me, event):
sig = event[ufarc.Event.SIG_IDX]
if sig == ufarc.SIGNAL.ENTRY:
print("done")
ufarc.Framework.stop()
return me.handled(me, event)
return me.super(me, me.top)
if __name__ == "__main__":
sl = Iterate()
sl.start(0)
ufarc.Framework.run_forever()
|
the-stack_106_26516 | """Plot functions for the profiling report."""
import copy
from typing import Any, Callable, Optional, Union
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.colors import Colormap, LinearSegmentedColormap, ListedColormap
from matplotlib.patches import Patch
from matplotlib.ticker import FuncFormatter
from pandas_profiling.config import Settings
from pandas_profiling.utils.common import convert_timestamp_to_datetime
from pandas_profiling.visualisation.context import manage_matplotlib_context
from pandas_profiling.visualisation.utils import plot_360_n0sc0pe
def _plot_histogram(
config: Settings,
series: np.ndarray,
bins: Union[int, np.ndarray],
figsize: tuple = (6, 4),
date: bool = False,
) -> plt.Figure:
    """Plot a histogram from the data and return the AxesSubplot object.
    Args:
        config: Settings
        series: The data to plot
        figsize: The size of the figure (width, height) in inches, default (6,4)
        bins: number of bins (int for equal size, ndarray for variable size)
        date: whether the data are timestamps (the x-axis is then formatted as dates)
Returns:
The histogram plot.
"""
fig = plt.figure(figsize=figsize)
plot = fig.add_subplot(111)
plot.set_ylabel("Frequency")
# we have precomputed the histograms...
diff = np.diff(bins)
plot.bar(
bins[:-1] + diff / 2, # type: ignore
series,
diff,
facecolor=config.html.style.primary_color,
)
if date:
def format_fn(tick_val: int, tick_pos: Any) -> str:
return convert_timestamp_to_datetime(tick_val).strftime("%Y-%m-%d %H:%M:%S")
plot.xaxis.set_major_formatter(FuncFormatter(format_fn))
if not config.plot.histogram.x_axis_labels:
plot.set_xticklabels([])
return plot
@manage_matplotlib_context()
def histogram(
config: Settings,
series: np.ndarray,
bins: Union[int, np.ndarray],
date: bool = False,
) -> str:
    """Plot a histogram of the data.
Args:
config: Settings
series: The data to plot.
bins: number of bins (int for equal size, ndarray for variable size)
date: is histogram of date(time)?
Returns:
The resulting histogram encoded as a string.
"""
plot = _plot_histogram(config, series, bins, date=date)
plot.xaxis.set_tick_params(rotation=90 if date else 45)
plot.figure.tight_layout()
return plot_360_n0sc0pe(config)
@manage_matplotlib_context()
def mini_histogram(
config: Settings,
series: np.ndarray,
bins: Union[int, np.ndarray],
date: bool = False,
) -> str:
"""Plot a small (mini) histogram of the data.
Args:
config: Settings
series: The data to plot.
bins: number of bins (int for equal size, ndarray for variable size)
Returns:
The resulting mini histogram encoded as a string.
"""
plot = _plot_histogram(config, series, bins, figsize=(3, 2.25), date=date)
plot.axes.get_yaxis().set_visible(False)
plot.set_facecolor("w")
for tick in plot.xaxis.get_major_ticks():
tick.label1.set_fontsize(6 if date else 8)
plot.xaxis.set_tick_params(rotation=90 if date else 45)
plot.figure.tight_layout()
return plot_360_n0sc0pe(config)
def get_cmap_half(
cmap: Union[Colormap, LinearSegmentedColormap, ListedColormap]
) -> LinearSegmentedColormap:
"""Get the upper half of the color map
Args:
cmap: the color map
Returns:
A new color map based on the upper half of another color map
References:
https://stackoverflow.com/a/24746399/470433
"""
# Evaluate an existing colormap from 0.5 (midpoint) to 1 (upper end)
colors = cmap(np.linspace(0.5, 1, cmap.N // 2))
# Create a new colormap from those colors
return LinearSegmentedColormap.from_list("cmap_half", colors)
def get_correlation_font_size(n_labels: int) -> Optional[int]:
"""Dynamic label font sizes in correlation plots
Args:
n_labels: the number of labels
Returns:
A font size or None for the default font size
"""
if n_labels > 100:
font_size = 4
elif n_labels > 80:
font_size = 5
elif n_labels > 50:
font_size = 6
elif n_labels > 40:
font_size = 8
else:
return None
return font_size
@manage_matplotlib_context()
def correlation_matrix(config: Settings, data: pd.DataFrame, vmin: int = -1) -> str:
"""Plot image of a matrix correlation.
Args:
config: Settings
data: The matrix correlation to plot.
vmin: Minimum value of value range.
Returns:
The resulting correlation matrix encoded as a string.
"""
fig_cor, axes_cor = plt.subplots()
cmap = plt.get_cmap(config.plot.correlation.cmap)
if vmin == 0:
cmap = get_cmap_half(cmap)
cmap = copy.copy(cmap)
cmap.set_bad(config.plot.correlation.bad)
labels = data.columns
matrix_image = axes_cor.imshow(
data, vmin=vmin, vmax=1, interpolation="nearest", cmap=cmap
)
plt.colorbar(matrix_image)
if data.isnull().values.any():
legend_elements = [Patch(facecolor=cmap(np.nan), label="invalid\ncoefficient")]
plt.legend(
handles=legend_elements,
loc="upper right",
handleheight=2.5,
)
axes_cor.set_xticks(np.arange(0, data.shape[0], float(data.shape[0]) / len(labels)))
axes_cor.set_yticks(np.arange(0, data.shape[1], float(data.shape[1]) / len(labels)))
font_size = get_correlation_font_size(len(labels))
axes_cor.set_xticklabels(labels, rotation=90, fontsize=font_size)
axes_cor.set_yticklabels(labels, fontsize=font_size)
plt.subplots_adjust(bottom=0.2)
return plot_360_n0sc0pe(config)
@manage_matplotlib_context()
def scatter_complex(config: Settings, series: pd.Series) -> str:
"""Scatter plot (or hexbin plot) from a series of complex values
Examples:
>>> complex_series = pd.Series([complex(1, 3), complex(3, 1)])
>>> scatter_complex(complex_series)
Args:
config: Settings
series: the Series
Returns:
A string containing (a reference to) the image
"""
plt.ylabel("Imaginary")
plt.xlabel("Real")
color = config.html.style.primary_color
if len(series) > config.plot.scatter_threshold:
cmap = sns.light_palette(color, as_cmap=True)
plt.hexbin(series.real, series.imag, cmap=cmap)
else:
plt.scatter(series.real, series.imag, color=color)
return plot_360_n0sc0pe(config)
@manage_matplotlib_context()
def scatter_series(
config: Settings, series: pd.Series, x_label: str = "Width", y_label: str = "Height"
) -> str:
"""Scatter plot (or hexbin plot) from one series of sequences with length 2
Examples:
>>> scatter_series(file_sizes, "Width", "Height")
Args:
config: report Settings object
series: the Series
x_label: the label on the x-axis
y_label: the label on the y-axis
Returns:
A string containing (a reference to) the image
"""
plt.xlabel(x_label)
plt.ylabel(y_label)
color = config.html.style.primary_color
data = zip(*series.tolist())
if len(series) > config.plot.scatter_threshold:
cmap = sns.light_palette(color, as_cmap=True)
plt.hexbin(*data, cmap=cmap)
else:
plt.scatter(*data, color=color)
return plot_360_n0sc0pe(config)
@manage_matplotlib_context()
def scatter_pairwise(
config: Settings, series1: pd.Series, series2: pd.Series, x_label: str, y_label: str
) -> str:
"""Scatter plot (or hexbin plot) from two series
Examples:
>>> widths = pd.Series([800, 1024])
>>> heights = pd.Series([600, 768])
        >>> scatter_pairwise(config, widths, heights, "Width", "Height")
Args:
config: Settings
series1: the series corresponding to the x-axis
series2: the series corresponding to the y-axis
x_label: the label on the x-axis
y_label: the label on the y-axis
Returns:
A string containing (a reference to) the image
"""
plt.xlabel(x_label)
plt.ylabel(y_label)
color = config.html.style.primary_color
indices = (series1.notna()) & (series2.notna())
if len(series1) > config.plot.scatter_threshold:
cmap = sns.light_palette(color, as_cmap=True)
plt.hexbin(series1[indices], series2[indices], gridsize=15, cmap=cmap)
else:
plt.scatter(series1[indices], series2[indices], color=color)
return plot_360_n0sc0pe(config)
@manage_matplotlib_context()
def pie_plot(
config: Settings, data: pd.Series, legend_kws: Optional[dict] = None
) -> str:
"""Generate pie plot showing proportions of categorical and boolean
variables. Modify colors by setting 'config.plot.pie.colors' to a
    list of valid matplotlib colors.
https://matplotlib.org/stable/tutorials/colors/colors.html
Args:
config (Settings): a config
data (pd.Series): the categories and their frequency
legend_kws (Optional[dict], optional): Defaults to None.
Returns:
str: pie plot encoded in text
"""
if legend_kws is None:
legend_kws = {}
def make_autopct(values: pd.Series) -> Callable:
def my_autopct(pct: float) -> str:
total = np.sum(values)
val = int(round(pct * total / 100.0))
return f"{pct:.1f}% ({val:d})"
return my_autopct
wedges, _, _ = plt.pie(
data,
autopct=make_autopct(data),
textprops={"color": "w"},
colors=config.plot.pie.colors,
)
plt.legend(wedges, data.index.values, **legend_kws)
return plot_360_n0sc0pe(config)
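if __name__ == "__main__":
    # Hedged usage sketch (added example): the helpers above expect *precomputed*
    # histogram counts and bin edges, so np.histogram runs first. A default
    # Settings() object is assumed to be constructible, as elsewhere in the package.
    demo_config = Settings()
    values = np.random.default_rng(0).normal(size=500)
    counts, bin_edges = np.histogram(values, bins=20)
    html_fragment = histogram(demo_config, counts, bin_edges)
    print(len(html_fragment))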
|
the-stack_106_26518 | import random
import numpy as np
import config
s1, a1 = config.patch_size_subtracter, config.patch_size_adder
def is_in_bounds(im, idx):
i, j, k = idx
return \
i - s1 >= 0 and i + a1 < im.shape[0] and \
j - s1 >= 0 and j + a1 < im.shape[1] and \
k - s1 >= 0 and k + a1 < im.shape[2]
def to_vector(lists):
return [item for sublist in lists for item in sublist]
def is_flip(p):
return random.random() < p
def get_training_example(im_idx, im_arr, im_truth_arr, lbl, idx, edge_idxs_dict=None):
if not is_in_bounds(im_arr, idx): return [], []
x_for_idx = to_vector([[im_idx], idx, [0, 0, 0], [False]])
y_for_idx = lbl - 1
'''i, j, k = idx
if edge_idxs_dict is not None and (i, j, k) in edge_idxs_dict:
for _ in range(5):
x_for_lbl.append(
getX([[im_idx], idx, np.random.uniform(-5, 5, 3), [is_flip(0.5)]]))
y_for_lbl.append(lbl - 1)'''
return [x_for_idx], [y_for_idx]
def get_examples_for_label(im_idx, im_arr, im_truth_arr, lbl, edge_idxs_dict=None):
x_for_lbl, y_for_lbl = [], []
lbl_idxs = np.where(im_truth_arr == lbl)
for idx in zip(*lbl_idxs):
x_for_idx, y_for_idx = get_training_example(im_idx, im_arr, im_truth_arr, lbl, idx, edge_idxs_dict)
x_for_lbl.extend(x_for_idx)
y_for_lbl.extend(y_for_idx)
return x_for_lbl, y_for_lbl
def get_examples(im_idx, im_arr, im_truth_arr, edge_idxs_dict=None):
x_list, y_list = [], []
for lbl in range(1, 4):
x_for_lbl, y_for_lbl = get_examples_for_label(im_idx, im_arr, im_truth_arr, lbl, edge_idxs_dict)
x_list.extend(x_for_lbl)
y_list.extend(y_for_lbl)
x_arr = np.array(x_list)
y_arr = np.array(y_list)
y_arr_one_hot = np.zeros([y_arr.shape[0], 3])
y_arr_one_hot[np.arange(y_arr.shape[0]), y_arr] = 1
return x_arr, y_arr_one_hot
if __name__ == '__main__':
pass
|
the-stack_106_26520 | # -*- Python -*-
import os
import platform
import re
import subprocess
import tempfile
import lit.formats
import lit.util
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
from lit.llvm.subst import FindTool
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = 'MLIR'
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.td', '.mlir', '.toy', '.ll', '.tc', '.py']
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.path.join(config.mlir_obj_root, 'test')
config.substitutions.append(('%PATH%', config.environment['PATH']))
config.substitutions.append(('%shlibext', config.llvm_shlib_ext))
config.substitutions.append(("%mlir_src_root", config.mlir_src_root))
llvm_config.with_system_environment(
['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP'])
llvm_config.use_default_substitutions()
# excludes: A list of directories to exclude from the testsuite. The 'Inputs'
# subdirectories contain auxiliary inputs for various tests in their parent
# directories.
config.excludes = ['Inputs', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt',
'lit.cfg.py', 'lit.site.cfg.py']
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.path.join(config.mlir_obj_root, 'test')
# Tweak the PATH to include the tools dir.
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
tool_dirs = [config.mlir_tools_dir, config.llvm_tools_dir]
tools = [
'mlir-opt',
'mlir-tblgen',
'mlir-translate',
'mlir-capi-ir-test',
'mlir-edsc-builder-api-test',
]
# The following tools are optional
tools.extend([
ToolSubst('%PYTHON', config.python_executable),
ToolSubst('toy-ch1', unresolved='ignore'),
ToolSubst('toy-ch2', unresolved='ignore'),
ToolSubst('toy-ch3', unresolved='ignore'),
ToolSubst('toy-ch4', unresolved='ignore'),
ToolSubst('toy-ch5', unresolved='ignore'),
ToolSubst('%cuda_wrapper_library_dir', config.cuda_wrapper_library_dir, unresolved='ignore'),
ToolSubst('%linalg_test_lib_dir', config.linalg_test_lib_dir, unresolved='ignore'),
ToolSubst('%mlir_runner_utils_dir', config.mlir_runner_utils_dir, unresolved='ignore'),
ToolSubst('%rocm_wrapper_library_dir', config.rocm_wrapper_library_dir, unresolved='ignore'),
ToolSubst('%vulkan_wrapper_library_dir', config.vulkan_wrapper_library_dir, unresolved='ignore'),
])
llvm_config.add_tool_substitutions(tools, tool_dirs)
# FileCheck -enable-var-scope is enabled by default in MLIR test
# This option avoids to accidentally reuse variable across -LABEL match,
# it can be explicitly opted-in by prefixing the variable name with $
config.environment['FILECHECK_OPTS'] = "-enable-var-scope"
# LLVM can be configured with an empty default triple
# by passing ` -DLLVM_DEFAULT_TARGET_TRIPLE="" `.
# This is how LLVM filters tests that require the host target
# to be available for JIT tests.
if config.target_triple:
config.available_features.add('default_triple')
# Add the python path for both the source and binary tree.
# Note that presently, the python sources come from the source tree and the
# binaries come from the build tree. This should be unified to the build tree
# by copying/linking sources to build.
if config.enable_bindings_python:
llvm_config.with_environment('PYTHONPATH', [
os.path.join(config.mlir_src_root, "lib", "Bindings", "Python"),
os.path.join(config.mlir_obj_root, "lib", "Bindings", "Python"),
], append_path=True)
|
the-stack_106_26524 | """
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import boto3
from botocore.paginate import (PageIterator, Paginator)
from botocore.client import BaseClient
from botocore.exceptions import (ClientError, ConfigNotFound, NoCredentialsError, ProfileNotFound)
from typing import Dict, List
from model import error_messages
from model.basic_resource_attributes import (BasicResourceAttributes, BasicResourceAttributesBuilder)
"""
AWS Utils provides helper functions for interacting with AWS services, e.g. getting
the AWS account, region, resources, etc.
"""
_PAGINATION_MAX_ITEMS: int = 10
_PAGINATION_PAGE_SIZE: int = 10
default_session: boto3.session.Session = None
class AWSConstants(object):
CLOUDFORMATION_SERVICE_NAME: str = "cloudformation"
CLOUDFORMATION_LIST_STACKS_API_NAME: str = "list_stacks"
CLOUDFORMATION_LIST_STACK_RESOURCES_API_NAME: str = "list_stack_resources"
CLOUDFORMATION_STACKS_STATUS_FILTERS: List[str] = ["CREATE_COMPLETE", "ROLLBACK_COMPLETE", "UPDATE_COMPLETE",
"IMPORT_COMPLETE", "IMPORT_ROLLBACK_COMPLETE"]
LAMBDA_SERVICE_NAME: str = "lambda"
LAMBDA_LIST_FUNCTIONS_API_NAME: str = "list_functions"
DYNAMODB_SERVICE_NAME: str = "dynamodb"
DYNAMODB_LIST_TABLES_API_NAME: str = "list_tables"
STS_SERVICE_NAME: str = "sts"
S3_SERVICE_NAME: str = "s3"
def _close_client_connection(client: BaseClient) -> None:
session: boto3.session.Session = client._endpoint.http_session
managers: List[object] = [session._manager, *session._proxy_managers.values()]
for manager in managers:
manager.clear()
def _initialize_boto3_aws_client(service: str, region: str = "") -> BaseClient:
if region:
boto3_client: BaseClient = default_session.client(service, region_name=region)
else:
boto3_client: BaseClient = default_session.client(service)
boto3_client.meta.events.register(
f"after-call.{service}.*", lambda **kwargs: _close_client_connection(boto3_client)
)
return boto3_client
def setup_default_session(profile: str) -> None:
try:
global default_session
default_session = boto3.session.Session(profile_name=profile)
except (ConfigNotFound, ProfileNotFound) as error:
raise RuntimeError(error)
def get_default_account_id() -> str:
sts_client: BaseClient = _initialize_boto3_aws_client(AWSConstants.STS_SERVICE_NAME)
try:
return sts_client.get_caller_identity()["Account"]
except ClientError as error:
raise RuntimeError(error_messages.AWS_SERVICE_REQUEST_CLIENT_ERROR_MESSAGE.format(
"get_caller_identity", error.response['Error']['Code'], error.response['Error']['Message']))
except NoCredentialsError as error:
raise RuntimeError(error)
def get_default_region() -> str:
region: str = default_session.region_name
if region:
return region
sts_client: BaseClient = _initialize_boto3_aws_client(AWSConstants.STS_SERVICE_NAME)
region = sts_client.meta.region_name
if region:
return region
return ""
def list_s3_buckets(region: str = "") -> List[str]:
s3_client: BaseClient = _initialize_boto3_aws_client(AWSConstants.S3_SERVICE_NAME, region)
try:
response: Dict[str, any] = s3_client.list_buckets()
except ClientError as error:
raise RuntimeError(error_messages.AWS_SERVICE_REQUEST_CLIENT_ERROR_MESSAGE.format(
"list_buckets", error.response['Error']['Code'], error.response['Error']['Message']))
bucket_names: List[str] = []
bucket: Dict[str, any]
for bucket in response["Buckets"]:
bucket_names.append(bucket["Name"])
return bucket_names
def list_lambda_functions(region: str = "") -> List[str]:
lambda_client: BaseClient = _initialize_boto3_aws_client(AWSConstants.LAMBDA_SERVICE_NAME, region)
try:
lambda_paginator: Paginator = lambda_client.get_paginator(AWSConstants.LAMBDA_LIST_FUNCTIONS_API_NAME)
iterator: PageIterator = lambda_paginator.paginate(PaginationConfig={"PageSize": _PAGINATION_PAGE_SIZE})
function_names: List[str] = []
page: Dict[str, any]
for page in iterator:
function: Dict[str, any]
for function in page["Functions"]:
function_names.append(function["FunctionName"])
return function_names
except ClientError as error:
raise RuntimeError(error_messages.AWS_SERVICE_REQUEST_CLIENT_ERROR_MESSAGE.format(
AWSConstants.LAMBDA_LIST_FUNCTIONS_API_NAME,
error.response['Error']['Code'], error.response['Error']['Message']))
def list_dynamodb_tables(region: str = "") -> List[str]:
dynamodb_client: BaseClient = _initialize_boto3_aws_client(AWSConstants.DYNAMODB_SERVICE_NAME, region)
try:
dynamodb_paginator: Paginator = dynamodb_client.get_paginator(AWSConstants.DYNAMODB_LIST_TABLES_API_NAME)
iterator: PageIterator = dynamodb_paginator.paginate(PaginationConfig={"PageSize": _PAGINATION_PAGE_SIZE})
table_names: List[str] = []
page: Dict[str, any]
for page in iterator:
table_names.extend(page["TableNames"])
return table_names
except ClientError as error:
raise RuntimeError(error_messages.AWS_SERVICE_REQUEST_CLIENT_ERROR_MESSAGE.format(
AWSConstants.DYNAMODB_LIST_TABLES_API_NAME,
error.response['Error']['Code'], error.response['Error']['Message']))
def list_cloudformation_stacks(region: str = "") -> List[str]:
cfn_client: BaseClient = _initialize_boto3_aws_client(AWSConstants.CLOUDFORMATION_SERVICE_NAME, region)
try:
cfn_paginator: Paginator = cfn_client.get_paginator(AWSConstants.CLOUDFORMATION_LIST_STACKS_API_NAME)
iterator: PageIterator = \
cfn_paginator.paginate(StackStatusFilter=AWSConstants.CLOUDFORMATION_STACKS_STATUS_FILTERS)
stack_names: List[str] = []
page: Dict[str, any]
for page in iterator:
stack: Dict[str, any]
for stack in page["StackSummaries"]:
stack_names.append(stack["StackName"])
return stack_names
except ClientError as error:
raise RuntimeError(error_messages.AWS_SERVICE_REQUEST_CLIENT_ERROR_MESSAGE.format(
AWSConstants.CLOUDFORMATION_LIST_STACKS_API_NAME,
error.response['Error']['Code'], error.response['Error']['Message']))
def list_cloudformation_stack_resources(stack_name, region=None) -> List[BasicResourceAttributes]:
cfn_client: BaseClient = _initialize_boto3_aws_client(AWSConstants.CLOUDFORMATION_SERVICE_NAME, region)
try:
cfn_paginator: Paginator = cfn_client.get_paginator(AWSConstants.CLOUDFORMATION_LIST_STACK_RESOURCES_API_NAME)
resource_type_and_name: List[BasicResourceAttributes] = []
starting_token: str = None
while True:
# list cloudformation stack resources with starting token and max items. StartingToken is used to mark
# the starting point of the request; if None, it means request from start. MaxItems is used to define the
# total number of resources requested in the page iterator.
iterator: PageIterator = \
cfn_paginator.paginate(StackName=stack_name,
PaginationConfig={"MaxItems": _PAGINATION_MAX_ITEMS,
"StartingToken": starting_token})
page: Dict[str, any]
for page in iterator:
# iterate through page iterator to fetch all resources
resource: Dict[str, any]
for resource in page["StackResourceSummaries"]:
if "ResourceType" in resource.keys() and "PhysicalResourceId" in resource.keys():
resource_type_and_name.append(BasicResourceAttributesBuilder()
.build_type(resource["ResourceType"])
.build_name_id(resource["PhysicalResourceId"])
.build())
if iterator.resume_token is None:
# when resume token is none, it means there is no more resources left
break
else:
# setting starting token to resume token, then next request will query with proper starting point
starting_token = iterator.resume_token
return resource_type_and_name
except ClientError as error:
raise RuntimeError(error_messages.AWS_SERVICE_REQUEST_CLIENT_ERROR_MESSAGE.format(
AWSConstants.CLOUDFORMATION_LIST_STACK_RESOURCES_API_NAME,
error.response['Error']['Code'], error.response['Error']['Message']))
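if __name__ == "__main__":
    # Hedged usage sketch (added example): requires valid AWS credentials; the
    # profile name below is an assumption for illustration only.
    setup_default_session("default")
    print(get_default_account_id())
    print(list_cloudformation_stacks(get_default_region()))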
|
the-stack_106_26525 | #-*- coding: utf-8 -*-
import os
import torch
import argparse
import numpy as np
from tqdm import tqdm
from transformers import BertTokenizer
from dataset import DualSample, TokenizedSample, OriginalDataset
def tokenize_data(data, mode='train'):
max_forward_asp_query_length = 0
max_forward_opi_query_length = 0
max_sentiment_query_length = 0
max_aspect_num = 0
tokenized_sample_list = []
header_fmt = 'Tokenize data {:>5s}'
for sample in tqdm(data, desc=f"{header_fmt.format(mode.upper())}"):
forward_queries = []
forward_answers = []
sentiment_queries = []
sentiment_answers = []
forward_queries_seg = []
sentiment_queries_seg = []
if int(len(sample.sentiment_queries)) > max_aspect_num:
max_aspect_num = int(len(sample.sentiment_queries))
for idx in range(len(sample.forward_queries)):
temp_query = sample.forward_queries[idx]
temp_text = sample.text
temp_answer = sample.forward_answers[idx]
temp_query_to = ['[CLS]'] + temp_query + ['[SEP]'] + temp_text
temp_query_seg = [0] * (len(temp_query) + 2) + [1] * len(temp_text)
temp_answer[0] = [-1] * (len(temp_query) + 2) + temp_answer[0]
temp_answer[1] = [-1] * (len(temp_query) + 2) + temp_answer[1]
assert len(temp_answer[0]) == len(temp_answer[1]) == len(temp_query_to) == len(temp_query_seg)
if len(temp_query_to) > max_forward_asp_query_length:
max_forward_asp_query_length = len(temp_query_to)
forward_queries.append(temp_query_to)
forward_answers.append(temp_answer)
forward_queries_seg.append(temp_query_seg)
for idx in range(len(sample.sentiment_queries)):
temp_query = sample.sentiment_queries[idx]
temp_text = sample.text
temp_answer = sample.sentiment_answers[idx]
temp_query_to = ['[CLS]'] + temp_query + ['[SEP]'] + temp_text
temp_query_seg = [0] * (len(temp_query) + 2) + [1] * len(temp_text)
assert len(temp_query_to) == len(temp_query_seg)
if len(temp_query_to) > max_sentiment_query_length:
max_sentiment_query_length = len(temp_query_to)
sentiment_queries.append(temp_query_to)
sentiment_answers.append(temp_answer)
sentiment_queries_seg.append(temp_query_seg)
# import numpy as np
# print(f"forward_queries: {np.shape(forward_queries)} {type(forward_queries)} | {forward_queries}")
# print(f"forward_answers: {np.shape(forward_answers)}")
# print(f"sentiment_queries: {np.shape(sentiment_queries)} | {sentiment_queries}")
# print(f"sentiment_answers: {np.shape(sentiment_answers)} | {sentiment_answers}")
# print(f"forward_queries_seg: {np.shape(forward_queries_seg)} | {forward_queries_seg}")
# print(f"sentiment_queries_seg: {np.shape(sentiment_queries_seg)}")
temp_sample = TokenizedSample(
sample.original_sample, forward_queries,
forward_answers, sentiment_queries,
sentiment_answers, forward_queries_seg,
sentiment_queries_seg
)
# print(temp_sample)
tokenized_sample_list.append(temp_sample)
max_attributes = {
'mfor_asp_len': max_forward_asp_query_length,
'max_sent_len': max_sentiment_query_length,
'max_aspect_num': max_aspect_num
}
return tokenized_sample_list, max_attributes
def preprocessing(sample_list, max_len, mode='train'):
_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
_forward_asp_query = []
_forward_asp_answer_start = []
_forward_asp_answer_end = []
_forward_asp_query_mask = []
_forward_asp_query_seg = []
_sentiment_query = []
_sentiment_answer = []
_sentiment_query_mask = []
_sentiment_query_seg = []
_aspect_num = []
header_fmt = 'Preprocessing {:>5s}'
for instance in tqdm(sample_list, desc=f"{header_fmt.format(mode.upper())}"):
f_query_list = instance.forward_queries
f_answer_list = instance.forward_answers
f_query_seg_list = instance.forward_seg
s_query_list = instance.sentiment_queries
s_answer_list = instance.sentiment_answers
s_query_seg_list = instance.sentiment_seg
# _aspect_num: 1/2/3/...
_aspect_num.append(int(len(s_query_list)))
# Forward
# Aspect
# query
assert len(f_query_list[0]) == len(f_answer_list[0][0]) == len(f_answer_list[0][1])
f_asp_pad_num = max_len['mfor_asp_len'] - len(f_query_list[0])
_forward_asp_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in f_query_list[0]]))
_forward_asp_query[-1].extend([0] * f_asp_pad_num)
# query_mask
_forward_asp_query_mask.append([1 for i in range(len(f_query_list[0]))])
_forward_asp_query_mask[-1].extend([0] * f_asp_pad_num)
# answer
_forward_asp_answer_start.append(f_answer_list[0][0])
_forward_asp_answer_start[-1].extend([-1] * f_asp_pad_num)
_forward_asp_answer_end.append(f_answer_list[0][1])
_forward_asp_answer_end[-1].extend([-1] * f_asp_pad_num)
# seg
_forward_asp_query_seg.append(f_query_seg_list[0])
_forward_asp_query_seg[-1].extend([1] * f_asp_pad_num)
# Sentiment
single_sentiment_query = []
single_sentiment_query_mask = []
single_sentiment_query_seg = []
single_sentiment_answer = []
for j in range(len(s_query_list)):
sent_pad_num = max_len['max_sent_len'] - len(s_query_list[j])
single_sentiment_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in s_query_list[j]]))
single_sentiment_query[-1].extend([0] * sent_pad_num)
single_sentiment_query_mask.append([1 for i in range(len(s_query_list[j]))])
single_sentiment_query_mask[-1].extend([0] * sent_pad_num)
# query_seg
single_sentiment_query_seg.append(s_query_seg_list[j])
single_sentiment_query_seg[-1].extend([1] * sent_pad_num)
single_sentiment_answer.append(s_answer_list[j])
_sentiment_query.append(single_sentiment_query)
_sentiment_query[-1].extend(
[[0 for i in range(max_len['max_sent_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_sentiment_query_mask.append(single_sentiment_query_mask)
_sentiment_query_mask[-1].extend(
[[0 for i in range(max_len['max_sent_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_sentiment_query_seg.append(single_sentiment_query_seg)
_sentiment_query_seg[-1].extend(
[[0 for i in range(max_len['max_sent_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_sentiment_answer.append(single_sentiment_answer)
_sentiment_answer[-1].extend([-1] * (max_len['max_aspect_num'] - _aspect_num[-1]))
result = {
"_forward_asp_query": _forward_asp_query,
"_forward_asp_answer_start": _forward_asp_answer_start,
"_forward_asp_answer_end": _forward_asp_answer_end,
"_forward_asp_query_mask": _forward_asp_query_mask,
"_forward_asp_query_seg": _forward_asp_query_seg,
"_sentiment_query": _sentiment_query,
"_sentiment_answer": _sentiment_answer,
"_sentiment_query_mask": _sentiment_query_mask,
"_sentiment_query_seg": _sentiment_query_seg,
"_aspect_num": _aspect_num,
}
return OriginalDataset(result)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default='./data/14rest/preprocess',
help='Path to the processed data from `data_process.py`')
parser.add_argument('--output_path', type=str, default='./data/14rest/preprocess',
help='Path to the saved data.')
args = parser.parse_args()
train_data_path = f"{args.data_path}/train_DUAL.pt"
dev_data_path = f"{args.data_path}/dev_DUAL.pt"
test_data_path = f"{args.data_path}/test_DUAL.pt"
train_data = torch.load(train_data_path)
dev_data = torch.load(dev_data_path)
test_data = torch.load(test_data_path)
train_tokenized, train_max_len = tokenize_data(train_data, mode='train')
dev_tokenized, dev_max_len = tokenize_data(dev_data, mode='dev')
test_tokenized, test_max_len = tokenize_data(test_data, mode='test')
    print("\nMax attributes")
print(f"train_max_len : {train_max_len}")
print(f"dev_max_len : {dev_max_len}")
print(f"test_max_len : {test_max_len}\n")
train_preprocess = preprocessing(train_tokenized, train_max_len, mode='train')
dev_preprocess = preprocessing(dev_tokenized, dev_max_len, mode='dev')
test_preprocess = preprocessing(test_tokenized, test_max_len, mode='test')
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
output_path = f"{args.output_path}/data.pt"
print(f"Saved data : `{output_path}`.")
torch.save({
'train': train_preprocess,
'dev': dev_preprocess,
'test': test_preprocess
}, output_path)
|
the-stack_106_26526 | # -*- coding: utf-8 -*-
# --------------------------
# Copyright © 2014 - Qentinel Group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------
from robot.api import logger
from selenium.common.exceptions import NoSuchWindowException
from QWeb.internal import browser, javascript, xhr, window, decorators
from QWeb.internal.exceptions import QWebDriverError, QWebValueError
from QWeb.internal.config_defaults import CONFIG
@decorators.timeout_decorator
def go_to(url, timeout=0): # pylint: disable=unused-argument
"""Switch current page to given url.
Examples
--------
.. code-block:: robotframework
GoTo http://google.com
GoTo file://resources/window.html
Parameters
----------
url : str
URL of the website that will be opened.
Raises
------
UnexpectedAlertPresentException
If the page opens with alert popup
"""
driver = browser.get_current_browser()
if driver is None:
raise QWebDriverError("No browser open. Use OpenBrowser keyword"
" to open browser first")
driver.get(url)
def open_window():
"""Open new tab.
Uses javascript to do this so javascript has to be enabled.
Examples
--------
.. code-block:: robotframework
OpenWindow
"""
script = 'window.open()'
javascript.execute_javascript(script)
window_handles = window.get_window_handles()
current_window_handle = window.get_current_window_handle()
index = window_handles.index(current_window_handle)
new_window_index = index + 1
window.switch_to_window(window_handles[new_window_index])
try:
xhr.setup_xhr_monitor()
except QWebDriverError:
logger.debug('XHR monitor threw exception. Bypassing jQuery injection')
def close_others():
"""Close all windows except the first window.
If you have a test that may open new windows, this keyword closes them
and switches to the first window.
Examples
--------
.. code-block:: robotframework
CloseOthers
Raises
------
NoSuchWindowException
If other windows cannot been closed
"""
window_handles = window.get_window_handles()
logger.info("Current browser has {} tabs".format(len(window_handles)))
if len(window_handles) == 1:
return
driver = browser.get_current_browser()
while len(window_handles) > 1:
try:
window_handle = window_handles.pop()
window.switch_to_window(window_handle)
driver.close()
except NoSuchWindowException:
logger.info('Failed to close window')
first_window_handle = window_handles.pop()
window.switch_to_window(first_window_handle)
number_of_handles = len(window.get_window_handles())
if number_of_handles != 1:
raise Exception(
'Expected 1 window open, found {0}'.format(number_of_handles))
def close_window():
"""Close current tab and switch context to another window handle.
If you need to change to specific tab, use switch window keyword.
Examples
--------
.. code-block:: robotframework
CloseWindow
"""
driver = browser.get_current_browser()
window_handles = window.get_window_handles()
logger.info("Current browser has {} tabs".format(len(window_handles)))
if len(window_handles) == 1:
logger.info("Only one tab, handle closing without changing context")
browser.remove_from_browser_cache(driver) # remove from browser cache
driver.close()
else:
logger.info(
"Multiple tabs open, can change window context to another one")
current_window = window.get_current_window_handle()
current_index = window_handles.index(current_window)
logger.info("Current index {}".format(current_index))
driver.close()
# "refresh" window handles
window_handles = window.get_window_handles()
current_length = len(window_handles)
logger.info(
"After closing, {} tabs remain open".format(current_length))
# if current index is more than new length, move to last handle
if current_index > (len(window_handles) - 1):
window.switch_to_window(window_handles[(current_index - 1)])
# move to next window (as browsers do)
else:
window.switch_to_window(window_handles[current_index])
logger.info("Changed context to tab with url {}".format(
window.get_url()))
@decorators.timeout_decorator
def switch_window(index, timeout=0): # pylint: disable=unused-argument
"""Switch to another tab.
Examples
--------
.. code-block:: robotframework
SwitchWindow 1
SwitchWindow NEW # Switches to latest opened tab
Parameters
----------
index : str
Index of the tab starting from one and counting from left to right.
OR
Special keyword "NEW" which can be used to move to the latest opened tab.
timeout : str | int
How long we search before failing.
Raises
------
ValueError
If the window index is out of reach
"""
window_handles = window.get_window_handles()
logger.info("Current browser contains {} tabs".format(len(window_handles)))
if index.isdigit():
if int(index) == 0:
raise QWebValueError('SwitchWindow index starts at 1.')
i = int(index) - 1
if i < len(window_handles):
correct_window_handle = window_handles[i]
window.switch_to_window(correct_window_handle)
return
logger.debug('Tried to select tab with index {} but there'
' are only {} tabs open'.format(index, len(window_handles)))
elif index == "NEW":
window.switch_to_window(window_handles[-1])
return
else:
raise QWebValueError(
'Given argument "{}" is not a digit or NEW'.format(index))
raise QWebDriverError(
'Tried to select tab with index {} but there are only {} tabs open'
.format(index, len(window_handles)))
def set_window_size(width, height):
"""*DEPRECATED!!* Use keyword `SetConfig` instead.
Set current window size.
Examples
--------
.. code-block:: robotframework
SetWindowSize 1920 1080
Parameters
----------
width : int
The width value of the window
height: int
The height value of the window
"""
width = int(width)
height = int(height)
driver = browser.get_current_browser()
driver.set_window_size(width, height)
def maximize_window():
"""Maximizes current browser window.
Note: This keyword will not fail if maximizing is prevented for some reason.
This can happen for example if window manager is not installed or setup correctly.
Examples
--------
.. code-block:: robotframework
MaximizeWindow
Parameters
----------
None
"""
driver = browser.get_current_browser()
if driver is None:
raise QWebDriverError("No browser open. Use OpenBrowser keyword"
" to open browser first")
if CONFIG.get_value('Headless') is True:
logger.debug("Maximizing browser in headless mode")
screen_width_js = driver.execute_script("return screen.width")
screen_height_js = driver.execute_script("return screen.height")
driver.set_window_size(screen_width_js, screen_height_js)
else:
driver.maximize_window()
size = driver.get_window_size()
logger.debug("Window size set to {}x{}".format(size["width"], size["height"]))
def get_url():
"""Gets current url/location.
Examples
--------
.. code-block:: robotframework
${url}= GetUrl
Parameters
----------
None
"""
driver = browser.get_current_browser()
if driver is None:
raise QWebDriverError("No browser open. Use OpenBrowser keyword"
" to open browser first")
return driver.current_url
@decorators.timeout_decorator
def verify_url(url, timeout=0): # pylint: disable=unused-argument
"""Verifies that current page url/location matches expected url.
Examples
--------
.. code-block:: robotframework
VerifyUrl https://www.google.com
VerifyUrl https://www.google.com timeout=5
Parameters
----------
url : str
The expected url
timeout : str | int
How long we wait for url to change before failing.
Raises
------
QWebValueError
If the expected url differs from current url
"""
driver = browser.get_current_browser()
if driver is None:
raise QWebDriverError("No browser open. Use OpenBrowser keyword"
" to open browser first")
current = driver.current_url
if current.lower() != url.lower():
        raise QWebValueError(f"Current url '{current}' does not match expected url '{url}'")
def get_title():
"""Gets the title of current page/window.
Examples
--------
.. code-block:: robotframework
${title}= GetTitle
Parameters
----------
None
"""
driver = browser.get_current_browser()
if driver is None:
raise QWebDriverError("No browser open. Use OpenBrowser keyword"
" to open browser first")
return driver.title
@decorators.timeout_decorator
def verify_title(title, timeout=0): # pylint: disable=unused-argument
"""Verifies that current page's title matches expected title.
Examples
--------
.. code-block:: robotframework
VerifyTitle Google
VerifyTitle Google timeout=3
Parameters
----------
title : str
The expected title
timeout : str | int
How long we wait for title to change before failing.
Raises
------
QWebValueError
If the expected title differs from actual page title
"""
driver = browser.get_current_browser()
if driver is None:
raise QWebDriverError("No browser open. Use OpenBrowser keyword"
" to open browser first")
actual = driver.title
if actual != title:
        raise QWebValueError(f"Page title '{actual}' does not match expected '{title}'")
def swipe_down(times='1', start=None):
"""Swipes down on the screen.
Examples
--------
.. code-block:: robotframework
SwipeDown # Swipes down once
SwipeDown 5 # Swipes down five times
SwipeDown 1 Qentinel Touch # Swipes down once, starting from the text "Qentinel Touch"
SwipeDown 5 Qentinel Touch # Swipes down five times, from the text "Qentinel Touch"
Parameters
----------
times : str
The amount of times we swipe / length of the swipe
start : str
Optional starting point for the swipe
Raises
------
ValueError
If the swipe amount is not an integer.
"""
window.swipe('down', times, start)
def swipe_up(times='1', start=None):
"""Swipes up on the screen.
Examples
--------
.. code-block:: robotframework
SwipeUp # Swipes up once
SwipeUp 5 # Swipes up five times
SwipeUp 1 Qentinel Touch # Swipes up once, from the text "Qentinel Touch"
SwipeUp 5 Qentinel Touch # Swipes up five times, from the text "Qentinel Touch"
Parameters
----------
times : str
The amount of times swiped / length of the swipe
start : str
Optional starting point for the swipe
Raises
------
ValueError
If the swipe amount is not an integer.
"""
window.swipe('up', times, start)
def swipe_left(times='1', start=None):
"""Swipes left on the screen.
Examples
--------
.. code-block:: robotframework
SwipeLeft # Swipes left once
SwipeLeft 5 # Swipes left five times
SwipeLeft 1 Qentinel Touch # Swipes left once, from the text "Qentinel Touch"
SwipeLeft 5 Qentinel Touch # Swipes left five times, from the text "Qentinel Touch"
Parameters
----------
times : str
The amount of times swiped / length of the swipe
start : str
Optional starting point for the swipe
Raises
------
ValueError
If the swipe amount is not an integer.
"""
window.swipe('left', times, start)
def swipe_right(times='1', start=None):
"""Swipes right on the screen.
Examples
--------
.. code-block:: robotframework
SwipeRight # Swipes right once
SwipeRight 5 # Swipes right five times
SwipeRight 1 Qentinel Touch # Swipes right once, from the text "Qentinel Touch"
SwipeRight 5 Qentinel Touch # Swipes right five times, from the text "Qentinel Touch"
Parameters
----------
times : str
The amount of times swiped / length of the swipe
start : str
Optional starting point for the swipe
Raises
------
ValueError
If the swipe amount is not an integer.
"""
window.swipe('right', times, start)
|
the-stack_106_26530 |
from time import time
import tumor2d
from fitmulticell.sumstat import SummaryStatistics as ss
import matplotlib.pyplot as plt
from string import capwords
import os
import pyabc
from fitmulticell.model import MorpheusModel
import numpy as np
import scipy
def eucl_dist(sim, obs):
total = 0
for key in sim:
        # skip the 'loc' entry; the remaining summary statistics are compared below
        if key == 'loc':
            continue
total += scipy.stats.ks_2samp(sim[key], obs[key]).statistic
return total
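# Minimal sketch of how `eucl_dist` is used (hypothetical keys and values; the
# real summary statistics come from the Morpheus/FitMultiCell logger output):
#   sim = {'cell.center.x': np.random.rand(100), 'loc': ...}
#   obs = {'cell.center.x': np.random.rand(100), 'loc': ...}
#   total_distance = eucl_dist(sim, obs)  # sum of per-key two-sample KS statistics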
pop_size = 2
min_eps = 750
min_eps_ori = min_eps
max_nr_pop = 2
# logfilepath = "/home/emad/Insync/[email protected]/Google_Drive/Bonn/Github/FMC_paper/PEtab_problems/Code/Tumor_2d/TumorStats.txt"
problempath = "/home/emad/Insync/[email protected]/Google_Drive/Bonn/Github/FMC_paper/PEtab_problems/Tumor_2D/Tumour_Spheroid_ScenI_1e.xml"
par_map = {'k_div_max': './Global/Constant[@symbol="k_div_max"]',
'L_init': './Global/Constant[@symbol="L_init"]',
'q_init': './Global/Constant[@symbol="q_init"]',
'L_div': './Global/Constant[@symbol="L_div"]',
'ke_pro': './Global/Constant[@symbol="ke_pro"]',
'ke_deg': './Global/Constant[@symbol="ke_deg"]',
'e_div': './Global/Constant[@symbol="e_div"]',
}
start_time = time()
observation_par = {"k_div_max": 4.17e-2,
"L_init": 1.2e1,
"q_init": 7.5e-1,
"L_div": 100,
"ke_pro": 5e-3,
"ke_deg": 8e-4,
"e_div": 1e-2}
sumstat = ss(output_file="logger_1.csv", ignore=["cell.id", "time"])
model = MorpheusModel(
model_file=problempath,
par_map=par_map,
executable="/home/emad/morpheus-2.2.5",
sumstat=sumstat,
)
observation_morpheus = model.sample(observation_par)
model.par_scale = "log10"
# observation_origin = tumor2d.simulate(division_rate=4.17e-2,
# initial_spheroid_radius=1.2e1,
# initial_quiescent_cell_fraction=7.5e-1,
# division_depth=100,
# ecm_production_rate=5e-3,
# ecm_degradation_rate=8e-4,
# ecm_division_threshold=1e-2)
limits = dict(k_div_max=(-3, -1),
L_init=(1, 3),
q_init=(0, 1.2),
L_div=(-5, 0),
ke_pro=(-5, 0),
ke_deg=(-5, 0),
e_div=(-5, 0))
#
prior = pyabc.Distribution(**{key: pyabc.RV("uniform", a, b - a)
for key, (a, b) in limits.items()})
# data_mean = tumor2d.load_default()[1] # (raw, mean, var)
# In[6]:
# redis_sampler = pyabc.sampler.RedisEvalParallelSampler(host=host, port=port, look_ahead = False)
abc = pyabc.ABCSMC(models=model,
parameter_priors=prior,
distance_function=eucl_dist,
population_size=pop_size)
db_path = "sqlite:///" + "/tmp/" + "test_14param_Felipe.db"
abc.new(db_path, observation_morpheus)
history_f = abc.run(max_nr_populations=max_nr_pop, minimum_epsilon=min_eps_ori)
# petab_problem_path = "/home/emad/Insync/[email protected]/Google_Drive/Bonn/Github/FMC_paper" + '/PEtab_problems' + '/Tumor_2D' + '/Tumor_2D.yaml'
# petab_problem = petab_MS.Problem.from_yaml(petab_problem_path)
# importer = PetabImporter(petab_problem)
# PEtab_prior = importer.create_prior()
# par_map_imported = importer.get_par_map()
# obs_pars_imported = petab_problem.get_x_nominal_dict(scaled=True)
# PEtab_par_scale = petab_problem.get_optimization_parameter_scales()
# dict_data_imported = petab_problem.get_measurement_dict()
# PEtab_model = importer.create_model()
# PEtab_model.timeout = 900
# PEtab_model.ignore_list = ["cell.id", "Tension", "time"]
#
# PEtab_tryjectory = PEtab_model.sample(obs_pars_imported)
# model_dir = "/home/emad/Insync/[email protected]/Google_Drive/Bonn/Github/FMC_paper" + '/PEtab_problems' + '/Liver_regeneration' + '/YAP_Signaling_Liver_Regeneration_Model_reparametrized_further.xml'
#
# abc = pyabc.ABCSMC(PEtab_model, PEtab_prior, eucl_dist, population_size=2,
# eps=QuantileEpsilon(alpha=0.3), all_accepted=False)
#
# db_path = ("sqlite:///" +
# os.path.join(tempfile.gettempdir(), "test.db"))
# history = abc.new(db_path, dict_data_imported)
# abc.run(max_nr_populations=2)
|
the-stack_106_26531 | import discord
from discord.ext import commands
import io
import textwrap
import os
import traceback
from contextlib import redirect_stdout
from Admin.admin import Files
intents = discord.Intents().default()
intents.members = True
bot = commands.Bot(command_prefix=Files.config("main","prefix"), intents=intents, case_insensitive=True, owner_ids=Files.config("main", "managers"))
bot.remove_command("help")
def is_owner():
def predicate(ctx):
return ctx.author.id in bot.owner_ids
return commands.check(predicate)
@is_owner()
@bot.command(aliases=["e"])
async def eval(ctx, *, body: str):
    """Evaluate a block of Python code and send the result."""
    raw = False
env = {
'bot': bot,
'ctx': ctx,
'channel': ctx.message.channel,
'author': ctx.message.author,
'guild': ctx.message.guild,
'message': ctx.message,
}
env.update(globals())
stdout = io.StringIO()
to_compile = f'async def func():\n{textwrap.indent(body, " ")}'
try:
exec(to_compile, env)
except Exception as e:
return await ctx.send(f'```py\n{e.__class__.__name__}: {e}\n```')
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception:
value = stdout.getvalue()
await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
else:
value = stdout.getvalue()
try:
await ctx.message.add_reaction('\u2705')
        except Exception:
pass
if ret is None:
if value:
if raw:
await ctx.send(f"{value}")
else:
await ctx.send(f'```py\n{value}\n```')
else:
pass
@bot.event
async def on_ready():
print("Bot is ready!")
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="over BytesToBits"))
@is_owner()
@bot.command(hidden=True)
async def load(ctx, *, module):
try:
bot.load_extension(f"cogs.{module}")
except commands.ExtensionError as e:
await ctx.send(f'{e.__class__.__name__}: {e}')
else:
embed=discord.Embed(title=f"Loaded {str(module).capitalize()}", description=f"Successfully loaded cogs.{str(module).lower()}!", color=0x2cf818)
await ctx.send(embed=embed)
@is_owner()
@bot.command(hidden=True)
async def unload(ctx, *, module):
try:
bot.unload_extension(f"cogs.{module}")
except commands.ExtensionError as e:
await ctx.send(f'{e.__class__.__name__}: {e}')
else:
embed=discord.Embed(title=f"Unloaded {str(module).capitalize()}", description=f"Successfully unloaded cogs.{str(module).lower()}!", color=0xeb1b2c)
await ctx.send(embed=embed)
@is_owner()
@bot.command(name="reload")
async def _reload(ctx, *, module):
try:
bot.reload_extension(f"cogs.{module}")
except commands.ExtensionError as e:
await ctx.send(f'{e.__class__.__name__}: {e}')
else:
embed=discord.Embed(title=f"Reloaded {str(module).capitalize()}", description=f"Successfully reloaded cogs.{str(module).lower()}!", color=0x00d4ff)
await ctx.send(embed=embed)
for i in os.listdir("cogs"):
if i == "staff": pass
else:
cog = i[:-3]
try:
bot.load_extension(f"cogs.{cog}")
print(f"Loaded Main.{cog}")
except Exception as e:
print(e)
bot.run(Files.config("main", "token"))
|
the-stack_106_26532 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pytype: skip-file
from __future__ import absolute_import
import argparse
import json
import logging
import sys
from apache_beam.metrics import MetricsFilter
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.testing.load_tests.load_test_metrics_utils import MetricsReader
from apache_beam.testing.test_pipeline import TestPipeline
class LoadTestOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument(
'--publish_to_big_query',
type=cls._str_to_boolean,
help='Publishes pipeline metrics to BigQuery table.')
parser.add_argument(
'--metrics_dataset',
        help='A BigQuery dataset where metrics should be '
'written.')
parser.add_argument(
'--metrics_table',
help='A BigQuery table where metrics should be '
'written.')
parser.add_argument(
'--input_options',
type=json.loads,
help='Input specification of SyntheticSource.')
@staticmethod
def _str_to_boolean(value):
try:
return bool(['false', 'true'].index(value.lower()))
except ValueError:
raise argparse.ArgumentTypeError(
'"true" or "false" expected, got "{}" '
'instead.'.format(value))
class LoadTest(object):
def __init__(self):
self.pipeline = TestPipeline(is_integration_test=True)
load_test_options = self.pipeline.get_pipeline_options().view_as(
LoadTestOptions)
self.input_options = load_test_options.input_options
self.metrics_namespace = load_test_options.metrics_table or 'default'
publish_to_bq = load_test_options.publish_to_big_query
if publish_to_bq is None:
logging.info(
'Missing --publish_to_big_query option. Metrics will not '
'be published to BigQuery.')
if load_test_options.input_options is None:
logging.error('--input_options argument is required.')
sys.exit(1)
gcloud_options = self.pipeline.get_pipeline_options().view_as(
GoogleCloudOptions)
self.project_id = gcloud_options.project
self._metrics_monitor = MetricsReader(
publish_to_bq=publish_to_bq,
project_name=self.project_id,
bq_table=load_test_options.metrics_table,
bq_dataset=load_test_options.metrics_dataset,
# Apply filter to prevent system metrics from being published
filters=MetricsFilter().with_namespace(self.metrics_namespace))
def test(self):
"""An abstract method where the pipeline definition should be put."""
pass
def cleanup(self):
"""An abstract method that executes after the test method."""
pass
def run(self):
try:
self.test()
if not hasattr(self, 'result'):
self.result = self.pipeline.run()
self.result.wait_until_finish()
self._metrics_monitor.publish_metrics(self.result)
finally:
self.cleanup()
def parse_synthetic_source_options(self, options=None):
if not options:
options = self.input_options
return {
'numRecords': options.get('num_records'),
'keySizeBytes': options.get('key_size'),
'valueSizeBytes': options.get('value_size'),
'hotKeyFraction': options.get('hot_key_fraction', 0),
'numHotKeys': options.get('num_hot_keys', 0),
'bundleSizeDistribution': {
'type': options.get('bundle_size_distribution_type', 'const'),
'param': options.get('bundle_size_distribution_param', 0)
},
'forceNumInitialBundles': options.get('force_initial_num_bundles', 0)
}
def get_option_or_default(self, opt_name, default=0):
"""Returns a pipeline option or a default value if it was not provided.
The returned value is converted to an integer.
"""
option = self.pipeline.get_option(opt_name)
try:
return int(option)
except TypeError:
return default
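# Minimal sketch of a concrete load test built on this base class (hypothetical
# names; assumes `apache_beam as beam` and
# `apache_beam.testing.synthetic_pipeline` are imported):
#   class SyntheticSourceLoadTest(LoadTest):
#     def test(self):
#       (self.pipeline
#        | beam.io.Read(
#            synthetic_pipeline.SyntheticSource(
#                self.parse_synthetic_source_options()))
#        | beam.combiners.Count.Globally())
#
#   if __name__ == '__main__':
#     SyntheticSourceLoadTest().run()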
|
the-stack_106_26533 | from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input
from keras.utils.data_utils import get_file
import keras.backend as K
import h5py
import numpy as np
import tensorflow as tf
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
MEAN_PIXEL = np.array([103.939, 116.779, 123.68])
WEIGHTS_PATH = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='253f8cb515780f3b799900260a226db6')
def vgg_layers(inputs, target_layer):
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
if target_layer == 1:
return x
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
if target_layer == 2:
return x
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
if target_layer == 3:
return x
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
if target_layer == 4:
return x
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
return x
def load_weights(model):
    f = h5py.File(WEIGHTS_PATH, 'r')
layer_names = [name for name in f.attrs['layer_names']]
for layer in model.layers:
b_name = layer.name.encode()
if b_name in layer_names:
g = f[b_name]
weights = [g[name] for name in g.attrs['weight_names']]
layer.set_weights(weights)
layer.trainable = False
f.close()
def VGG19(input_tensor=None, input_shape=None, target_layer=1):
"""
VGG19, up to the target layer (1 for relu1_1, 2 for relu2_1, etc.)
"""
if input_tensor is None:
inputs = Input(shape=input_shape)
else:
inputs = Input(tensor=input_tensor, shape=input_shape)
model = Model(inputs, vgg_layers(inputs, target_layer), name='vgg19')
load_weights(model)
return model
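# Minimal usage sketch for VGG19 above (hypothetical shapes; `images` is assumed
# to be a batch of RGB images with values in the 0-255 range):
#   model = VGG19(input_shape=(256, 256, 3), target_layer=3)
#   features = model.predict(preprocess_input(images))  # relu3_1 feature maps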
def preprocess_input(x):
# Convert 'RGB' -> 'BGR'
if type(x) is np.ndarray:
x = x[..., ::-1]
else:
x = tf.reverse(x, [-1])
return x - MEAN_PIXEL
|
the-stack_106_26535 | import numpy as np
class RLSFilterAnalyticIntercept():
"""
Class representing the state of a recursive least squares estimator with
intercept estimation.
"""
def __init__(self, input_dim, output_dim, alpha=1.0, forgetting_factor=1.0):
self.input_dim = input_dim
self.output_dim = output_dim
self.param_dim = input_dim
self.alpha = alpha
self.forgetting_factor = forgetting_factor
self.t = 0.0
self.intercept = np.zeros((self.output_dim, 1))
self.theta = np.zeros((self.param_dim, self.output_dim))
self.corrected_theta = np.zeros_like(self.theta)
self.feat_mean = np.zeros((1, self.param_dim))
self.output_mean = np.zeros((1, self.output_dim))
self.covar = np.eye(self.param_dim) * alpha
def _make_feature_vec(self, in_vec):
assert(in_vec.shape == (self.input_dim, 1))
return in_vec.transpose()
def _update_covar(self, U, C, V):
assert(U.shape == (self.param_dim, 2))
assert(C.shape == (2, 2))
assert(V.shape == (2, self.param_dim))
inv_part = np.linalg.inv(C) + V.dot(self.covar).dot(U)
update = self.covar.dot(U).dot(np.linalg.inv(inv_part)).dot(V).dot(self.covar)
self.covar = (1.0 / self.forgetting_factor ** 2) * (self.covar - update)
def _update_theta(self, C_t, feat, output):
assert(feat.shape == (1, self.param_dim))
assert(output.shape == (self.output_dim, 1))
assert(C_t.shape == (self.param_dim, self.param_dim))
inner_term = feat.transpose().dot(output.transpose()) - C_t.dot(self.theta)
update = self.covar.dot(inner_term)
self.theta = self.theta + update
def _update_output_mean(self, output):
assert(output.shape == (self.output_dim, 1))
self.output_mean = (self.forgetting_factor * self.output_mean) + (1.0 / self.t) * (output.transpose() - (self.forgetting_factor * self.output_mean))
def _update_feat_mean(self, feat):
assert(feat.shape == (1, self.param_dim))
self.feat_mean = (self.forgetting_factor * self.feat_mean) + (1.0 / self.t) * (feat - (self.forgetting_factor * self.feat_mean))
def _make_U(self, feat):
assert(feat.shape == (1, self.param_dim))
return np.block([self.forgetting_factor * self.feat_mean.transpose(), feat.transpose()])
def _make_V(self, feat):
assert(feat.shape == (1, self.param_dim))
return np.block([[self.forgetting_factor * self.feat_mean],[feat]])
def _make_C(self):
return (1 / ((self.forgetting_factor * self.t) ** 2)) * np.array([[((2.0 * self.t - 1.0) ** 2) - 2.0 * (self.t ** 2), -(2.0 * self.t - 1.0) * (self.t - 1.0)],
[-(2.0 * self.t - 1.0) * (self.t - 1.0), (self.t - 1.0) ** 2]])
def process_datum(self, in_vec, output):
feat = self._make_feature_vec(in_vec)
self.t += 1.0
if self.t == 1.0:
self._update_feat_mean(feat)
self._update_output_mean(output)
return
U = self._make_U(feat)
V = self._make_V(feat)
C = self._make_C()
C_t = U.dot(C).dot(V)
self._update_covar(U, C, V)
self._update_output_mean(output)
self._update_feat_mean(feat)
self._update_theta(C_t, feat, output)
self.corrected_theta = self.theta - ((2 * self.t - 1) * self.covar.dot(self.feat_mean.transpose()).dot(self.output_mean))
self.intercept = (self.output_mean - self.feat_mean.dot(self.corrected_theta)).transpose()
def get_identified_mats(self):
"""
        Returns the current estimated coefficient matrix and intercept (A and c in y = A x + c).
"""
return self.corrected_theta.transpose(), self.intercept
def predict(self, in_vec):
feat = self._make_feature_vec(in_vec)
prediction = feat.dot(self.corrected_theta).transpose() + self.intercept
assert(prediction.shape == (self.output_dim, 1))
return prediction
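# Minimal usage sketch for RLSFilterAnalyticIntercept (hypothetical dimensions
# and data stream, illustrative only):
#   rls = RLSFilterAnalyticIntercept(input_dim=3, output_dim=2, forgetting_factor=0.99)
#   for x, y in stream:               # x: (3, 1) column vector, y: (2, 1) column vector
#       rls.process_datum(x, y)
#   A, c = rls.get_identified_mats()  # model y ~ A.dot(x) + c
#   y_hat = rls.predict(x)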
class RecursiveLassoFilter():
def __init__(self, input_dim, output_dim, alpha=1.0, forgetting_factor=1.0, gamma=1.0):
self.input_dim = input_dim
self.output_dim = output_dim
self.param_dim = input_dim
self.alpha = alpha
self.gamma = gamma
self.forgetting_factor = forgetting_factor
self.intercept = np.zeros((self.output_dim, 1))
self.theta = np.zeros((self.param_dim, self.output_dim))
self.ls_filter = RLSFilterAnalyticIntercept(input_dim, output_dim, alpha=alpha, forgetting_factor=forgetting_factor)
def _update_params(self):
mod_ls_theta = np.abs(self.ls_filter.corrected_theta) - self.gamma
mod_ls_intercept = np.abs(self.ls_filter.intercept) - self.gamma
mod_ls_theta[mod_ls_theta < 0.0] = 0.0
mod_ls_intercept[mod_ls_intercept < 0.0] = 0.0
self.theta = np.sign(self.ls_filter.corrected_theta) * mod_ls_theta
self.intercept = np.sign(self.ls_filter.intercept) * mod_ls_intercept
def process_datum(self, in_vec, output):
self.ls_filter.process_datum(in_vec, output)
self._update_params()
def get_identified_mats(self):
return self.theta.transpose(), self.intercept
def predict(self, in_vec):
feat = self.ls_filter._make_feature_vec(in_vec)
prediction = feat.dot(self.theta).transpose() + self.intercept
assert(prediction.shape == (self.output_dim, 1))
return prediction
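# Note on RecursiveLassoFilter._update_params above: it applies the element-wise
# soft-thresholding operator to the least-squares estimate, roughly
#   theta_ij = sign(theta_ls_ij) * max(|theta_ls_ij| - gamma, 0)
# which shrinks small coefficients exactly to zero, giving the lasso-style sparsity.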
|
the-stack_106_26536 | #!/usr/bin/env python3
# Copyright (c) 2020 The DIVI developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Tests the workflow for setting up a masternode vault (with prepared
# unvault tx and destroyed private key), running the masternode with it
# and unvaulting the funds later.
#
# We use seven nodes:
# - node 0 is used to fund and unvault the masternode
# - node 1 is the "hot" masternode
# - node 2 holds the "temporary" vault key and can sign with it
# (but we use it sparingly)
# - nodes 3-6 are just used to get above the "three full nodes" threshold
from test_framework import BitcoinTestFramework
from util import *
from messages import *
from masternode import *
from binascii import unhexlify
import time
class MnVaultsTest (BitcoinTestFramework):
def __init__ (self):
super (MnVaultsTest, self).__init__ ()
self.base_args = ["-debug=masternode", "-debug=mocktime"]
self.cfg = None
def setup_chain (self):
for i in range (7):
initialize_datadir (self.options.tmpdir, i)
def setup_network (self, config_line=None, extra_args=[]):
# The masternode starts off, the others are online initially.
self.nodes = [
start_node (0, self.options.tmpdir, extra_args=self.base_args),
None,
] + [
start_node (i, self.options.tmpdir, extra_args=self.base_args)
for i in [2, 3, 4, 5, 6]
]
# We want to work with mock times that are beyond the genesis
        # block timestamp but before the current time (so that nodes that are
        # still starting up, before they switch to mocktime, do not reject
        # the on-disk blockchain).
self.time = 1580000000
assert self.time < time.time ()
set_node_times (self.nodes, self.time)
# Nodes 3-5 are connected between each other, and the cluster is
# also connected to nodes 0-2.
connect_nodes (self.nodes[3], 4)
connect_nodes (self.nodes[3], 5)
connect_nodes (self.nodes[3], 6)
connect_nodes (self.nodes[4], 5)
connect_nodes (self.nodes[4], 6)
connect_nodes (self.nodes[5], 6)
for i in [0, 2]:
connect_nodes (self.nodes[i], 3)
connect_nodes (self.nodes[i], 4)
connect_nodes (self.nodes[i], 5)
connect_nodes (self.nodes[i], 6)
self.is_network_split = False
def start_node (self, n):
"""Starts node n with the proper arguments
and masternode config for it."""
args = self.base_args
if n == 1:
args.append ("-masternode")
args.append ("-masternodeprivkey=%s" % self.cfg.privkey)
if self.cfg:
cfg = [self.cfg.line]
else:
cfg = []
self.nodes[n] = start_node (n, self.options.tmpdir,
extra_args=args, mn_config_lines=cfg)
self.nodes[n].setmocktime (self.time)
for i in [3, 4, 5, 6]:
connect_nodes (self.nodes[n], i)
sync_blocks (self.nodes)
def stop_node (self, n):
stop_node (self.nodes[n], n)
self.nodes[n] = None
def advance_time (self, dt=1):
"""Advances mocktime by the given number of seconds."""
self.time += dt
set_node_times (self.nodes, self.time)
def mine_blocks (self, n):
"""Mines blocks with node 3."""
sync_mempools (self.nodes)
self.nodes[3].setgenerate(True, n)
sync_blocks (self.nodes)
def run_test (self):
self.fund_vault ()
self.start_masternode ()
self.get_payments ()
self.unvault ()
def fund_vault (self):
print ("Funding masternode vault...")
self.nodes[0].setgenerate (True, 5)
sync_blocks (self.nodes)
self.mine_blocks (20)
addr = self.nodes[2].getnewaddress ()
privkey = self.nodes[2].dumpprivkey (addr)
amount = 100
txid = self.nodes[0].sendtoaddress (addr, amount)
raw = self.nodes[0].getrawtransaction (txid, 1)
vout = None
for i in range (len (raw["vout"])):
o = raw["vout"][i]
if addr in o["scriptPubKey"]["addresses"]:
vout = i
break
assert vout is not None
unvaultAddr = self.nodes[0].getnewaddress ("unvaulted")
data = self.nodes[0].validateaddress (unvaultAddr)
tx = CTransaction ()
tx.vin.append (CTxIn (COutPoint (txid=txid, n=vout)))
tx.vout.append (CTxOut (amount * COIN, unhexlify (data["scriptPubKey"])))
unsigned = ToHex (tx)
validated = self.nodes[0].validateaddress (addr)
script = validated["scriptPubKey"]
prevtx = [{"txid": txid, "vout": vout, "scriptPubKey": script}]
signed = self.nodes[0].signrawtransaction (unsigned, prevtx, [privkey],
"SINGLE|ANYONECANPAY")
assert_equal (signed["complete"], True)
self.unvaultTx = signed["hex"]
self.cfg = fund_masternode (self.nodes[0], "mn", "copper", txid,
"localhost:%d" % p2p_port (1))
# FIXME: Use reward address from node 0.
self.cfg.rewardAddr = addr
for i in [0, 2]:
self.stop_node (i)
self.start_node (i)
# Prepare the masternode activation broadcast, without actually
# relaying it to the network. After this is done, node 2 with the
# "temporary" private key is no longer needed at all, and can be
# shut down for the rest of the test.
bc = self.nodes[2].startmasternode ("mn", True)
assert_equal (bc["status"], "success")
self.broadcast = bc["broadcastData"]
self.stop_node (2)
self.mine_blocks (20)
def start_masternode (self):
print ("Starting masternode from vault...")
# Advance some time to simulate starting the node later (e.g. also when
# restarting it as necessary during operation).
for _ in range (100):
self.advance_time (100)
time.sleep(0.01)
# Due to advancing the time without having any masternodes, sync will
# have failed on the nodes that are up. Reset the sync now to make
# sure they will then properly sync together with the other nodes
# after we start our masternode.
for n in self.nodes:
if n is not None:
n.mnsync ("reset")
# Now start and activate the masternode based on the stored
# broadcast message.
self.start_node (1)
bc = self.nodes[1].broadcaststartmasternode (self.broadcast, "update_ping")
assert_equal (bc["status"], "success")
# Finish masternode sync.
for _ in range (100):
self.advance_time ()
time.sleep(0.01)
for n in self.nodes:
if n is not None:
status = n.mnsync ("status")
assert_equal (status["currentMasternodeSyncStatus"], 999)
# Check that the masternode is indeed active.
data = self.nodes[1].getmasternodestatus ()
assert_equal (data["status"], 4)
assert_equal (data["message"], "Masternode successfully started")
def get_payments (self):
print ("Receiving masternode payments...")
# For payments, the masternode needs to be active at least 8000 seconds
# and we also need at least 100 blocks. We also need some extra
# leeway in the time due to the one hour we add to the current time
# when signing a collateral that is not yet 15 times confirmed.
self.mine_blocks (100)
for _ in range (150):
self.advance_time (100)
time.sleep(0.01)
cnt = self.nodes[3].getmasternodecount ()
assert_equal (cnt["total"], 1)
assert_equal (cnt["enabled"], 1)
assert_equal (cnt["inqueue"], 1)
# Mine some blocks, but advance the time in between and do it
# one by one so the masternode winners can get broadcast between
# blocks and such.
for _ in range (10):
self.mine_blocks (1)
self.advance_time (10)
time.sleep(0.01)
# Check that some payments were made.
winners = self.nodes[3].getmasternodewinners ()
found = False
for w in winners:
if w["winner"]["address"] == self.cfg.rewardAddr:
found = True
break
assert_equal (found, True)
# FIXME: Check in wallet when we have a custom reward address.
def unvault (self):
print ("Unvaulting the funds...")
# The prepared unvaulting tx is just a single input/output pair
# with no fee attached. To add the transaction fee, we add another
# input and output, which is fine due to the SINGLE|ANYONECANPAY signature
# that we used.
fee = Decimal ('0.10000000')
inp = self.nodes[0].listunspent ()[0]
change = int ((inp["amount"] - fee) * COIN)
assert_greater_than (change, 0)
changeAddr = self.nodes[0].getnewaddress ()
data = self.nodes[0].validateaddress (changeAddr)
tx = FromHex (CTransaction (), self.unvaultTx)
tx.vin.append (CTxIn (COutPoint (txid=inp["txid"], n=inp["vout"])))
tx.vout.append (CTxOut (change, unhexlify (data["scriptPubKey"])))
partial = ToHex (tx)
signed = self.nodes[0].signrawtransaction (partial)
assert_equal (signed["complete"], True)
self.nodes[0].sendrawtransaction (signed["hex"])
self.mine_blocks (1)
assert_equal (self.nodes[0].getbalance ("unvaulted"), 100)
if __name__ == '__main__':
MnVaultsTest ().main ()
|
the-stack_106_26537 | """Function to compare global distributions of turnover time."""
import os.path
import iris
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import scipy.stats as stats
from esmvaltool.diag_scripts.shared import (
ProvenanceLogger,
get_diagnostic_filename,
get_plot_filename,
group_metadata,
run_diagnostic,
)
import esmvaltool.diag_scripts.land_carbon_cycle.plot_utils as plut
from esmvaltool.diag_scripts.land_carbon_cycle.shared import (
_apply_common_mask,
_load_variable,
_remove_invalid,
_var_name_constraint,
)
from esmvaltool.diag_scripts.land_carbon_cycle.provenance import (
_get_ancestor_files,
_get_provenance_record,
)
# set the properties of the lines used for hatching
mpl.rcParams['hatch.color'] = 'yellow'
mpl.rcParams['hatch.linewidth'] = 0.7
# Figure settings and colorbar info
def _get_diagonal_colorbar_info():
"""
Get dictionary of colormap and colorbar information for diagonal maps.
    These settings are needed for plotting the maps along the diagonal, i.e.,
    the maps of turnover time.
"""
cb_info_diagonal = {}
cb_name = 'plasma_r'
cb_info_diagonal['tickBounds'] = np.concatenate(
([1], np.linspace(8, 16, num=10)[:-1], np.linspace(16, 32,
num=10)[:-1],
np.linspace(32, 64, num=10)[:-1], np.linspace(64, 128, num=10)[:-1],
np.linspace(128, 256,
num=10)[:-1], np.linspace(256, 1000, num=2,
endpoint=True)))
cb_info_diagonal['ticksLoc'] = np.array([1, 8, 16, 32, 64, 128, 256])
clist_ = plut.get_colomap(cb_name,
cb_info_diagonal['tickBounds'],
lowp=0.,
hip=1)
cb_info_diagonal['colMap'] = mpl.colors.ListedColormap(clist_)
return cb_info_diagonal
def _get_fig_config(diag_config):
"""
Get figure setting and configurations.
default settings of the figure, and replace default with
runtime settings from recipe
Argument:
--------
diag_config - nested dictionary of metadata
Return:
------
a dictionary of settings
"""
nmodels = len(group_metadata(diag_config['input_data'].values(),
'dataset')) + 1
w_pl = 1. / nmodels
h_pl = w_pl
aspect_map = 0.5
fig_config = {
# generic settings
'ax_fs': 7.1,
'fill_value': np.nan,
# settings of the figure and maps
'x0': 0.02,
'y0': 1.0,
'wp': w_pl,
'hp': h_pl,
'xsp': 0.0,
'ysp': -0.03,
'aspect_map': aspect_map,
# settings for the location of scatterplots
'xsp_sca': w_pl / 3 * aspect_map,
'ysp_sca': h_pl / 3 * aspect_map,
# colorbar specific settings
'hcolo': 0.0123,
'wcolo': 0.25,
'cb_off_y': 0.06158,
'x_colo_d': 0.02,
'x_colo_r': 0.76,
'y_colo_single': 0.1086,
# the correlation method for metric
# given in the title of the scatterplot
'correlation_method': 'spearman',
'tx_y_corr': 1.075,
# define the range of data and masks
'valrange_sc': (2, 256),
'obs_global': 23,
'gpp_threshold': 0.01
}
# replace default values with those provided in recipe
fig_config.update(diag_config.get('fig_config'))
return fig_config
def _get_ratio_colorbar_info():
"""
Get dictionary of colormap and colorbar information for off-diagonal maps.
The maps of ratios above the diagonal.
"""
cb_info_ratio = {}
border = 0.9
ncolo = 128
num_gr = int(ncolo // 4)
num_col = num_gr - 4
# get the colormap
cb_info_ratio['tickBounds'] = np.concatenate(
(np.geomspace(0.2, 0.25,
num=num_col), np.geomspace(0.25, 0.33, num=num_col),
np.geomspace(0.33, 0.5,
num=num_col), np.geomspace(0.5, border, num=num_col),
np.linspace(border, 1 / border,
num=num_gr), np.geomspace(1 / border, 2, num=num_col),
np.geomspace(2, 3, num=num_col), np.geomspace(3, 4, num=num_col),
np.geomspace(4, 5, num=num_col)))
colors1 = plt.cm.Blues(np.linspace(0.15, 0.998, (num_col) * 4))[::-1]
colorsgr = np.tile(np.array([0.8, 0.8, 0.8, 1]),
num_gr).reshape(num_gr, -1)
colors2 = plt.cm.Reds(np.linspace(0.15, 0.998, (num_col) * 4))
# combine them and build a new colormap
colors1g = np.vstack((colors1, colorsgr))
colors = np.vstack((colors1g, colors2))
cb_info_ratio['colMap'] = mpl.colors.LinearSegmentedColormap.from_list(
'my_colormap', colors)
cb_info_ratio['ticksLoc'] = [0.2, 0.25, 0.33, 0.5, 0.9, 1.1, 2, 3, 4, 5]
cb_info_ratio['ticksLab'] = [
'$\\dfrac{1}{5}$', '$\\dfrac{1}{4}$', '$\\dfrac{1}{3}$',
'$\\dfrac{1}{2}$', '$\\dfrac{1}{1.1}$', '$1.1$', '$2$', '$3$', '$4$',
'$5$'
]
return cb_info_ratio
def _get_agreement_mask(mmdat, dat_5, dat_95, nmodel_reject=2):
"""
Get mask of multimodel agreement.
    Flags regions where fewer than ``nmodel_reject`` model simulations fall
    within the range of observational uncertainty.
"""
_maskf = np.zeros_like(mmdat)
_maskf[(mmdat < dat_95) & (mmdat > dat_5)] = 1
num_count = _maskf.sum(0)
agreement_mask = np.zeros_like(num_count)
agreement_mask[num_count < nmodel_reject] = 1
wnan = np.ma.masked_invalid(dat_5).mask
agreement_mask[wnan] = 0.
return agreement_mask
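# Rough illustration of _get_agreement_mask (hypothetical 1x1 grid with 4 models):
#   mmdat = np.array([[[10.]], [[20.]], [[30.]], [[40.]]])
#   mask = _get_agreement_mask(mmdat, np.array([[15.]]), np.array([[35.]]),
#                              nmodel_reject=2)
#   # two of the four models lie inside (15, 35), so num_count == 2 and the
#   # cell is not flagged; with nmodel_reject=3 it would be flagged (mask == 1).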
def _get_hex_data(dat_1, dat_2, fig_config):
"""
Get data for density plots.
    Requires that both arrays have the same mask with regard to valid data
    points.
"""
dat_1[(dat_1 < fig_config['valrange_sc'][0] * 0.5)] = np.nan
dat_1[(dat_1 > fig_config['valrange_sc'][1] * 1.5)] = np.nan
dat_2[(dat_2 < fig_config['valrange_sc'][0] * 0.5)] = np.nan
dat_2[(dat_2 > fig_config['valrange_sc'][1] * 1.5)] = np.nan
dat_1, dat_2 = _apply_common_mask(dat_1, dat_2)
dat_1mc = np.ma.masked_equal(dat_1, np.nan).compressed()
dat_2mc = np.ma.masked_equal(dat_2, np.nan).compressed()
return dat_1mc, dat_2mc
def _get_obs_data(diag_config):
"""
Get and handle the observations of turnover time from Carvalhais 2014.
Argument:
--------
diag_config - nested dictionary of metadata
Return:
------
dictionary with observation data with different variables as keys
"""
if not diag_config.get('obs_variable'):
raise ValueError('The observation variable needs to be specified in '
'the recipe (see recipe description for details)')
obs_dir = os.path.join(diag_config['auxiliary_data_dir'],
diag_config['obs_info']['obs_data_subdir'])
all_data = {}
all_data['global'] = {}
all_data['grid'] = {}
fig_config = _get_fig_config(diag_config)
var_list = diag_config.get('obs_variable')
input_files = []
for _var in var_list:
var_list = np.append(var_list, '{var}_{perc:d}'.format(var=_var,
perc=5))
var_list = np.append(var_list, '{var}_{perc:d}'.format(var=_var,
perc=95))
obs_filename = (f'{_var}_{{frequency}}_{{source_label}}_'
f'{{variant_label}}_{{grid_label}}.nc'.format(
**diag_config['obs_info']))
input_files = np.append(input_files,
os.path.join(obs_dir, obs_filename))
nvars = len(var_list)
for v_ind in range(nvars):
var_obs = var_list[v_ind]
all_data['coords'] = {}
variable_constraint = _var_name_constraint(var_obs)
cube = iris.load_cube(input_files, constraint=variable_constraint)
all_data['grid'][var_obs] = cube
all_data['global'][var_obs] = fig_config['obs_global']
for coord in cube.coords():
all_data['coords'][coord.name()] = coord.points
all_data['input_files'] = input_files
return all_data
def _calc_turnover(ctotal, gpp, _model):
"""
Calculate the turnover time from ctotal and gpp.
Argument:
--------
ctotal- iris cube of total carbon stock
gpp - iris cube of gross primary productivity
Return:
------
tau_ctotal - iris cube of turnover time in years
"""
# calculate turnover and convert units to yr
tau_ctotal = (ctotal / gpp)
tau_ctotal.convert_units('yr')
# set the attributes
tau_ctotal.var_name = 'tau_ctotal'
tau_ctotal.standard_name = None
tau_ctotal.long_name = 'ecosystem_carbon_turnover_time'
tau_ctotal.units = 'yr'
return tau_ctotal
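# In short, _calc_turnover implements the simple ratio
#   tau_ctotal = ctotal / gpp
# (typically kg m-2 divided by kg m-2 s-1, i.e. seconds), with the result
# converted to years by iris above.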
def _fix_map(axis_obj):
"""
Beautify map object.
Clean boundaries, coast lines, and removes the outline box/circle.
"""
axis_obj.set_global()
axis_obj.coastlines(linewidth=0.4, color='grey')
plt.gca().outline_patch.set_visible(False)
return axis_obj
def _get_data_to_plot(_data):
"""
Get data to plot on map.
Correct for the rotations of latitude and longitude.
"""
xroll = _data.shape[1] / 2
_data = np.roll(np.flipud(_data), int(xroll), axis=1)
return _data
def _get_matrix_map_axes(_row_m, _col_m, _fig_config):
"""
Get the axes object for matrix maps.
Argument:
--------
_row_m - row location in the matrix
_col_m - column location in the matrix
_fig_config - figure settings
Return:
------
_ax - an axes object
"""
if _row_m == _col_m:
_ax = plt.axes([
_fig_config['x0'] + _row_m * _fig_config['wp'] +
_row_m * _fig_config['xsp'], _fig_config['y0'] -
(_col_m * _fig_config['hp'] + _col_m * _fig_config['ysp']),
_fig_config['wp'], _fig_config['hp']
], projection=ccrs.Robinson(central_longitude=0), frameon=False)
if _row_m < _col_m:
_ax = plt.axes([
_fig_config['x0'] + _row_m * _fig_config['wp'] +
_row_m * _fig_config['xsp'] + _fig_config['xsp_sca'],
_fig_config['y0'] -
(_col_m * _fig_config['hp'] + _col_m * _fig_config['ysp']) +
_fig_config['ysp_sca'],
_fig_config['wp'] * _fig_config['aspect_map'],
_fig_config['hp'] * _fig_config['aspect_map']
])
if _row_m > _col_m:
_ax = plt.axes([
_fig_config['x0'] + _row_m * _fig_config['wp'] +
_row_m * _fig_config['xsp'], _fig_config['y0'] -
(_col_m * _fig_config['hp'] + _col_m * _fig_config['ysp']),
_fig_config['wp'], _fig_config['hp']
], projection=ccrs.Robinson(central_longitude=0), frameon=False)
return _ax
def _fix_matrix_axes(row_m, col_m, models, nmodels, diag_config, fig_config):
"""Fix the axes lines and titles in matrix maps."""
row_mod = models[row_m]
col_mod = models[col_m]
if row_m != 0 and col_m != nmodels - 1:
plut.ax_clr()
plut.rotate_labels(which_ax='x', axfs=fig_config['ax_fs'], rot=90)
elif row_m == 0 and col_m != nmodels - 1:
plut.ax_clr_x(axfs=fig_config['ax_fs'])
plut.rotate_labels(which_ax='x', axfs=fig_config['ax_fs'], rot=90)
elif col_m == nmodels - 1 and row_m != 0:
plut.ax_clr_y(axfs=fig_config['ax_fs'])
plut.rotate_labels(which_ax='x', axfs=fig_config['ax_fs'], rot=90)
if row_m == 0 and col_m == nmodels - 1:
plut.ax_orig(axfs=fig_config['ax_fs'])
plut.rotate_labels(which_ax='x', axfs=fig_config['ax_fs'], rot=90)
plt.ylabel('$model_{column}$', fontsize=fig_config['ax_fs'])
plt.xlabel('$model_{row}$', fontsize=fig_config['ax_fs'])
if col_m == 0:
if row_mod == 'obs':
_title_sp = diag_config['obs_info']['source_label']
else:
_title_sp = row_mod
plt.title(str(row_m + 1) + ': ' + _title_sp,
fontsize=0.809 * fig_config['ax_fs'])
if row_m == nmodels - 1:
if col_mod == 'obs':
_title_sp = diag_config['obs_info']['source_label']
else:
_title_sp = col_mod
_title_sp = str(col_m + 1)
t_x = plt.gca().text(1.1,
0.5,
_title_sp,
fontsize=0.809 * fig_config['ax_fs'],
va='center',
ha='center',
transform=plt.gca().transAxes)
else:
t_x = ''
return t_x
def _draw_121_line():
"""Draw 1:1 line on the current axis."""
ymin, ymax = plt.ylim()
xmin, xmax = plt.xlim()
plt.plot((xmin, xmax), (ymin, ymax), 'k', lw=0.1)
def _plot_matrix_map(plot_path_matrix, global_tau_mod, global_tau_obs,
diag_config):
"""
Plot the matrix of maps model-observation full factorial comparison.
Argument:
--------
diag_config - nested dictionary of metadata
cube - the cube to plot
dataset - name of the dataset to plot
"""
fig_config = _get_fig_config(diag_config)
models = list(global_tau_mod['grid'].keys())
models = sorted(models, key=str.casefold)
multimodel_stats = 'MultiModelMedian MultiModelMean'.split()
for _mm in multimodel_stats:
if _mm in models:
models.append(models.pop(models.index(_mm)))
models.insert(0, 'obs')
global_tau_mod['grid']['obs'] = global_tau_obs['grid']['tau_ctotal']
global_tau_mod['global']['obs'] = global_tau_obs['global']['tau_ctotal']
nmodels = len(models)
# define the data and information for plotting ratios
cb_info_ratio = _get_ratio_colorbar_info()
# get the colormap for diagonal maps
cb_info_diagonal = _get_diagonal_colorbar_info()
plt.figure(figsize=(9, 6))
for row_m in range(nmodels):
dat_row = global_tau_mod['grid'][models[row_m]].data
for col_m in range(nmodels):
dat_col = global_tau_mod['grid'][models[col_m]].data
_ax = _get_matrix_map_axes(row_m, col_m, fig_config)
# plot the maps along the diagonal
if row_m == col_m:
plt.imshow(_get_data_to_plot(dat_row),
norm=mpl.colors.BoundaryNorm(
cb_info_diagonal['tickBounds'],
len(cb_info_diagonal['tickBounds'])),
cmap=cb_info_diagonal['colMap'],
origin='upper',
vmin=cb_info_diagonal['tickBounds'][0],
vmax=cb_info_diagonal['tickBounds'][-1],
transform=ccrs.PlateCarree())
_fix_map(_ax)
# plot the scatterplot/density plot below the diagonal
if row_m < col_m:
dat1h, dat2h = _get_hex_data(dat_col, dat_row, fig_config)
_ax.hexbin(dat1h,
dat2h,
bins='log',
mincnt=3,
gridsize=40,
cmap='viridis_r',
linewidths=0)
plt.ylim(fig_config['valrange_sc'][0],
fig_config['valrange_sc'][1] * 1.05)
plt.xlim(fig_config['valrange_sc'][0],
fig_config['valrange_sc'][1] * 1.05)
_draw_121_line()
if fig_config['correlation_method'] == 'pearson':
corr = (stats.pearsonr(dat1h, dat2h)[0])**2
else:
corr = (stats.spearmanr(dat1h, dat2h)[0])**2
plt.title('$R^2$={corr:.2f}'.format(corr=corr),
fontsize=fig_config['ax_fs'] * 0.953,
ma='left',
y=fig_config['tx_y_corr'],
va="top")
# plot the maps of ratio of models and observation above the
# diagonal
if row_m > col_m:
plot_dat = _remove_invalid(dat_row / dat_col,
fill_value=fig_config['fill_value'])
_ax.imshow(_get_data_to_plot(plot_dat),
norm=mpl.colors.BoundaryNorm(
cb_info_ratio['tickBounds'],
len(cb_info_ratio['tickBounds'])),
interpolation='none',
vmin=cb_info_ratio['tickBounds'][0],
vmax=cb_info_ratio['tickBounds'][-1],
cmap=cb_info_ratio['colMap'],
origin='upper',
transform=ccrs.PlateCarree())
_fix_map(_ax)
t_x = _fix_matrix_axes(row_m, col_m, models, nmodels, diag_config,
fig_config)
# plot the colorbar for maps along the diagonal
y_colo = fig_config['y0'] + fig_config['hp'] + fig_config['cb_off_y']
_axcol_dia = [
fig_config['x_colo_d'], y_colo, fig_config['wcolo'],
fig_config['hcolo']
]
cb_tit_d = '{name} ({unit})'.format(
name=global_tau_mod['grid'][models[col_m]].long_name,
unit=global_tau_mod['grid'][models[col_m]].units)
col_bar = plut.mk_colo_tau(_axcol_dia,
cb_info_diagonal['tickBounds'],
cb_info_diagonal['colMap'],
tick_locs=cb_info_diagonal['ticksLoc'],
cbfs=0.86 * fig_config['ax_fs'],
cbtitle=cb_tit_d,
cbrt=90)
# plot the colorbar for maps above the diagonal
y_colo = fig_config['y0'] + fig_config['hp'] + fig_config['cb_off_y']
_axcol_rat = [
fig_config['x_colo_r'], y_colo, fig_config['wcolo'],
fig_config['hcolo']
]
col_bar = plut.mk_colo_cont(
_axcol_rat,
cb_info_ratio['tickBounds'],
cb_info_ratio['colMap'],
cbfs=0.7 * fig_config['ax_fs'],
cbrt=90,
col_scale='log',
cbtitle='ratio ($model_{column}$/$model_{row}$)',
tick_locs=cb_info_ratio['ticksLoc'])
col_bar.ax.set_xticklabels(cb_info_ratio['ticksLab'],
fontsize=0.86 * fig_config['ax_fs'],
ha='center',
rotation=0)
# save and close figure
plut.save_figure(plot_path_matrix, _extr_art=[t_x])
plt.close()
def _plot_multimodel_agreement(plot_path_multimodel, global_tau_mod,
global_tau_obs, diag_config):
"""
Plot map of multimodel bias and multimodel agreement.
    Arguments:
    ---------
    plot_path_multimodel - path of the output figure
    global_tau_mod - dictionary of all model data
    global_tau_obs - dictionary of observed data
    diag_config - nested dictionary of metadata
"""
# get the settings for plotting figure
fig_config = _get_fig_config(diag_config)
# get the observation data needed to calculate the bias and multimodel
# agreement
obs_var = diag_config.get('obs_variable')[0]
tau_obs = global_tau_obs['grid'][obs_var].data
tau_obs_5 = global_tau_obs['grid'][obs_var + '_5'].data
tau_obs_95 = global_tau_obs['grid'][obs_var + '_95'].data
# set the information of the colormap used for plotting bias
cb_info = _get_ratio_colorbar_info()
# calculate the bias of multimodel median turnover time
models = list(global_tau_mod['grid'].keys())
# remove multimodel estimates from the list of models
multimodel_stats = 'MultiModelMedian MultiModelMean'.split()
for _mm in multimodel_stats:
if _mm in models:
models.remove(_mm)
nmodels = len(models)
dat_tau_full = np.ones((nmodels, np.shape(tau_obs)[0],
np.shape(tau_obs)[1])) * fig_config['fill_value']
for row_m in range(nmodels):
row_mod = models[row_m]
dat_tau = global_tau_mod['grid'][row_mod]
dat_tau_full[row_m] = _remove_invalid(
dat_tau.data, fill_value=fig_config['fill_value'])
mm_tau = _remove_invalid(np.nanmedian(dat_tau_full, axis=0),
fill_value=fig_config['fill_value'])
mm_bias_tau = mm_tau / tau_obs
mm_bias_tau = _remove_invalid(mm_bias_tau,
fill_value=fig_config['fill_value'])
# define figure and main axis to plot the map
plt.figure(figsize=(5, 3))
_ax = plt.axes([0.1, 0.1, 0.9, 0.9],
projection=ccrs.Robinson(central_longitude=0),
frameon=False)
# plot the data of multimodel bias (=bias of multimodel median turnover
# time)
_ax.imshow(_get_data_to_plot(mm_bias_tau),
norm=mpl.colors.BoundaryNorm(cb_info['tickBounds'],
len(cb_info['tickBounds'])),
interpolation='none',
vmin=cb_info['tickBounds'][0],
vmax=cb_info['tickBounds'][-1],
cmap=cb_info['colMap'],
origin='upper',
transform=ccrs.PlateCarree())
_fix_map(_ax)
# get the model agreement mask (less than quarter of the model within the
# observational uncertainty)
agreement_mask_tau = _get_agreement_mask(dat_tau_full,
tau_obs_5,
tau_obs_95,
nmodel_reject=int(nmodels / 4))
# plot the hatches for uncertainty/multimodel agreement
lats = global_tau_obs['coords']['latitude']
lons = global_tau_obs['coords']['longitude']
latint = abs(lats[1] - lats[0])
lonint = abs(lons[1] - lons[0])
x_lat, y_lon = np.meshgrid(lons - lonint / 2, lats - latint / 2)
_ax.contourf(x_lat,
y_lon,
agreement_mask_tau,
levels=[0, 0.5, 1],
alpha=0.,
hatches=['', '//////'],
linewidth=0.2,
transform=ccrs.PlateCarree())
title_str = ('multimodel bias and agreement (-)\n{title}'.format(
title=global_tau_obs['grid']['tau_ctotal'].long_name))
plt.title(title_str, fontsize=0.98 * fig_config['ax_fs'])
# plot colorbar using extraUtils
_axcol_rat = [0.254, fig_config['y_colo_single'], 0.6, 0.035]
col_bar = plut.mk_colo_cont(_axcol_rat,
cb_info['tickBounds'],
cb_info['colMap'],
cbfs=0.8 * fig_config['ax_fs'],
cbrt=90,
col_scale='log',
cbtitle='',
tick_locs=cb_info['ticksLoc'])
col_bar.ax.set_xticklabels(cb_info['ticksLab'],
fontsize=0.9586 * fig_config['ax_fs'],
ha='center',
rotation=0)
# save and close figure
t_x = plt.figtext(0.5, 0.5, ' ', transform=plt.gca().transAxes)
plut.save_figure(plot_path_multimodel, _extr_art=[t_x])
plt.close()
def _plot_single_map(plot_path, _dat, _datglobal, _name, provenance_record,
diag_config):
"""
Plot a map for a given variable.
    Arguments:
    ---------
    plot_path - path of the output figure
    _dat - the cube (map) of the variable to plot
    _datglobal - global aggregate value of the variable
    _name - name of the dataset to plot
    provenance_record - provenance record of the plot
    diag_config - nested dictionary of metadata
"""
# figure configuration
fig_config = _get_fig_config(diag_config)
# colormap configuration
cb_info = _get_diagonal_colorbar_info()
# define the figure and axis
plt.figure(figsize=(5, 3))
_ax = plt.axes([0.1, 0.1, 0.9, 0.9],
projection=ccrs.Robinson(central_longitude=0),
frameon=False)
# plot data over the map
plt.imshow(_get_data_to_plot(_dat.data),
norm=mpl.colors.BoundaryNorm(cb_info['tickBounds'],
len(cb_info['tickBounds'])),
cmap=cb_info['colMap'],
origin='upper',
vmin=cb_info['tickBounds'][0],
vmax=cb_info['tickBounds'][-1],
transform=ccrs.PlateCarree())
_fix_map(_ax)
# get the data and set the title of the map
_dat_median = np.nanmedian(
_remove_invalid(_dat.data, fill_value=fig_config['fill_value']))
title_str = (f'{_dat.long_name} ({_dat.units}), {_name},\n'
f'global = {_datglobal:.2f}, median = {_dat_median:.2f}')
plt.title(title_str, fontsize=0.98 * fig_config['ax_fs'])
# draw the colorbar
_axcol_dia = [0.254, fig_config['y_colo_single'], 0.6, 0.035]
plut.mk_colo_tau(_axcol_dia,
cb_info['tickBounds'],
cb_info['colMap'],
tick_locs=cb_info['ticksLoc'],
cbfs=0.86 * fig_config['ax_fs'],
cbtitle='',
cbrt=90)
# save and close figure
t_x = plt.figtext(0.5, 0.5, ' ', transform=plt.gca().transAxes)
plut.save_figure(plot_path, _extr_art=[t_x])
plt.close()
with ProvenanceLogger(diag_config) as provenance_logger:
provenance_logger.log(plot_path, provenance_record)
def main(diag_config):
"""
Evaluate global distribution of ecosystem carbon turnover time.
Argument:
--------
diag_config - nested dictionary of metadata
"""
model_data_dict = group_metadata(diag_config['input_data'].values(),
'dataset')
# get the data from the observation
global_tau_obs = _get_obs_data(diag_config)
base_name = ('{title}_{source_label}_'
'{grid_label}'.format(
title=global_tau_obs['grid']['tau_ctotal'].long_name,
source_label=diag_config['obs_info']['source_label'],
grid_label=diag_config['obs_info']['grid_label']))
global_tau_mod = {}
global_tau_mod['grid'] = {}
global_tau_mod['global'] = {}
provenance_record_matrix = _get_provenance_record(
"Matrix Comparison of global distributions of turnover time of carbon",
['mean', 'perc'], ['global'],
_get_ancestor_files(diag_config, 'tau_ctotal'))
provenance_record_multimodel = _get_provenance_record(
"Multimodel bias and agreements of global distributions of turnover"
"time of carbon. Reproduces figure 3 in Carvalhais et al. (2014).",
['mean', 'perc'], ['global'],
_get_ancestor_files(diag_config, 'tau_ctotal'))
for model_name, model_dataset in model_data_dict.items():
global_tau_mod[model_name] = {}
# load the data
ctotal = _load_variable(model_dataset, 'ctotal')
gpp = _load_variable(model_dataset, 'gpp')
tau_ctotal = _calc_turnover(ctotal, gpp, model_name)
global_tau_mod['grid'][model_name] = tau_ctotal
# apply the GPP threshold and set the data in dictionary
gpp_global = gpp.collapsed(['latitude', 'longitude'],
iris.analysis.SUM)
ctotal_global = ctotal.collapsed(['latitude', 'longitude'],
iris.analysis.SUM)
tau_global = ctotal_global / gpp_global
tau_global.convert_units('yr')
        global_tau_mod['global'][model_name] = float(tau_global.core_data())
base_name_mod = (
'global_{title}_{source_label}_'
'{grid_label}'.format(
title=global_tau_obs['grid']['tau_ctotal'].long_name,
source_label=model_name,
grid_label=diag_config['obs_info']['grid_label']))
plot_path_mod = get_plot_filename(base_name_mod, diag_config)
# plot_path_list.append(plot_path_mod)
provenance_record_mod = _get_provenance_record(
"Map of global distribution of turnover time of carbon",
['mean', 'perc'],
['global'],
{model_name: model_dataset})
_plot_single_map(plot_path_mod, tau_ctotal,
global_tau_mod['global'][model_name],
model_name,
provenance_record_mod,
diag_config)
model_cubes = [
c for c in global_tau_mod['grid'].values()
if isinstance(c, iris.cube.Cube)
]
obs_cubes = [
c for c in global_tau_obs['grid'].values()
if isinstance(c, iris.cube.Cube)
]
netcdf_path = get_diagnostic_filename(base_name_mod, diag_config)
save_cubes = iris.cube.CubeList(model_cubes + obs_cubes)
iris.save(save_cubes, netcdf_path)
with ProvenanceLogger(diag_config) as provenance_logger:
provenance_logger.log(netcdf_path, provenance_record_mod)
# multimodel agreement
base_name_multimodel = '{prefix}_{base_name}'.format(
prefix='global_multimodelAgreement', base_name=base_name)
plot_path_multimodel = get_plot_filename(base_name_multimodel,
diag_config)
_plot_multimodel_agreement(plot_path_multimodel, global_tau_mod,
                               global_tau_obs, diag_config)
with ProvenanceLogger(diag_config) as provenance_logger:
provenance_logger.log(plot_path_multimodel,
provenance_record_multimodel)
# map of observation
base_name_obs = '{prefix}_{base_name}'.format(prefix='global',
base_name=base_name)
plot_path_obs = get_plot_filename(base_name_obs, diag_config)
provenance_record_obs = _get_provenance_record(
"Map of observed global distribution of turnover time of carbon",
['mean', 'perc'],
['global'],
global_tau_obs['input_files'].tolist())
_plot_single_map(plot_path_obs,
global_tau_obs['grid']['tau_ctotal'],
global_tau_obs['global']['tau_ctotal'],
                     diag_config['obs_info']['source_label'],
provenance_record_obs,
diag_config)
# matrix of maps
base_name_matrix = '{prefix}_{base_name}'.format(
prefix='global_matrix_map', base_name=base_name)
plot_path_matrix = get_plot_filename(base_name_matrix, diag_config)
_plot_matrix_map(plot_path_matrix, global_tau_mod, global_tau_obs,
                     diag_config)
with ProvenanceLogger(diag_config) as provenance_logger:
provenance_logger.log(plot_path_matrix, provenance_record_matrix)
if __name__ == '__main__':
with run_diagnostic() as config:
main(config)
|
the-stack_106_26540 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json, pprint
import requests
class Authenticator( object ):
""" Enables easy calls to the BorrowDirect authN/Z webservices.
BorrowDirect 'Authentication Web Service' docs: <http://borrowdirect.pbworks.com/w/page/90132761/Authentication%20Web%20Service> (login required)
BorrowDirect 'Authorization Web Service' docs: <http://borrowdirect.pbworks.com/w/page/90132884/Authorization%20Web%20Service> (login required)
Called by BorrowDirect.run_auth_nz() """
def __init__( self, logger ):
self.logger = logger
def authenticate( self, patron_barcode, api_url, api_key, partnership_id, university_code ):
""" Accesses and returns authentication-id for storage.
Called by BorrowDirect.run_auth_nz(), Searcher.get_authorization_id(), and Requester.get_authorization_id() """
url = '%s/portal-service/user/authentication' % api_url
headers = { 'Content-type': 'application/json', 'Accept': 'text/plain'}
params = self._make_auth_params( patron_barcode, api_url, api_key, partnership_id, university_code )
self.logger.debug( 'params, `%s`' % pprint.pformat(params) )
r = requests.post( url, data=json.dumps(params), headers=headers )
self.logger.debug( 'auth response, `%s`' % unicode(r.content) )
authentication_id = r.json()['AuthorizationId']
return authentication_id
def _make_auth_params( self, patron_barcode, api_url, api_key, partnership_id, university_code ):
""" Preps param dict.
Called by authenticate() """
params = {
'ApiKey': api_key,
'UserGroup': 'patron',
'LibrarySymbol': university_code,
'PartnershipId': partnership_id,
'PatronId': patron_barcode }
return params
def authorize( self, api_url, authentication_id ):
""" Checks authorization and extends authentication session time.
Called by BorrowDirect.run_auth_nz() """
url = '%s/portal-service/user/authz/isAuthorized?aid=%s' % ( api_url, authentication_id )
r = requests.get( url )
dct = r.json()
state = dct['AuthorizationState']['State'] # boolean
assert type( state ) == bool
return state
# end class Authenticator
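# Minimal usage sketch (illustrative only, not part of the original module): the api url,
# api key, partnership id, university code and patron barcode below are placeholders, so a
# real BorrowDirect account is required for these calls to succeed.
if __name__ == '__main__':
    import logging
    logging.basicConfig( level=logging.DEBUG )
    example_logger = logging.getLogger( 'authenticator_example' )
    authenticator = Authenticator( example_logger )
    authentication_id = authenticator.authenticate(
        'patron_barcode', 'https://bd.example.org/api', 'api_key', 'partnership_id', 'UNIVERSITY_CODE' )
    example_logger.debug( 'authorized, `%s`' % authenticator.authorize( 'https://bd.example.org/api', authentication_id ) )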
|
the-stack_106_26541 | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
"""
import numpy as np
import pandas as pd
import streamlit as st
def color_negative_red(val):
"""
Takes a scalar and returns a string with
the css property `'color: red'` for negative
    values, black otherwise.
"""
color = "red" if val < 0 else "black"
return "color: %s" % color
def highlight_max(data, color="yellow"):
"""highlight the maximum in a Series or DataFrame"""
attr = "background-color: {}".format(color)
if data.ndim == 1: # Series from .apply(axis=0) or axis=1
is_max = data == data.max()
return [attr if v else "" for v in is_max]
else: # from .apply(axis=None)
is_max = data == data.max().max()
return pd.DataFrame(
np.where(is_max, attr, ""), index=data.index, columns=data.columns
)
# Create a table to be styled in various ways
np.random.seed(24)
df = pd.DataFrame({"A": np.linspace(1, 5, 5)})
df = pd.concat([df, pd.DataFrame(np.random.randn(5, 4), columns=list("BCDE"))], axis=1)
df.iloc[0, 2] = np.nan
# Unstyled
st._legacy_table(df)
# Custom formatting
st._legacy_table(df.style.format("{:.2%}"))
# Colors
st._legacy_table(
df.style.applymap(color_negative_red).apply(
highlight_max, color="darkorange", axis=0
)
)
# Add rows
x = st._legacy_table(
df.style.set_properties(**{"background-color": "black", "color": "lawngreen"})
)
x.legacy_add_rows(
pd.DataFrame(np.random.randn(3, 5)).style.set_properties(
**{"background-color": "lawngreen", "color": "black"}
)
)
x.legacy_add_rows(
pd.DataFrame(np.random.randn(2, 5)).style.format(
lambda value: "" if value > 0 else "*"
)
)
|
the-stack_106_26542 | import ccsyspath
LOG_PATH = "/var/log/codeplag.log"
SUPPORTED_EXTENSIONS = {
'py': [
r'.py\b'
],
'cpp': [
r'.cpp\b',
r'.c\b',
r'.h\b'
]
}
COMPILE_ARGS = '-x c++ --std=c++11'.split()
SYSPATH = ccsyspath.system_include_paths('clang++')
INCARGS = [b'-I' + inc for inc in SYSPATH]
COMPILE_ARGS = COMPILE_ARGS + INCARGS
|
the-stack_106_26545 | from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
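# Illustrative note (not part of the original source): with the ResNet-50-style settings
# assumed below, this backbone maps a (N, 3, 224, 224) batch to (N, output_dim) embeddings:
#   backbone = ModifiedResNet(layers=(3, 4, 6, 3), output_dim=1024, heads=32,
#                             input_resolution=224, width=64)
#   backbone(torch.randn(2, 3, 224, 224)).shape  # torch.Size([2, 1024])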
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisionTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width
)
else:
vision_heads = vision_width // 64
self.visual = VisionTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
# convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
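# Minimal usage sketch (illustrative, not part of the original source): builds a model from
# a plain state dict saved with torch.save(); the checkpoint path below is a placeholder.
# Official CLIP releases are TorchScript archives and would instead need
# torch.jit.load(path, map_location="cpu").state_dict().
if __name__ == "__main__":
    checkpoint = torch.load("clip_checkpoint.pt", map_location="cpu")
    clip_model = build_model(checkpoint)
    print(clip_model.context_length, clip_model.vocab_size, clip_model.visual.input_resolution)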
|
the-stack_106_26546 | import os, pickle, subprocess
from threading import Thread
import numpy as np
from datasets.open_bhb import OpenBHB
from sklearn.model_selection import GridSearchCV
from sklearn.base import is_classifier, is_regressor, clone
class OpenBHBMLTrainer(Thread):
"""
    A convenient worker specially adapted to perform ML on OpenBHB with scikit-learn. It can be executed in a
    standalone fashion. The methods start()/join() should be preferred to run the worker.
"""
def __init__(self, model, hyperparams, training_dataset, testing_dataset, train_indices=None, mask=None,
exp_name=None, saving_dir=None, scoring=None, scaler=None, n_jobs=1, logger=None, **kwargs):
"""
:param model: a scikit-learn model
:param hyperparams: hyper-parameters over which Grid-Search 3-fold Cross-Validation is performed with
scikit-learn
:param training_dataset/testing_dataset: OpenBHB datasets used for Train/Test.
:param train_indices (Optional): list of indices to give to <get_data> from OpenBHB (only for training)
:param mask (Optional): a binary mask to give to <get_data> from OpenBHB
:param exp_name: str, the results will be saved in <exp_name>
:param saving_dir: str, path to the results (if it does not exist, it is created)
:param scoring: scoring fn to give to scikit-learn <GridSearchCV> to perform grid-search
        :param scaler: a scikit-learn Scaler to transform train/test data
:param n_jobs: number of jobs to perform grid-search over set of hyper-parameters
        :param logger: python Logger to use to write the training/test results (convenient for debugging)
"""
super().__init__(**kwargs)
assert isinstance(training_dataset, OpenBHB) and isinstance(testing_dataset, OpenBHB), \
"Datasets must be OpenBHB"
assert (is_classifier(model) or is_regressor(model)), "Model must be a scikit-learn classifier or regressor"
self.model = model
self.hyperparams = hyperparams
self.training_dataset = training_dataset
self.test_dataset = testing_dataset
self.train_indices = train_indices
self.mask = mask
self.scoring = scoring
self.scaler = scaler
self.saving_dir = saving_dir
self.exp_name = exp_name
self.n_jobs = n_jobs
self.logger = logger
def run(self):
# Loads the data in memory
(X_train, y_train) = self.training_dataset.get_data(self.train_indices, mask=self.mask)
if self.logger is not None: self.logger.info("Data loaded.")
self.model_cv = GridSearchCV(self.model, self.hyperparams, n_jobs=self.n_jobs, scoring=self.scoring, cv=3)
if self.scaler is not None:
X_train = self.scaler.fit_transform(X_train)
# Performs Grid-Search with n_jobs workers
self.model_cv.fit(X_train, y_train)
# Reports the results on train
if self.logger is not None:
exp = os.path.join(self.saving_dir or '', self.exp_name or '{} {}'.format(self.model.__str__,
self.training_dataset.__str__))
self.logger.info("{}: Best score/params on Train: {} / {}".format(exp, self.model_cv.best_score_,
self.model_cv.best_params_))
# Free the memory as soon as possible
del (X_train)
(X_test, y_test) = self.test_dataset.get_data(mask=self.mask)
if self.scaler is not None:
X_test = self.scaler.fit_transform(X_test)
y_pred = self.model_cv.predict(X_test)
# Reports the results on tests
if self.logger is not None:
exp = os.path.join(self.saving_dir or '', self.exp_name or '{} {}'.format(self.model.__str__,
self.test_dataset.__str__))
self.logger.info("{}: Best score on Test: {}".format(exp, self.model_cv.score(X_test, y_test)))
## Saves the results on disk
file_name = self.exp_name or "Test_{}_{}.pkl".format(self.model.__str__, self.training_dataset.__str__)
if self.saving_dir is not None:
if not os.path.isdir(self.saving_dir):
# create the directory
subprocess.check_call(['mkdir', '-p', self.saving_dir])
file_name = os.path.join(self.saving_dir, file_name)
with open(file_name, 'wb') as f:
pickle.dump({'y_pred': y_pred, 'y_true': y_test}, f, protocol=4)
# saves the model in a distinct file
file_name = self.exp_name or "Test_{}_{}.pkl".format(self.model.__str__, self.training_dataset.__str__)
file_name = os.path.join(self.saving_dir, "Model_"+file_name)
with open(file_name, 'wb') as f:
pickle.dump({'model': self.model_cv}, f, protocol=4)
class MLTester(Thread):
"""
    A convenient worker specially adapted to test ML models on OpenBHB with scikit-learn. It can be executed in a
    standalone fashion. The methods start()/join() should be preferred to run the worker.
"""
def __init__(self, model, X_test, y_test, exp_name=None, saving_dir=None,
scaler=None, logger=None, **kwargs):
"""
        :param model: a scikit-learn model that implements predict()
        :param X_test, y_test: test data and target labels
        :param exp_name: str, the results will be saved in <exp_name>
        :param saving_dir: str, path to the results (if it does not exist, it is created)
        :param scaler: a scikit-learn Scaler to transform the test data
        :param logger: python Logger to use to write the test results (convenient for debugging)
"""
super().__init__(**kwargs)
assert hasattr(model, "predict"), "Model must implement predict()"
self.model = model
self.X_test, self.y_test = X_test, y_test
self.scaler = scaler
self.saving_dir = saving_dir
self.exp_name = exp_name
self.logger = logger
def run(self):
X_test, y_test = self.X_test.copy(), self.y_test.copy()
if self.scaler is not None:
X_test = self.scaler.fit_transform(X_test)
y_pred = self.model.predict(X_test)
# Reports the results on tests
if self.logger is not None:
exp = os.path.join(self.saving_dir or '', self.exp_name or '{}'.format(self.model.__str__))
self.logger.info("{}: Best score on Test: {}".format(exp, self.model.score(X_test, y_test)))
## Saves the results on disk
file_name = self.exp_name or "Test_{}.pkl".format(self.model.__str__)
if self.saving_dir is not None:
if not os.path.isdir(self.saving_dir):
# create the directory
subprocess.check_call(['mkdir', '-p', self.saving_dir])
file_name = os.path.join(self.saving_dir, file_name)
if os.path.isfile(file_name):
raise ValueError("File %s already exists ! Aborting...")
with open(file_name, 'wb') as f:
pickle.dump({'y_pred': y_pred, 'y_true': y_test}, f, protocol=4)
class MLTrainer(Thread):
"""
    A convenient worker specially adapted to perform ML with scikit-learn. It can be executed in a
    standalone fashion. The methods start()/join() should be preferred to run the worker.
"""
def __init__(self, model, hyperparams, X_train, y_train, X_val=None, y_val=None, X_test=None, y_test=None,
test_names=None, exp_name=None, saving_dir=None, save_model=True, scoring=None, n_jobs=1,
logger=None, **kwargs):
"""
:param model: a scikit-learn model
:param hyperparams: hyper-parameters over which Grid-Search 3-fold Cross-Validation is performed with
scikit-learn
:param X_train: np.array for training. If None, it will try to load the last checkpoint and eventually test the
model.
:param X_test (optional): list of testing np.array
:param y_test (optional): list of testing np.array target labels
:param test_names (Optional): list of str to be concatenated to <exp_name> for dumping testing results.
We assume len(test_names) == len(X_test)
:param exp_name: str, the results will be saved in <exp_name>
:param saving_dir: str, path to the results (if it does not exist, it is created)
:param save_model: boolean, whether the sklearn model is saved after training or not
:param scoring: scoring fn to give to scikit-learn <GridSearchCV> to perform grid-search
:param n_jobs: number of jobs to perform grid-search over set of hyper-parameters
:param logger: python Logger to use to write the training/test results (convenient for debugging)
"""
super().__init__(**kwargs)
self.logger = logger
self.last_checkpoint = None # Flag to indicate if we directly load the last checkpoint
self.exp_name = exp_name
self.model = model
self.saving_dir = saving_dir or ""
self.hyperparams = hyperparams
self.scoring = scoring
self.X_train, self.y_train = X_train, y_train
self.X_val, self.y_val = X_val, y_val
self.X_test, self.y_test = X_test, y_test
self.test_names = test_names
self.n_jobs = n_jobs
self.save_model = save_model
self.last_checkpoint = None
if X_train is None:
file_name = self.exp_name or "Test_{}.pkl".format(self.model.__str__)
self.last_checkpoint = os.path.join(self.saving_dir, "Model_" + file_name)
if self.logger is not None:
self.logger.warning("No X_train given, the last checkpoint to be loaded will be at %s"%
self.last_checkpoint)
else:
assert isinstance(X_train, np.ndarray)
assert (is_classifier(model) or is_regressor(model)), "Model must be a scikit-learn classifier or regressor"
if X_test is not None:
assert y_test is not None and test_names is not None, "<y_test> and <test_names> must be filled !"
assert len(y_test) == len(X_test) == len(test_names)
for (X_test, y_test) in zip(X_test, y_test):
assert len(X_test) == len(y_test), "Incorrect dimension for X_test or y_test ({} != {})".\
format(X_test.shape, np.array(y_test).shape)
if X_val is not None:
assert y_val is not None and len(y_val) == len(X_val)
def run(self):
# Performs Grid-Search with n_jobs workers
if self.last_checkpoint is None:
if self.X_val is not None:
n_train, n_val = len(self.X_train), len(self.X_val)
self.model_cv = GridSearchCV(self.model, self.hyperparams, n_jobs=self.n_jobs, scoring=self.scoring,
cv=[(np.arange(n_train), n_train+np.arange(n_val))], refit=False)
self.model_cv.fit(np.concatenate((self.X_train, self.X_val)),
np.concatenate((self.y_train, self.y_val)))
best_fold, cv_results = self.model_cv.best_index_, self.model_cv.cv_results_
best_score, best_params = cv_results['split0_test_score'][best_fold], cv_results['params'][best_fold]
self.model_cv = clone(self.model).set_params(**best_params)
# Refit by hand the model with the best params found only on training set
self.model_cv.fit(self.X_train, self.y_train)
else:
self.model_cv = GridSearchCV(self.model, self.hyperparams, n_jobs=self.n_jobs, scoring=self.scoring, cv=3)
self.model_cv.fit(self.X_train, self.y_train)
best_score, best_params = self.model_cv.best_score_, self.model_cv.best_params_
# Reports the results on train
if self.logger is not None:
exp = os.path.join(self.saving_dir or '', self.exp_name or '{}'.format(self.model.__str__, ))
self.logger.info("{}: Best score/params on Train: {} / {}".format(exp, best_score, best_params))
else:
try:
self.model_cv = MLTrainer.get_pickle(self.last_checkpoint).get("model")
except BaseException as e:
self.logger.error("Impossible to load %s: %s"%(self.last_checkpoint, e))
return
file_name = self.exp_name or "Test_{}.pkl".format(self.model.__str__)
file_name = os.path.join(self.saving_dir, "Model_" + file_name)
if self.last_checkpoint is None and self.save_model:
MLTrainer.save({'model': self.model_cv}, file_name)
if self.X_test is not None:
for (X_test, y_test, test_name) in zip(self.X_test, self.y_test, self.test_names):
y_pred = self.model_cv.predict(X_test)
kwargs = dict()
try:
if hasattr(self.model_cv, "predict_proba"):
kwargs["y_pred_proba"] = self.model_cv.predict_proba(X_test)
if hasattr(self.model_cv, "decision_function"):
kwargs["decision_function"] = self.model_cv.decision_function(X_test)
except BaseException as e:
if self.logger is not None:
self.logger.error(str(e))
# Reports the results on test
if self.logger is not None:
exp = os.path.join(self.saving_dir or '', '{} {}'.format(test_name, self.exp_name or self.model.__str__))
self.logger.info("{}: Best score on {}: {}".format(exp, test_name, self.model_cv.score(X_test, y_test)))
## Saves the results on disk
file_name = "{}_{}".format(test_name, self.exp_name or (self.model.__str__+'.pkl'))
file_name = os.path.join(self.saving_dir, file_name)
MLTrainer.save({'y_pred': y_pred, 'y_true': y_test, **kwargs}, file_name)
@staticmethod
def save(obj, file):
dir_path = os.path.dirname(file)
if dir_path != '' and not os.path.isdir(dir_path):
# create the directory
subprocess.check_call(['mkdir', '-p', dir_path])
with open(file, 'wb') as f:
pickle.dump(obj, f, protocol=4)
@staticmethod
def get_pickle(path):
import pickle
with open(path, 'rb') as f:
obj = pickle.load(f)
return obj
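# Minimal usage sketch (illustrative, not part of the original module): grid-searches a
# Ridge regressor in a background thread; the arrays, paths and names are placeholders.
if __name__ == "__main__":
    from sklearn.linear_model import Ridge
    rng = np.random.RandomState(0)
    x_tr, y_tr = rng.randn(100, 10), rng.randn(100)
    x_te, y_te = rng.randn(30, 10), rng.randn(30)
    trainer = MLTrainer(Ridge(), {"alpha": [0.1, 1.0, 10.0]}, x_tr, y_tr,
                        X_test=[x_te], y_test=[y_te], test_names=["toy_test"],
                        exp_name="ridge_demo.pkl", saving_dir="/tmp/ml_trainer_demo", n_jobs=1)
    trainer.start()
    trainer.join()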
|
the-stack_106_26548 | import numpy as np
from multiagent.core import World, Agent, Landmark, Radius
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self, args=None):
world = World()
# set any world properties first
world.dim_c = 2
num_good_agents = 1
num_adversaries = args['num_adversaries']
num_agents = num_adversaries + num_good_agents # deactivate "good" agent
num_landmarks = 2
num_view_radius = num_adversaries
# add agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.adversary = True if i < num_adversaries else False # last agent is good agent
agent.size = 0.075 if agent.adversary else 0.05
agent.accel = 3.0 if agent.adversary else 4.0
#agent.accel = 20.0 if agent.adversary else 25.0
agent.max_speed = 1.0 if agent.adversary else 1.3
agent.action_callback = None if i < (num_agents-1) else self.prey_policy
agent.view_radius = args['view_radius']
print("AGENT VIEW RADIUS set to: {}".format(agent.view_radius))
# add landmarks
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = True
landmark.movable = False
landmark.size = 0.2
landmark.boundary = False
world.radius = [Radius() for i in range(num_view_radius)]
for i, radius in enumerate(world.radius):
radius.name = 'radius %d' % i
radius.collide = False
radius.movable = False
radius.size = args['view_radius']
radius.boundary = False
# make initial conditions
self.reset_world(world)
self.score_function= args['score_function']
self.prey_level= args['prey_level']
world.env_info = {
'state_shape': (12+2*(num_adversaries-1))*num_adversaries,
'obs_shape': 12+2*(num_adversaries-1),
'n_actions': 2,
'n_agents': num_adversaries,
'episode_limit': 10
}
return world
def prey_policy(self, agent, world):
action = None
n = 100 # number of positions sampled
# sample actions randomly from a target circle
length = np.sqrt(np.random.uniform(0, 1, n))
angle = np.pi * np.random.uniform(0, 2, n)
x = length * np.cos(angle)
y = length * np.sin(angle)
# evaluate score for each position
# check whether positions are reachable
# sample a few evenly spaced points on the way and see if they collide with anything
scores = np.zeros(n, dtype=np.float32)
n_iter = 5
if self.score_function == "sum":
for i in range(n_iter):
waypoints_length = (length / float(n_iter)) * (i + 1)
x_wp = waypoints_length * np.cos(angle)
y_wp = waypoints_length * np.sin(angle)
proj_pos = np.vstack((x_wp, y_wp)).transpose() + agent.state.p_pos
for _agent in world.agents:
if _agent.name != agent.name:
delta_pos = _agent.state.p_pos - proj_pos
dist = np.sqrt(np.sum(np.square(delta_pos), axis=1))
dist_min = _agent.size + agent.size
scores[dist < dist_min] = -9999999
if i == n_iter - 1 and _agent.movable:
scores += dist
# Discourage the agent to leave the screen
# Code should be improved
scores[np.transpose(proj_pos)[0]<-1] = -9999999
scores[np.transpose(proj_pos)[0] > 1] = -9999999
scores[np.transpose(proj_pos)[1] < -1] = -9999999
scores[np.transpose(proj_pos)[1] > 1] = -9999999
elif self.score_function == "min":
rel_dis = []
adv_names = []
adversaries = self.adversaries(world)
proj_pos = np.vstack((x, y)).transpose() + agent.state.p_pos # the position of the 100 sampled points.
for adv in adversaries:
rel_dis.append(np.sqrt(np.sum(np.square(agent.state.p_pos - adv.state.p_pos))))
adv_names.append(adv.name)
min_dis_adv_name = adv_names[np.argmin(rel_dis)]
for adv in adversaries:
delta_pos = adv.state.p_pos - proj_pos
dist = np.sqrt(np.sum(np.square(delta_pos), axis=1))
dist_min = adv.size + agent.size
scores[dist < dist_min] = -9999999
if adv.name == min_dis_adv_name:
scores += dist
elif self.score_function == "random":
for i in range(n_iter):
waypoints_length = (length / float(n_iter)) * (i + 1)
x_wp = waypoints_length * np.cos(angle)
y_wp = waypoints_length * np.sin(angle)
proj_pos = np.vstack((x_wp, y_wp)).transpose() + agent.state.p_pos
scores[np.transpose(proj_pos)[0]<-1] = -9999999
scores[np.transpose(proj_pos)[0] > 1] = -9999999
scores[np.transpose(proj_pos)[1] < -1] = -9999999
scores[np.transpose(proj_pos)[1] > 1] = -9999999
else:
raise Exception("Unknown score function {}".format(self.score_function))
# move to best position
best_idx = np.argmax(scores)
chosen_action = np.array([x[best_idx]*self.prey_level, y[best_idx]*self.prey_level], dtype=np.float32)
if scores[best_idx] < 0:
chosen_action *= 0.0 # cannot go anywhere
return chosen_action
def reset_world(self, world):
# random properties for agents
for i, agent in enumerate(world.agents):
agent.color = np.array([0.35, 0.85, 0.35]) if not agent.adversary else np.array([0.85, 0.35, 0.35])
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
landmark.color = np.array([0.25, 0.25, 0.25])
for i, radius in enumerate(world.radius):
radius.color = np.array([0.25, 0.25, 0.25])
# set random initial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
for i, landmark in enumerate(world.landmarks):
if not landmark.boundary:
landmark.state.p_pos = np.random.uniform(-0.9, +0.9, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
for i, radius in enumerate(world.radius):
radius.state.p_pos = world.agents[i].state.p_pos
radius.state.p_vel = world.agents[i].state.p_vel
def benchmark_data(self, agent, world):
# returns data for benchmarking purposes
if agent.adversary:
collisions = 0
for a in self.good_agents(world):
if self.is_collision(a, agent):
collisions += 1
return collisions
else:
return 0
def is_collision(self, agent1, agent2):
delta_pos = agent1.state.p_pos - agent2.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
dist_min = agent1.size + agent2.size
return True if dist < dist_min else False
# return all agents that are not adversaries
def good_agents(self, world):
return [agent for agent in world.agents if not agent.adversary]
# return all adversarial agents
def adversaries(self, world):
return [agent for agent in world.agents if agent.adversary]
def reward(self, agent, world):
# Agents are rewarded based on minimum agent distance to each landmark
main_reward = self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)
return main_reward
def agent_reward(self, agent, world):
# Agents are negatively rewarded if caught by adversaries
rew = 0
shape = False
adversaries = self.adversaries(world)
if shape: # reward can optionally be shaped (increased reward for increased distance from adversary)
for adv in adversaries:
rew += 0.1 * np.sqrt(np.sum(np.square(agent.state.p_pos - adv.state.p_pos)))
if agent.collide:
for a in adversaries:
if self.is_collision(a, agent):
rew -= 10
# agents are penalized for exiting the screen, so that they can be caught by the adversaries
def bound(x):
if x < 0.9:
return 0
if x < 1.0:
return (x - 0.9) * 10
return min(np.exp(2 * x - 2), 10)
for p in range(world.dim_p):
x = abs(agent.state.p_pos[p])
rew -= bound(x)
return rew
def adversary_reward(self, agent, world):
# Adversaries are rewarded for collisions with agents
rew = 0
shape = False
agents = self.good_agents(world)
adversaries = self.adversaries(world)
if shape: # reward can optionally be shaped (decreased reward for increased distance from agents)
for adv in adversaries:
rew -= 0.1 * min([np.sqrt(np.sum(np.square(a.state.p_pos - adv.state.p_pos))) for a in agents])
if agent.collide:
for ag in agents:
for adv in adversaries:
if self.is_collision(ag, adv):
rew += 10
return rew
def observation(self, agent, world):
# get positions of all entities in this agent's reference frame
entity_pos = []
for entity in world.landmarks:
dist = np.sqrt(np.sum(np.square(entity.state.p_pos - agent.state.p_pos)))
if not entity.boundary and (agent.view_radius >= 0) and dist <= agent.view_radius:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
else:
entity_pos.append(np.array([0., 0.]))
# communication of all other agents
comm = []
other_pos = []
other_vel = []
for other in world.agents:
if other is agent: continue
dist = np.sqrt(np.sum(np.square(other.state.p_pos - agent.state.p_pos)))
if agent.view_radius >= 0 and dist <= agent.view_radius:
comm.append(other.state.c)
other_pos.append(other.state.p_pos - agent.state.p_pos)
if not other.adversary:
other_vel.append(other.state.p_vel)
else:
other_pos.append(np.array([0., 0.]))
if not other.adversary:
other_vel.append(np.array([0., 0.]))
return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + other_vel)
def full_observation(self, agent, world):
# get positions of all entities in this agent's reference frame
entity_pos = []
for entity in world.landmarks:
if not entity.boundary:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
# communication of all other agents
comm = []
other_pos = []
other_vel = []
for other in world.agents:
if other is agent: continue
comm.append(other.state.c)
other_pos.append(other.state.p_pos - agent.state.p_pos)
if not other.adversary:
other_vel.append(other.state.p_vel)
return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + other_vel)
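# Minimal usage sketch (illustrative, not part of the original module): builds the world with
# the argument keys this scenario reads; the values below are assumptions.
if __name__ == "__main__":
    scenario = Scenario()
    world = scenario.make_world(args={'num_adversaries': 3, 'view_radius': 1.0,
                                      'score_function': 'min', 'prey_level': 1.0})
    print(world.env_info)
    print([scenario.observation(agent, world).shape for agent in world.agents])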
|
the-stack_106_26549 | #!/usr/bin/env python2.6
"""
Tue Dec 4 11:54:18 PST 2012
Parse Blast XML output file and cluster sequences using greedy approach.
Input: Blast xml file
Output: Text file, each line = 1 cluster, each element of a cluster is space-separated
Algorithm summary:
Sorted sequences by descending in size
Start with the largest sequence and use that as seed
For each sequence:
Search for the closest seed that have >= %X similarity cutoff
If found such seed: add the sequence to the seed's cluster
else: the sequence becomes a seed of a new cluster
Cluster types:
1/ Seed has multiple expanded clones as matches.
1.1: The expanded clones are from the same sample with seed
1.2: The expanded clones are from at least one sample different from seed sample
2/ Seed has many small clones with similar motifs
Cutoffs includes: a/ minimum number of clones contribute to one motif, b/ number of samples
2.1: the clones carrying the motif are from the same sample with seed
2.2: the clones carrying the motif are from at least one different sample than seed sample
3/ Seed has no similar clones:
3.1: seed is expanded
3.2: seed is not expanded
4/ Everything else (similar to type 2 but did not pass the cutoffs, i.e seed has a small number of low-frequency hits, or too many motifs but not enough clones to support a single motif)
"""
import os, re, sys
from Bio.Blast import NCBIXML
from optparse import OptionParser
def getCloneInfo(clonestr):
#as11D;183042;size=8925
items = clonestr.lstrip('>').split(';')
sample = items[0]
size = int(items[-1].lstrip("size="))
id = items[1]
return sample, id, size
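# e.g. (illustrative) getCloneInfo(">as11D;183042;size=8925") returns ("as11D", "183042", 8925)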
class Clone():
def __init__(self, clonestr):
sample, id, size = getCloneInfo(clonestr)
self.desc = clonestr.lstrip('>')
self.sample = sample
self.id = id
self.size = size
self.seq = ''
self.hits = {} #key = hitCloneId, val = Hit
def setSeq(self, seq):
self.seq = seq
def addHit(self, hitid, hit):
self.hits[hitid] = hit
def setFreq(self, total):
if total == 0:
raise ValueError("Error: Total sequences of sample %s is 0." %(self.sample))
else:
self.freq = 100.0*self.size/total
def __cmp__(self, other):
return cmp(self.size, other.size)
class Cluster():
def __init__(self, seed):
self.clones = [seed]
self.totalReads = seed.size
self.numClones = 1
self.seed = seed
self.motif2count = {}
def addClone(self, clone):
if clone not in self.clones:
self.totalReads += clone.size
self.numClones += 1
self.clones.append(clone)
def setType(self, type):
self.type = type
def setMotifs(self, motif2count):
self.motif2count = motif2count
def __cmp__(self, other):
return cmp(self.totalReads, other.totalReads)
def typeid2desc(id):
id2desc = { 1.1: "Multiple expanded clones from 1 sample",
1.2: "Multiple expanded clones from at least 2 samples",
2.1: "Multiple non-expanded clones carrying the same motif, from 1 sample",
2.2: "Multiple non-expanded clones carrying the same motif, from >=2 samples",
3.1: "Clone with no hit, expanded",
3.2: "Clone with no hit, non-expanded",
4: "Others"}
return id2desc[id]
def isExpanded(clone, minSize, minFreq):
if clone.size >= minSize and clone.freq >= minFreq: #expanded
return True
return False
def isSuper(motif1, motif2):
#Return True if motif1 is a superset of motif2, otherwise return False
if motif1 == motif2 or len(motif1) != len(motif2):
return False
for i, m1 in enumerate(motif1):
if m1 != '.' and m1 != motif2[i]:
return False
return True
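# e.g. (illustrative) isSuper("A.C", "ABC") is True because the dot acts as a wildcard,
# while isSuper("ABC", "A.C") and isSuper("AB", "ABC") are both False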
def getClusterType(seed2cluster, options):
for seed, cluster in seed2cluster.iteritems():
seedclone = cluster.seed
if cluster.numClones == 1: #single element cluster
if isExpanded( seedclone, options.minExpSize, options.minExpFreq ):
cluster.setType(3.1)
else:
cluster.setType(3.2)
else:
numExp = 0
expSamples = []
motif2count = {}
motif2samples = {}
if isExpanded(seedclone, options.minExpSize, options.minExpFreq):
numExp += 1
expSamples.append(seedclone.sample)
for hitclone in cluster.clones:
if hitclone.desc != seed:
if isExpanded(hitclone, options.minExpSize, options.minExpFreq):
numExp += 1
if hitclone.sample not in expSamples:
expSamples.append(hitclone.sample)
if hitclone.desc not in seedclone.hits:
hit = hitclone.hits[seed]
#sys.stderr.write("Seed: %s. Hitclone: %s is not in the hits list: %s.\n" %(seed, hitclone.desc, " ".join(seedclone.hits.keys())))
else:
hit = seedclone.hits[hitclone.desc]
motif = ""
for i, q in enumerate(hit.query):
s = hit.sbjct[i]
if q == s:
motif += q
else:
motif += "."
#Search to see if any existing motif is a superset of current motif or if current motif is a super set of existing motif:
added = False
for prevmotif in motif2count.keys():
if motif == prevmotif or isSuper(prevmotif, motif):#prevmotif is a superset of current motif, update its count and don't add curr motif
motif2count[prevmotif] += 1
if hitclone.sample not in motif2samples[prevmotif]:
motif2samples[prevmotif].append(hitclone.sample)
added = True
break
if not added: #no prev motif is a super set of current motif
#check if current motif is a superset of prevmotif, add curr motif, remove previous motif
for prevmotif in motif2count.keys():
if isSuper(motif, prevmotif):
if motif not in motif2count:
motif2count[motif] = motif2count[prevmotif]
motif2samples[motif] = [ seedclone.sample ]
else:
motif2count[motif] += motif2count[prevmotif]
for sample in motif2samples[prevmotif]:
if sample not in motif2samples[motif]:
motif2samples[motif].append(sample)
del motif2count[prevmotif]
del motif2samples[prevmotif]
if motif not in motif2count:
motif2count[motif] = 1
motif2samples[motif] = [ seedclone.sample ]
else:
motif2count[motif] += 1
if hitclone.sample not in motif2samples[motif]:
motif2samples[motif].append(hitclone.sample)
if numExp >= options.minExpClones: #type 1
if len(expSamples) == 1: #only the seed clone
type = 1.1
else:
type = 1.2
else:
type = 4
for motif, count in motif2count.iteritems():
if count >= options.minMotifClones:#type 2
if len( motif2samples[motif] ) == 1:
type = 2.1
else:
type = 2.2
break
cluster.setType(type)
cluster.setMotifs(motif2count)
def getFh(type, fh11, fh12, fh21, fh22, fh31, fh32, fh4):
if type == 1.1:
return fh11
elif type == 1.2:
return fh12
elif type == 2.1:
return fh21
elif type == 2.2:
return fh22
elif type == 3.1:
return fh31
elif type == 3.2:
return fh32
else:
return fh4
def printClusters( outfile, seed2cluster ):
clusters = sorted( seed2cluster.values(), reverse = True )
fh = open(outfile, 'w')
outbasename = outfile.rstrip('txt').rstrip('.')
vjname = os.path.basename(outbasename)
fh11 = open( "%s_1.1" % outbasename, 'w' )
fh12 = open( "%s_1.2" % outbasename, 'w' )
fh21 = open( "%s_2.1" % outbasename, 'w' )
fh22 = open( "%s_2.2" % outbasename, 'w' )
fh31 = open( "%s_3.1" % outbasename, 'w' )
fh32 = open( "%s_3.2" % outbasename, 'w' )
fh4 = open( "%s_4" % outbasename, 'w' )
totalClones = 0
for i, cluster in enumerate(clusters):
clones = sorted( cluster.clones, key= lambda c:c.size, reverse=True)
fh.write( "%s\n" %(" ".join([c.desc for c in clones])) )
totalClones += cluster.numClones
fh_long = getFh( cluster.type, fh11, fh12, fh21, fh22, fh31, fh32, fh4 )
fh_long.write(">Cluster %d, type %.1f, %s, %d clones, %d totalReads, motifs: %s\n" %(i, cluster.type, vjname, cluster.numClones, cluster.totalReads, ";".join(["%s_%d" %(m,c) for m,c in cluster.motif2count.iteritems()]) ))
for c in clones:
fh_long.write("\t%s\t%s\t%f\n" %(c.seq, c.desc, c.freq))
fh_long.write("\n")
#fh_long.write("\nTotal clones: %d\nTotal clusters: %d\n" %(totalClones, len(clusters) ))
fh.close()
fh11.close()
fh12.close()
fh21.close()
fh22.close()
fh31.close()
fh32.close()
fh4.close()
def getClusters(clones):
seed2cluster = {} #key = seed, val = list of clones that cluster to the seed
if len(clones) == 0:
return seed2cluster
#First seed:
firstCluster = Cluster(clones[0])
seed2cluster[ clones[0].desc ] = firstCluster
if len(clones) == 1:
return seed2cluster
for clone in clones[1:]:
maxPos = 0
maxSize = 0
bestSeed = ''
for seed in seed2cluster:
if seed in clone.hits:
hit = clone.hits[seed]
#if hit.positives > maxPos:
if hit.identities > maxPos:
#maxPos = hit.positives
maxPos = hit.identities
maxSize = Clone(seed).size
bestSeed = seed
#elif hit.positives == maxPos:
elif hit.identities == maxPos:
currSize = Clone(seed).size
if currSize > maxSize:
maxSize = currSize
bestSeed = seed
if bestSeed != '':
seed2cluster[ bestSeed ].addClone( clone ) #add to the cluster of the closest seed
else:
cluster = Cluster(clone)
seed2cluster[ clone.desc ] = cluster #create new cluster
return seed2cluster
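# Note on the greedy clustering above: clones are visited in the pre-sorted input order;
# each clone joins the existing seed whose hit has the most identities (ties broken in
# favour of the larger seed clone), and a clone with no hit to any current seed starts
# a new cluster of its own.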
def readNcbiXml(infile, minLen, minPos, sample2total, sameLen):
rh = open(infile)
records = NCBIXML.parse(rh)
clones = []
for record in records:
if record.query_length < minLen: #too short, pass
continue
clone = Clone(record.query)
if sample2total:
clone.setFreq(sample2total[clone.sample])
for aln in record.alignments:
for hit in aln.hsps: # each hit
if len(hit.match) < record.query_length: #ignore local alignment
continue
if clone.seq == '':
clone.setSeq(hit.query)
                if sameLen and (re.search('-', hit.query) or re.search('-', hit.sbjct)): #don't allow for gapped alignments if sameLen is specified
continue
#if float(hit.positives)/len(hit.query) < minPos: #low similarity hit, ignore
if float(hit.identities)/len(hit.query) < minPos: #low identity hit, ignore
continue
hitid = aln.title.split()[-1]
if hitid == clone.desc: #self alignment, ignore
continue
clone.addHit(hitid, hit)
clones.append(clone)
#if sample2total:
# clones = sorted( clones, key=lambda c:c.freq, reverse=True )
#else:
# clones = sorted( clones, key=lambda c:c.size, reverse=True )
return clones
def readSample2total(file):
sample2total = {}
f = open(file, 'r')
for line in f:
items = line.strip().split()
sample2total[items[0]] = int(items[1])
f.close()
return sample2total
def getfiles(indir, ext):
files = []
for file in os.listdir(indir):
items = file.split('.')
if items[-1] == ext:
files.append(file)
return files
def getInfiles(input):
ext = 'xml'
if os.path.isdir(input): #input is a directory
infiles = getfiles(input, ext)
infiles = [ os.path.join(input, f) for f in infiles ]
else:
infiles = [input]
return infiles
def addOptions(parser):
parser.add_option('-i', '--input', dest='input', help='Input file or directory')
parser.add_option('-o', '--outfile', dest='outfile', help='Output file')
    parser.add_option('-p', '--positive', dest='minPos', type='float', default=0.9, help='Minimum fraction of identical positions for a hit to be kept. Default=%default')
parser.add_option('-l', '--len', dest='minLen', type='int', default=10, help='Minimum sequence length to be included in the output. Default=%default')
parser.add_option('-L', '--lenRestriction', dest='sameLen', action='store_true', default=False, help='If specified, only sequences of same length can be clustered together. Default=%default')
parser.add_option('-S', '--minExpandedSize', dest='minExpSize', type='int', default=1000, help='Minimum number of reads for a clone to be called "expanded". Default=%default')
    parser.add_option('-F', '--minExpandedFreq', dest='minExpFreq', type='float', default=0.0, help='Minimum frequency for a clone to be called "expanded". Range from 0 - 100. Default=%default')
parser.add_option('-C', '--minExpandedClones', dest='minExpClones', type='int', default=1, help='Minimum number of similar expanded clones for a seed and its cluster to be classified as type 1. Default=%default')
parser.add_option('-c', '--minMotifClones', dest='minMotifClones', type='int', default=10, help='Minimum number of clones carrying the same motif for a seed and its cluster to be classified as type 2. Default=%default ')
parser.add_option('--sample2total', dest='sample2total', help='Required if --minExpandedFreq is larger than 0. Format: <sample> <totalCount>')
#parser.add_option('-v', '--addV', dest='vfile', help='If specified, add the rest of V gene to each sequence in the output fasta files. Default = None')
def main():
usage = "usage: %prog [options]\n"
parser = OptionParser( usage=usage )
addOptions(parser)
options, args = parser.parse_args()
if options.minExpFreq > 0 and not options.sample2total:
parser.error("--sample2total is required as --minExpandedFreq > 0\n")
if options.sample2total:
options.sample2total = readSample2total(options.sample2total)
#Read input XML file(s):
infiles = getInfiles(options.input)
clones = []
for infile in infiles:
currclones = readNcbiXml(infile, options.minLen, options.minPos, options.sample2total, options.sameLen)
clones.extend( currclones )
if options.sample2total:
clones = sorted( clones, key=lambda c:c.freq, reverse=True )
else:
clones = sorted( clones, key=lambda c:c.size, reverse=True )
sys.stderr.write("Done reading input file. Total %d clones passed minLen\n" %len(clones))
#Done reading input XML file(s)
seed2cluster = getClusters(clones)
getClusterType(seed2cluster, options)
printClusters( options.outfile, seed2cluster )
if __name__ == '__main__':
main()
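# Illustrative invocation (script and file names are hypothetical, not part of the source):
#   python clusterClones.py -i blastXmlDir/ -o out/TRBV19_TRBJ2-3.txt \
#       -p 0.9 -l 10 -S 1000 -C 1 -c 10 --sample2total sample2total.txt
# where each line of sample2total.txt reads "<sample> <totalCount>", e.g. "patient1 1530492".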
|
the-stack_106_26550 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### A demonstration algorithm checking that an order can be placed for a pair not present
### in the brokerage by using the conversion between stablecoins
### </summary>
class StableCoinsRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2018, 5, 1)
self.SetEndDate(2018, 5, 2)
self.SetCash("USDT", 200000000)
self.SetBrokerageModel(BrokerageName.Binance, AccountType.Cash)
self.AddCrypto("BTCUSDT", Resolution.Hour, Market.Binance)
def OnData(self, data):
if not self.Portfolio.Invested:
self.SetHoldings("BTCUSDT", 1)
|
the-stack_106_26551 | # This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this open-source project.
""" Define the Logger class to print log"""
import os
import sys
import logging
from datetime import datetime
class Logger:
def __init__(self, args, output_dir):
log = logging.getLogger(output_dir)
if not log.handlers:
log.setLevel(logging.DEBUG)
# if not os.path.exists(output_dir):
# os.mkdir(args.data.output_dir)
fh = logging.FileHandler(os.path.join(output_dir,'log.txt'))
fh.setLevel(logging.INFO)
ch = ProgressHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
log.addHandler(fh)
log.addHandler(ch)
self.log = log
# setup TensorBoard
# if args.tensorboard:
# from tensorboardX import SummaryWriter
# self.writer = SummaryWriter(log_dir=args.output_dir)
# else:
self.writer = None
self.log_per_updates = args.log_per_updates
def set_progress(self, epoch, total):
self.log.info(f'Epoch: {epoch}')
self.epoch = epoch
self.i = 0
self.total = total
self.start = datetime.now()
def update(self, stats):
self.i += 1
if self.i % self.log_per_updates == 0:
remaining = str((datetime.now() - self.start) / self.i * (self.total - self.i))
remaining = remaining.split('.')[0]
updates = stats.pop('updates')
stats_str = ' '.join(f'{key}[{val:.8f}]' for key, val in stats.items())
self.log.info(f'> epoch [{self.epoch}] updates[{updates}] {stats_str} eta[{remaining}]')
if self.writer:
for key, val in stats.items():
self.writer.add_scalar(f'train/{key}', val, updates)
if self.i == self.total:
self.log.debug('\n')
self.log.debug(f'elapsed time: {str(datetime.now() - self.start).split(".")[0]}')
def log_eval(self, stats, metrics_group=None):
stats_str = ' '.join(f'{key}: {val:.8f}' for key, val in stats.items())
self.log.info(f'valid {stats_str}')
if self.writer:
for key, val in stats.items():
self.writer.add_scalar(f'valid/{key}', val, self.epoch)
# for mode, metrics in metrics_group.items():
# self.log.info(f'evaluation scores ({mode}):')
# for key, (val, _) in metrics.items():
# self.log.info(f'\t{key} {val:.4f}')
# if self.writer and metrics_group is not None:
# for key, val in stats.items():
# self.writer.add_scalar(f'valid/{key}', val, self.epoch)
# for key in list(metrics_group.values())[0]:
# group = {}
# for mode, metrics in metrics_group.items():
# group[mode] = metrics[key][0]
# self.writer.add_scalars(f'valid/{key}', group, self.epoch)
def __call__(self, msg):
self.log.info(msg)
class ProgressHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
log_entry = self.format(record)
if record.message.startswith('> '):
sys.stdout.write('{}\r'.format(log_entry.rstrip()))
sys.stdout.flush()
else:
sys.stdout.write('{}\n'.format(log_entry))
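# Minimal usage sketch (illustrative only; assumes `args` exposes a `log_per_updates`
# attribute and that the output directory already exists):
#
#   from argparse import Namespace
#   logger = Logger(Namespace(log_per_updates=10), 'runs/exp1')
#   logger.set_progress(epoch=1, total=1000)
#   logger.update({'updates': 10, 'loss': 0.1234})   # 'updates' is popped internally
#   logger.log_eval({'accuracy': 0.9876})
#   logger('free-form message')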
|
the-stack_106_26553 | # Copyright 2019 Stanislav Pidhorskyi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import print_function
import torch.utils.data
from scipy import misc
from torch import optim, nn
from torch.autograd import Variable
from torchvision.utils import save_image
from vae_gan_net import *
import numpy as np
import pickle
import time
import random
import os
from dlutils import batch_provider
from dlutils.pytorch.cuda_helper import *
from torchvision import transforms
im_size = 128
def process_batch(batch):
x = torch.from_numpy(np.asarray(batch, dtype=np.float32)).cuda()
# x = torch.from_numpy(np.asarray(batch, dtype=np.float32) / 255.)
x = x.view(-1, 1, im_size, im_size)
return x
def free_params(module: nn.Module):
for p in module.parameters():
p.requires_grad = True
def frozen_params(module: nn.Module):
for p in module.parameters():
p.requires_grad = False
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('ConvTranspose2d') != -1:
m.weight.data.normal_(0.0, 0.02)
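# weights_init above follows the usual DCGAN-style initialisation: conv weights drawn from
# N(0, 0.02), batch-norm scales from N(1, 0.02) with zero bias. Note that the test
# classname.find('Conv') != -1 already matches 'ConvTranspose2d', so the final elif branch
# is effectively unreachable.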
def main():
input_channels = 1
hidden_size = 128
max_epochs = 500
lr = 3e-4
beta = 20
alpha = 0.2
gamma = 30
batch_size = 60
G = VAE_GAN_Generator(input_channels, hidden_size).cuda()
D = Discriminator(input_channels).cuda()
# G.load_state_dict(torch.load('G.pkl'))
# D.load_state_dict(torch.load('D.pkl'))
G.apply(weights_init)
D.apply(weights_init)
criterion = nn.BCELoss()
criterion.cuda()
opt_enc = optim.RMSprop(G.encoder.parameters(), lr=lr, alpha=0.9)
opt_dec = optim.RMSprop(G.decoder.parameters(), lr=lr, alpha=0.9)
opt_dis = optim.RMSprop(D.parameters(), lr=lr * alpha, alpha=0.9)
#opt_dis = optim.RMSprop(D.parameters(), lr=lr )
fixed_noise = Variable(torch.randn(batch_size, hidden_size)).cuda()
for epoch in range(max_epochs):
G.train()
D.train()
#tmp= epoch % 5
with open('./data_noise_128.pkl', 'rb') as pkl:
data_noise = pickle.load(pkl)
with open('../vae_gan_brain/data_fold_train_128.pkl', 'rb') as pkl:
data_train = pickle.load(pkl)
#data_train=data_train[0:13376]
print("Train set size:", len(data_train))
batches = batch_provider(data_train, batch_size, process_batch, report_progress=True)
batches_noise = batch_provider(data_noise, batch_size, process_batch, report_progress=True)
D_real_list, D_rec_enc_list, D_rec_noise_list, D_list = [], [], [], []
g_loss_list, rec_loss_list, prior_loss_list = [], [], []
epoch_start_time = time.time()
i = 0
for x_noise,org in zip(batches_noise,batches):
# ones_label = torch.ones(batch_size).cuda()
# zeros_label = torch.zeros(batch_size).cuda()
ones_label = Variable(torch.ones(batch_size)).cuda()
zeros_label = Variable(torch.zeros(batch_size)).cuda()
datav = Variable(x_noise).cuda()
orgv=Variable(org).cuda()
mean, logvar, rec_enc = G(datav)
noisev = Variable(torch.randn(batch_size, hidden_size)).cuda()
rec_noise = G.decoder(noisev)
#
# ======== Train Discriminator ======== #
frozen_params(G)
free_params(D)
#
# train discriminator
output = D(orgv)
output=output.squeeze(1)
errD_real = criterion(output, ones_label)
D_real_list.append(output.data.mean())
output = D(rec_enc)
output=output.squeeze(1)
errD_rec_enc = criterion(output, zeros_label)
D_rec_enc_list.append(output.data.mean())
output = D(rec_noise)
output=output.squeeze(1)
errD_rec_noise = criterion(output, zeros_label)
D_rec_noise_list.append(output.data.mean())
dis_img_loss = errD_real + errD_rec_enc + errD_rec_noise
#dis_img_loss = errD_real + errD_rec_enc
# print ("print (dis_img_loss)", dis_img_loss)
D_list.append(dis_img_loss.data.mean())
opt_dis.zero_grad()
dis_img_loss.backward(retain_graph=True)
opt_dis.step()
# ======== Train Generator ======== #
free_params(G)
frozen_params(D)
# train decoder
output = D(orgv)
output=output.squeeze(1)
errD_real = criterion(output, ones_label)
output = D(rec_enc)
output=output.squeeze(1)
errD_rec_enc = criterion(output, zeros_label)
output = D(rec_noise)
output=output.squeeze(1)
errD_rec_noise = criterion(output, zeros_label)
similarity_rec_enc = D.similarity(rec_enc)
similarity_data = D.similarity(orgv)
dis_img_loss = errD_real + errD_rec_enc + errD_rec_noise
#dis_img_loss = errD_real + errD_rec_enc
#print ("dis_img_loss",dis_img_loss)
#gen_img_loss = - dis_img_loss
gen_img_loss = -dis_img_loss
g_loss_list.append(gen_img_loss.data.mean())
rec_loss = ((similarity_rec_enc - similarity_data) ** 2).mean()
rec_loss_list.append(rec_loss.data.mean())
err_dec = gamma * rec_loss + gen_img_loss
#print("err_dec",err_dec)
opt_dec.zero_grad()
err_dec.backward(retain_graph=True)
opt_dec.step()
# train encoder
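            # prior_loss below is the closed-form KL divergence between the encoder
            # posterior N(mean, exp(logvar)) and the standard normal prior:
            #   KL = -0.5 * sum(1 + logvar - mean^2 - exp(logvar)),
            # normalised here by the number of latent elements.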
prior_loss = 1 + logvar - mean.pow(2) - logvar.exp()
prior_loss = (-0.5 * torch.sum(prior_loss)) / torch.numel(mean.data)
#print (prior_loss, mean, std)
prior_loss_list.append(prior_loss.data.mean())
err_enc = prior_loss + beta * rec_loss
opt_enc.zero_grad()
err_enc.backward()
opt_enc.step()
#############################################
os.makedirs('results_ori', exist_ok=True)
os.makedirs('results_rec', exist_ok=True)
os.makedirs('results_gen', exist_ok=True)
epoch_end_time = time.time()
per_epoch_ptime = epoch_end_time - epoch_start_time
            # report losses and save samples every m batches (only on every 5th epoch)
m = 6
i += 1
if epoch%5==0 and i % m == 0:
print(
'[%d/%d]: D_real:%.4f, D_enc:%.4f, D_noise:%.4f, Loss_D:%.4f,Loss_G:%.4f, rec_loss:%.4f, prior_loss:%.4f'
# '[%d/%d]: D_real:%.4f, D_enc:%.4f, Loss_D:%.4f, \\'
% (epoch,
max_epochs,
torch.mean(torch.tensor(D_real_list)),
torch.mean(torch.tensor(D_rec_enc_list)),
torch.mean(torch.tensor(D_rec_noise_list)),
torch.mean(torch.tensor(D_list)),
torch.mean(torch.tensor(g_loss_list)),
torch.mean(torch.tensor(rec_loss_list)),
torch.mean(torch.tensor(prior_loss_list))))
with torch.no_grad():
D.eval()
G.eval()
_, _, x_rec = G.forward(x_noise)
x_gen = G.decoder(fixed_noise)
x_noise=x_noise.cpu()
x_gen=x_gen.cpu()
x_rec=x_rec.cpu()
# save_image(resultsample.view(-1, 3, im_size, im_size),
# 'results_rec/sample_' + str(epoch) + "_" + str(i) + '.png')
for j in range(20,29):
org_img = transforms.ToPILImage()(x_noise[j].squeeze(0)).convert('L')
rec_img = transforms.ToPILImage()(x_rec[j].squeeze(0)).convert('L')
gen_img = transforms.ToPILImage()(x_gen[j].squeeze(0)).convert('L')
org_img.save('results_ori/ori_' + str(epoch) + "_" + str(i) +"_"+str(j)+ '.png')
rec_img.save('results_rec/rec_' + str(epoch) + "_" + str(i) + "_"+str(j)+ '.png')
gen_img.save('results_gen/gen_' + str(epoch) + "_" + str(i) +"_"+str(j)+ '.png')
# resultsample = x_rec * 0.5 + 0.5
# resultsample = resultsample.cpu()
# save_image(resultsample.view(-1, 3, im_size, im_size),
# 'results_gen/sample_' + str(epoch) + "_" + str(i) + '.png')
del batches
del data_train
del batches_noise
del data_noise
print("Training finish!... save training results")
torch.save(G.state_dict(), "G_noise.pkl")
torch.save(D.state_dict(), "D_noise.pkl")
if __name__ == '__main__':
main()
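# Note on the expected inputs (inferred from process_batch above, not documented in the
# original source): the pickled datasets are presumably sequences of 128x128 single-channel
# images already scaled to [0, 1], since each batch is only reshaped to (-1, 1, 128, 128)
# and the /255 normalisation is left commented out.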
|
the-stack_106_26554 | import uuid
import random
import string
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
from django.db.models import UUIDField
from jsonfield import JSONField
import Levenshtein as lev
author = 'oTree Bogota Tutorial 2018'
doc = """
This game generates a random string of <b>5 numbers</b>, <b>15 letters</b> and
at most <b>5 spaces</b>. Every string must be at least a minimum Levenshtein
distance away from all the others.
"""
class Constants(BaseConstants):
name_in_url = 'real_effort0'
players_per_group = None
random_string_conf = {"numbers": 5, "letters": 15, "spaces": 5}
num_rounds = 1
timeout = 60
texts_number = 100
text_size = 5 # words
min_distance_different_text = 10
class Subsession(BaseSubsession):
texts = JSONField()
def random_string(self, numbers, letters, spaces):
numbers = [random.choice(string.digits) for _ in range(numbers)]
letters = [random.choice(string.ascii_uppercase) for _ in range(letters)]
spaces = [" "] * spaces
rstring = numbers + letters + spaces
random.shuffle(rstring)
return " ".join("".join(rstring).strip().split())
def creating_session(self):
texts = []
while len(texts) < Constants.texts_number:
text = self.random_string(**Constants.random_string_conf)
distances = [lev.distance(text, t) for t in texts]
if not texts or min(distances) > Constants.min_distance_different_text:
texts.append(text)
self.texts = texts
def set_payoffs(self):
players = self.get_players()
payoff = sum([p.current_text_idx for p in players]) / len(players)
for p in players:
p.payoff = payoff
class Group(BaseGroup):
pass
class Player(BasePlayer):
token = UUIDField(default=uuid.uuid4, editable=False)
end = models.BooleanField(default=False)
current_text_idx = models.IntegerField(default=0)
def current_text(self):
try:
return self.subsession.texts[self.current_text_idx]
except IndexError:
return None
def is_transcription_accurate(self, transcription):
text = self.current_text()
if lev.distance(text, transcription) <= Constants.min_distance_different_text:
self.current_text_idx += 1
return True
return False
|