code | repo_name | path | language | license | size
---|---|---|---|---|---|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..preprocess import TCorrelate
def test_TCorrelate_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
num_threads=dict(nohash=True,
usedefault=True,
),
out_file=dict(argstr='-prefix %s',
name_source='xset',
name_template='%s_tcorr',
),
outputtype=dict(),
pearson=dict(argstr='-pearson',
),
polort=dict(argstr='-polort %d',
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
xset=dict(argstr='%s',
copyfile=False,
mandatory=True,
position=-2,
),
yset=dict(argstr='%s',
copyfile=False,
mandatory=True,
position=-1,
),
)
inputs = TCorrelate.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_TCorrelate_outputs():
output_map = dict(out_file=dict(),
)
outputs = TCorrelate.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
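# A minimal usage sketch (not part of the auto-generated test): how the
# TCorrelate interface exercised above might be driven. The file names are
# hypothetical, and the exact command line depends on the argstr/position
# metadata asserted in input_map.
def example_tcorrelate_usage():
    tcorr = TCorrelate()
    tcorr.inputs.xset = 'functional.nii'
    tcorr.inputs.yset = 'functional_detrended.nii'
    tcorr.inputs.pearson = True
    tcorr.inputs.polort = -1
    return tcorr.cmdline  # e.g. '3dTcorrelate -pearson -polort -1 ...'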
| mick-d/nipype | nipype/interfaces/afni/tests/test_auto_TCorrelate.py | Python | bsd-3-clause | 1,367 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
logger = logging.getLogger('magiccontent.default_auth')
def naive_can_edit(request):
logger.warning(
('naive_can_edit method has been used, please provide a '
'GALLERY_PAGE_IS_OWNER_METHOD to improve the content security'))
if request.user.is_authenticated() and request.user.is_staff:
return True
return False
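# A minimal sketch (an assumption, not part of this module) of the stricter,
# project-specific check that the warning above asks for; the permission name
# used here is hypothetical.
def example_can_edit(request):
    user = request.user
    return user.is_authenticated() and user.has_perm('magicgallery.change_gallery')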
| DjenieLabs/django-magic-gallery | magicgallery/default_auth.py | Python | bsd-3-clause | 471 |
from __future__ import absolute_import
from sentry.identity.vsts import VSTSIdentityProvider
from sentry.integrations.exceptions import IntegrationError
from sentry.integrations.vsts import VstsIntegration, VstsIntegrationProvider
from sentry.models import (
Integration, IntegrationExternalProject, OrganizationIntegration, Repository,
Project
)
from sentry.plugins import plugins
from tests.sentry.plugins.testutils import VstsPlugin # NOQA
from .testutils import VstsIntegrationTestCase, CREATE_SUBSCRIPTION
class VstsIntegrationProviderTest(VstsIntegrationTestCase):
# Test data setup in ``VstsIntegrationTestCase``
def test_basic_flow(self):
self.assert_installation()
integration = Integration.objects.get(provider='vsts')
assert integration.external_id == self.vsts_account_id
assert integration.name == self.vsts_account_name
metadata = integration.metadata
assert metadata['scopes'] == list(VSTSIdentityProvider.oauth_scopes)
assert metadata['subscription']['id'] == \
CREATE_SUBSCRIPTION['publisherInputs']['tfsSubscriptionId']
assert metadata['domain_name'] == '{}.visualstudio.com'.format(
self.vsts_account_name
)
def test_migrate_repositories(self):
accessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name=self.project_a['name'],
url='https://{}.visualstudio.com/DefaultCollection/_git/{}'.format(
self.vsts_account_name,
self.repo_name,
),
provider='visualstudio',
external_id=self.repo_id,
)
inaccessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name='NotReachable',
url='https://randoaccount.visualstudio.com/Product/_git/NotReachable',
provider='visualstudio',
external_id='123456789',
)
self.assert_installation()
integration = Integration.objects.get(provider='vsts')
assert Repository.objects.get(
id=accessible_repo.id,
).integration_id == integration.id
assert Repository.objects.get(
id=inaccessible_repo.id,
).integration_id is None
def setupPluginTest(self):
self.project = Project.objects.create(
organization_id=self.organization.id,
)
self.plugin = plugins.get('vsts')
self.plugin.enable(self.project)
def test_disabled_plugin_when_fully_migrated(self):
self.setupPluginTest()
Repository.objects.create(
organization_id=self.organization.id,
name=self.project_a['name'],
url='https://{}.visualstudio.com/DefaultCollection/_git/{}'.format(
self.vsts_account_name,
self.repo_name,
),
provider='visualstudio',
external_id=self.repo_id,
)
# Enabled before Integration installation
assert 'vsts' in [p.slug for p in plugins.for_project(self.project)]
self.assert_installation()
# Disabled
assert 'vsts' not in [p.slug for p in plugins.for_project(self.project)]
def test_doesnt_disable_plugin_when_partially_migrated(self):
self.setupPluginTest()
# Repo accessible by new Integration
Repository.objects.create(
organization_id=self.organization.id,
name=self.project_a['name'],
url='https://{}.visualstudio.com/DefaultCollection/_git/{}'.format(
self.vsts_account_name,
self.repo_name,
),
provider='visualstudio',
external_id=self.repo_id,
)
# Inaccessible Repo - causes plugin to stay enabled
Repository.objects.create(
organization_id=self.organization.id,
name='NotReachable',
url='https://randoaccount.visualstudio.com/Product/_git/NotReachable',
provider='visualstudio',
external_id='123456789',
)
self.assert_installation()
# Still enabled
assert 'vsts' in [p.slug for p in plugins.for_project(self.project)]
def test_build_integration(self):
state = {
'account': {
'AccountName': self.vsts_account_name,
'AccountId': self.vsts_account_id,
},
'instance': '{}.visualstudio.com'.format(self.vsts_account_name),
'identity': {
'data': {
'access_token': self.access_token,
'expires_in': '3600',
'refresh_token': self.refresh_token,
'token_type': 'jwt-bearer',
},
},
}
integration = VstsIntegrationProvider()
integration_dict = integration.build_integration(state)
assert integration_dict['name'] == self.vsts_account_name
assert integration_dict['external_id'] == self.vsts_account_id
assert integration_dict['metadata']['domain_name'] == \
'{}.visualstudio.com'.format(self.vsts_account_name)
assert integration_dict['user_identity']['type'] == 'vsts'
assert integration_dict['user_identity']['external_id'] == \
self.vsts_account_id
assert integration_dict['user_identity']['scopes'] == sorted(
VSTSIdentityProvider.oauth_scopes)
def test_webhook_subscription_created_once(self):
self.assert_installation()
state = {
'account': {
'AccountName': self.vsts_account_name,
'AccountId': self.vsts_account_id,
},
'instance': '{}.visualstudio.com'.format(self.vsts_account_name),
'identity': {
'data': {
'access_token': self.access_token,
'expires_in': '3600',
'refresh_token': self.refresh_token,
'token_type': 'jwt-bearer',
},
},
}
# The above already created the Webhook, so subsequent calls to
# ``build_integration`` should omit that data.
data = VstsIntegrationProvider().build_integration(state)
assert 'subscription' not in data['metadata']
def test_fix_subscription(self):
external_id = '1234567890'
Integration.objects.create(
metadata={},
provider='vsts',
external_id=external_id,
)
data = VstsIntegrationProvider().build_integration({
'account': {
'AccountName': self.vsts_account_name,
'AccountId': external_id,
},
'instance': '{}.visualstudio.com'.format(self.vsts_account_name),
'identity': {
'data': {
'access_token': self.access_token,
'expires_in': '3600',
'refresh_token': self.refresh_token,
'token_type': 'jwt-bearer',
},
},
})
assert external_id == data['external_id']
subscription = data['metadata']['subscription']
assert subscription['id'] is not None and subscription['secret'] is not None
class VstsIntegrationTest(VstsIntegrationTestCase):
def test_get_organization_config(self):
self.assert_installation()
integration = Integration.objects.get(provider='vsts')
fields = integration.get_installation(
integration.organizations.first().id
).get_organization_config()
assert [field['name'] for field in fields] == [
'sync_status_forward',
'sync_forward_assignment',
'sync_comments',
'sync_status_reverse',
'sync_reverse_assignment',
]
def test_update_organization_config_remove_all(self):
self.assert_installation()
model = Integration.objects.get(provider='vsts')
integration = VstsIntegration(model, self.organization.id)
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id,
)
data = {
'sync_status_forward': {},
'other_option': 'hello',
}
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=1,
resolved_status='ResolvedStatus1',
unresolved_status='UnresolvedStatus1',
)
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=2,
resolved_status='ResolvedStatus2',
unresolved_status='UnresolvedStatus2',
)
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=3,
resolved_status='ResolvedStatus3',
unresolved_status='UnresolvedStatus3',
)
integration.update_organization_config(data)
external_projects = IntegrationExternalProject.objects \
.all() \
.values_list('external_id', flat=True)
assert list(external_projects) == []
config = OrganizationIntegration.objects.get(
organization_id=org_integration.organization_id,
integration_id=org_integration.integration_id
).config
assert config == {
'sync_status_forward': False,
'other_option': 'hello',
}
def test_update_organization_config(self):
self.assert_installation()
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id,
)
model = Integration.objects.get(provider='vsts')
integration = VstsIntegration(model, self.organization.id)
# test validation
data = {
'sync_status_forward': {
1: {
'on_resolve': '',
'on_unresolve': 'UnresolvedStatus1',
},
},
}
with self.assertRaises(IntegrationError):
integration.update_organization_config(data)
data = {
'sync_status_forward': {
1: {
'on_resolve': 'ResolvedStatus1',
'on_unresolve': 'UnresolvedStatus1',
},
2: {
'on_resolve': 'ResolvedStatus2',
'on_unresolve': 'UnresolvedStatus2',
},
4: {
'on_resolve': 'ResolvedStatus4',
'on_unresolve': 'UnresolvedStatus4',
},
},
'other_option': 'hello',
}
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=1,
resolved_status='UpdateMe',
unresolved_status='UpdateMe',
)
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=2,
resolved_status='ResolvedStatus2',
unresolved_status='UnresolvedStatus2',
)
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=3,
resolved_status='ResolvedStatus3',
unresolved_status='UnresolvedStatus3',
)
integration.update_organization_config(data)
external_projects = IntegrationExternalProject.objects \
.all() \
.order_by('external_id')
assert external_projects[0].external_id == '1'
assert external_projects[0].resolved_status == 'ResolvedStatus1'
assert external_projects[0].unresolved_status == 'UnresolvedStatus1'
assert external_projects[1].external_id == '2'
assert external_projects[1].resolved_status == 'ResolvedStatus2'
assert external_projects[1].unresolved_status == 'UnresolvedStatus2'
assert external_projects[2].external_id == '4'
assert external_projects[2].resolved_status == 'ResolvedStatus4'
assert external_projects[2].unresolved_status == 'UnresolvedStatus4'
config = OrganizationIntegration.objects.get(
organization_id=org_integration.organization_id,
integration_id=org_integration.integration_id
).config
assert config == {
'sync_status_forward': True,
'other_option': 'hello',
}
| ifduyue/sentry | tests/sentry/integrations/vsts/test_integration.py | Python | bsd-3-clause | 12,757 |
"""A way to read and write structs to a binary file, with fast access
Licensed under the 3-clause BSD License:
Copyright (c) 2011-2014, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys, time
import struct
class StructFile(object):
"""A file which contains structs"""
def __init__(self, structfmt, fname):
"""Initializes a structfile using the given structfmt and fname.
        The file is opened for reading, plus a read/write handle when permissions allow."""
self.struct = struct.Struct(structfmt)
self.size = self.struct.size
self.fname = fname
if not os.path.exists(fname):
open(fname, 'wb').close()
self.readptr = open(fname, 'rb')
try:
self.writeptr = open(fname, 'r+b')
except IOError:
self.writeptr = None
def __len__(self):
"""Returns the number of structs in this file"""
f = self.readptr
f.seek(0, os.SEEK_END)
n = f.tell()
        return n // self.size
def __iter__(self):
"""Iterates over structs in this file, from the beginning"""
f = open(self.fname, 'rb')
        while 1:
            data = f.read(self.size)
            if len(data) < self.size:
                # f.read() returns a short or empty string at EOF instead of
                # raising EOFError, so stop iterating here.
                break
            yield self.struct.unpack(data)
def __getitem__(self, i):
"""Returns the i'th struct.
Negative indices work as well.
        Raises IndexError on invalid index.
"""
l = len(self)
if i < 0:
i += l
if i >= l: raise IndexError
f = self.readptr
f.seek(self.size*i)
return self.struct.unpack(f.read(self.size))
def __setitem__(self, i, val):
"""Sets the i'th struct. The file must already have this many structs.
Negative indices work as well.
        Raises IndexError on invalid index.
Raises IOError if the file doesn't have write permissions.
"""
l = len(self)
if i < 0:
i += l
if i >= l: raise IndexError
f = self.writeptr
if not f: raise IOError
f.seek(self.size*i)
f.write(self.struct.pack(*val))
def flush(self):
"""Flushes the file if any changes have been made.
Raises IOError if the file doesn't have write permissions.
"""
if not self.writeptr: raise IOError
self.writeptr.flush()
def append(self, val):
"""Adds the given value to the end of the file.
Raises IOError if the file doesn't have write permissions.
"""
f = self.writeptr
if not f: raise IOError
f.seek(0, os.SEEK_END)
f.write(self.struct.pack(*val))
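# A minimal usage sketch (assuming the current directory is writable and
# 'points.dat' is a scratch file):
if __name__ == '__main__':
    sf = StructFile('2if', 'points.dat')    # each record: two ints and a float
    sf.append((1, 2, 3.5))
    sf.append((4, 5, 6.5))
    sf.flush()
    print(len(sf))         # 2
    print(sf[0])           # (1, 2, 3.5)
    sf[1] = (7, 8, 9.5)    # overwrite the second record in place
    sf.flush()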
| neeraj-kumar/nkpylib | structfile.py | Python | bsd-3-clause | 4,103 |
import platform
from . import meta
from . import parser
from . import tools
from . import exc
Log = tools.minimal_logger(__name__)
def get_parser(**kw):
"""
Detect the proper parser class, and return it instantiated.
Optional Arguments:
parser
The parser class to use instead of detecting the proper one.
distro
The distro to parse for (used for testing).
kernel
The kernel to parse for (used for testing).
ifconfig
The ifconfig (stdout) to pass to the parser (used for testing).
"""
parser = kw.get('parser', None)
ifconfig = kw.get('ifconfig', None)
if not parser:
distro = kw.get('distro', platform.system())
        full_kernel = kw.get('kernel', platform.uname()[2])
        # Compare (major, minor) numerically; parsing the joined string as a
        # float would mis-order versions such as 3.13 vs 3.3.
        kernel = tuple(int(x) for x in full_kernel.split('.')[0:2])
        if distro == 'Linux':
            if kernel < (3, 3):
from .parser import Linux2Parser as LinuxParser
else:
from .parser import LinuxParser
            Log.debug("Using parser class '%s'" % LinuxParser.__name__)
parser = LinuxParser(ifconfig=ifconfig)
elif distro in ['Darwin', 'MacOSX']:
from .parser import MacOSXParser
parser = MacOSXParser(ifconfig=ifconfig)
elif distro in ['FreeBSD']:
from .parser import FreeBSDParser
parser = FreeBSDParser(ifconfig=ifconfig)
else:
raise exc.IfcfgParserError("Unknown distro type '%s'." % distro)
Log.debug("Distro detected as '%s'" % distro)
Log.debug("Using '%s'" % parser)
return parser
def interfaces():
"""
Return just the parsed interfaces dictionary from the proper parser.
"""
parser = get_parser()
return parser.interfaces
def default_interface():
"""
Return just the default interface device dictionary.
"""
parser = get_parser()
return parser.default_interface
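# A minimal usage sketch (assumes the host has at least one configured
# interface; the 'inet' key name is an assumption about the per-interface
# dictionaries returned by the parser):
if __name__ == '__main__':
    for name, info in interfaces().items():
        print('%s -> %s' % (name, info.get('inet')))
    print(default_interface())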
| bendikro/python-ifcfg | ifcfg/__init__.py | Python | bsd-3-clause | 1,934 |
from numpy.testing import assert_allclose, assert_equal
from . import plt
from .. import utils
def test_path_data():
circle = plt.Circle((0, 0), 1)
vertices, codes = utils.SVG_path(circle.get_path())
assert_allclose(vertices.shape, (25, 2))
assert_equal(codes, ['M', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'Z'])
def test_linestyle():
linestyles = {'solid': 'none', '-': 'none',
#'dashed': '6,6', '--': '6,6',
#'dotted': '2,2', ':': '2,2',
#'dashdot': '4,4,2,4', '-.': '4,4,2,4',
'': None, 'None': None}
for ls, result in linestyles.items():
line, = plt.plot([1, 2, 3], linestyle=ls)
assert_equal(utils.get_dasharray(line), result)
def test_axis_w_fixed_formatter():
positions, labels = [0, 1, 10], ['A','B','C']
plt.xticks(positions, labels)
props = utils.get_axis_properties(plt.gca().xaxis)
assert_equal(props['tickvalues'], positions)
assert_equal(props['tickformat'], labels)
| mpld3/mplexporter | mplexporter/tests/test_utils.py | Python | bsd-3-clause | 1,024 |
# Copyright (c) 2015, Lars Tingelstad
# All rights reserved.
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pyversor nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Operations on directions in 3D conformal geometric algebra."""
from __pyversor__.c3d.directions import (
DirectionVector, DirectionBivector, DirectionTrivector)
| tingelst/pyversor | pyversor/c3d/directions.py | Python | bsd-3-clause | 1,721 |
#!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Responsible for generating the testing decoders based on
parsed table representations.
"""
# This file generates testing code for our class decoder. The decoder
# tables are specifically written to minimize the number of decoder
# classes needed to parse valid ARM instructions. For testing, this is
# a problem. We can't (easily) tell if the intended instruction rules
# of ARM are being met, since there is not a one-to-one mapping from
# class decoders to rules.
#
# For example, consider the following two rows (from armv7.table):
#
# | 0011x - = Binary4RegisterShiftedOp => Defs12To15RdRnRsRmNotPc
# Rsb_Rule_144_A1_P288
# cccc0000011snnnnddddssss0tt1mmmm
# RegsNotPc
# | 0100x - = Binary4RegisterShiftedOp => Defs12To15RdRnRsRmNotPc
# Add_Rule_7_A1_P26
# cccc0000100snnnnddddssss0tt1mmmm
# RegsNotPc
#
# Both rows state to return a Binary4RegisterShiftedOp class decoder.
# The sequence of four symbols correspond to (in order presented):
#
# baseline - The name of the class decoder that should be used for testing.
# actual - The name of the class decoder to use in sel_ldr
# rule - A unique name identifying the rule from the manual that
# defines what the selected class decoder is to decode.
# pattern - The sequence of bits defines by the rule (above)
# constraints - Any additional constraints assumed by the rule.
#
# All but the baseline is optional. The remaining fields provide
# additional documentation and information for testing (which is used
# by this file). If the actual is not specified (prefixed by '=>')
# then it is assumed to have the same value as the baseline.
#
# If these two rows had a mergeable bit pattern (which they do not),
# these rows would still not be mergeable, since the actions are
# different. However, for sel_ldr, they both state to use a
# Binary4RegisterShiftedOp. The remaining identifiers are added data
# for testing only.
#
# We fix this by defining a notion of "action_filter" where one can
# choose to keep only those fields that are applicable. For sel_ldr,
# it's only 'actual'. For testing, it will include other fields,
# depending on the context.
#
# Note: The current ARM instruction table has both new and old
# actions. Old actions only define the 'InstClass' entry. If the
# remaining fields are omitted, the corresponding testing for those
# entries are omitted.
#
# Note: See dgen_decoder_output.py for more details on how we build a
# decoder for sel_ldr.
#
# For testing, we would like to know the specific instruction rule
# that was being tested. Further, we would like to know what
# instruction rule was chosen for each decoder class selection made by
# the parse tables. To do this, we do two levels of wrapping.
#
# This file generates a set of wrapper classes, each a subclass of
# NamedClassDecoder. One is generated for each InstClass needed by
# sel_ldr (i.e. only the 'actual' field). These named classes correspond
# to what sel_ldr will select.
#
# The named version of each named InstClass is:
#
# class NamedInstClass : public NamedClassDecoder {
# public:
# NamedInstClass()
# : NamedClassDecoder(decoder_, "InstClass")
# {}
#
# private:
# Binary3RegisterShiftedTest decoder_;
# NACL_DISALLOW_COPY_AND_ASSIGN(NamedInstClass);
#};
#
# This makes sure that each decoder class can be identified using a
# separate class decoder. For rows without rules, the corresponding
# named class 'NamedInstClass' will be used. If a row also has
# a rule, the 'NamedInstClass' is converted to 'NamedRuleInstClass' where
# 'Rule' is the name of the rule.
#
# The base class for NamedClassDecoder is specified in
# "named_class_decoder.h". This file defines a class that takes a
# ClassDecoder (reference) C and a print name NAME, and builds a
# corresponding ClassDecoder that acts like C, but will print out
# NAME. The behaviour of C is maintained by dispatching each virtual
# on the NamedClassDecoder to the corresponding virtual on C.
#
# We then define the class decoder Decoder, by defining a derived
# instance of DecoderState as follows:
#
# class NamedDecoder : DecoderState {
# public:
# explicit NamedDecoder();
# const NamedClassDecoder& decode_named(const Instruction) const;
# virtual const ClassDecoder& decode(const Instruction) const;
# ...
# };
#
# The method decode is the expected API for the NamedDecoder, which is
# an instance of DecoderState (defined in decode.h). The method
# decode_named is the same, but returns NamedClassDecoder's so that
# good error messages can be generated by the test harnesses for
# ClassDecoder's (see decoder_tester.h for more details on
# ClassDecoder test harnesses).
#
# To the NamedDecoder, we add a constant field NamedClassDecoder for
# each possible class decoder method decode_named could return, or
# that we could use in automatically generated tests. These fields
# allow us to only create the corresponding decoder classes once
# (during constructor initialization).
#
# Finally, we add a method corresponding to each defined decoder
# table. The forms of these decoders is:
#
# inline const NamedClassDecoder& decode_TABLE(
# const nacl_arm_dec::Instruction inst) const;
#
# Each of these methods are defined as inline methods so that they can
# be optimized away in the corresponding top level methods (i.e.
# decode_named and decode).
#
# For testing, there are three files generated:
#
# decoder_named_classes.h
# decoder_named_decoder.h
# decoder_named.cc
# decoder_tests.cc
#
# File decoder_named_classes.h defines the class declarations for the
# generated Rule classes, and named class decoder classes. File
# decoder_named_decoder.h defines the decoder class NamedDecoder
# (discussed above). decoder_named.cc contains the corresponding
# implementations of the constructors and methods of these classes.
#
# decoder_tests.cc generates an automatic test harness executable,
# that will test each instruction Rule. Each test generates all
# possible matches the the corresponding Pattern of the table rule,
# and calls the corresponding tester associated with the class decoder
# of that row. By default, the tester is presumed to be named.
#
# InstClassTester
#
# If the row defines a Constraints identifier, then the tester
#
# InstClassTesterConstraints
#
# is used instead.
import dgen_core
import dgen_opt
import dgen_output
import dgen_decoder
import dgen_actuals
import dgen_baselines
"""The current command line arguments to use"""
_cl_args = {}
# The following defines naming conventions used for identifiers.
# Note: DECODER will be replaced by 'actual' and 'baseline', defining
# how both types of symbols are generated.
CLASS = '%(DECODER)s_%(rule)s'
NAMED_CLASS = 'Named%(DECODER)s_%(rule)s'
INSTANCE = '%(DECODER_class)s_instance_'
BASE_TESTER='%(decoder_base)sTester%(base_test_case)s'
BASE_BASE_TESTER='%(decoder_base)sTester%(qualifier)s'
DECODER_TESTER='%(baseline)sTester_%(test_case)s'
def _safety_to_check(safety):
return [s for s in safety if not isinstance(s, str)]
def _interesting_patterns(patterns):
""" Filters out non-interesting patterns."""
# Only include rows not corresponding to rule pattern,
# and not always true.
return [ p for p in patterns if (
(not p.column or p.column.name() != '$pattern')
and not p.matches_any())]
def _install_action(decoder, action, values):
"""Install common names needed to generate code for the given action,
and adds it to the values map.
"""
# This code is somewhat inefficient in that most cases, most of the
# added strings are not needed. On the other hand, by having a
# single routine that generates all action specific names at one
# spot, it is much easier to change definitions.
values['baseline'] = action.baseline()
values['actual'] = action.actual()
values['decoder_base'] = decoder.base_class(values['baseline'])
values['rule'] = action.rule()
values['qualifier'] = ''.join([s for s in action.safety()
if isinstance(s, str)])
if action.constraints():
values['qualifier'] += (action.constraints().other
if action.constraints().other else '')
else:
values['qualifier'] =''
values['pattern'] = action.pattern()
# Add dummies for row cases, in case not set up. See
# function _install_row_cases) for more details on these fields.
for field in [ 'base_test_case', 'test_case', 'test_pattern' ]:
if not values.get(field):
values[field] = ''
values['baseline_class'] = _decoder_replace(CLASS, 'baseline') % values
values['actual_class'] = _decoder_replace(CLASS, 'actual') % values
_install_baseline_and_actuals('named_DECODER_class', NAMED_CLASS, values)
_install_baseline_and_actuals('DECODER_instance', INSTANCE, values)
values['base_tester'] = BASE_TESTER % values
values['base_base_tester'] = BASE_BASE_TESTER % values
values['decoder_tester'] = DECODER_TESTER % values
def _decoder_replace(string, basis):
return string.replace('DECODER', basis)
def _install_key_pattern(key, pattern, basis, values):
# Replace DECODER in key and pattern with basis, then
# install into values.
values[_decoder_replace(key, basis)] = (
_decoder_replace(pattern, basis) % values)
def _install_baseline_and_actuals(key, pattern, values):
# Replace DECODER with 'baseline' and 'actual', apply it
# to the key and pattern, and then install into values.
for basis in ['baseline', 'actual']:
_install_key_pattern(key, pattern, basis, values)
def _generate_baseline_and_actual(code, symbol, decoder,
values, out, actions=['rule']):
""" Generates code to define the given symbol. Does so for both
baseline and actual decoders, filtering using actions.
code - The code to generate.
symbol - The symbol being defined.
decoder - The decoder (tables) to use.
values - The name map to use to generate code.
actions - The fields to keep when generating code.
"""
generated_symbols = set()
# Generate one for each type of basline decoder.
baseline_actions = actions[:]
baseline_actions.insert(0, 'baseline');
baseline_code = _decoder_replace(code, 'baseline')
baseline_symbol = _decoder_replace(symbol, 'baseline');
for d in decoder.action_filter(baseline_actions).decoders():
_install_action(decoder, d, values);
sym_name = (baseline_symbol % values)
if sym_name not in generated_symbols:
out.write(baseline_code % values)
generated_symbols.add(sym_name)
# Generate one for each actual type that is different than the
# baseline.
actual_actions = actions[:]
actual_actions.insert(0, 'actual-not-baseline')
actual_code = _decoder_replace(code, 'actual')
actual_symbol = _decoder_replace(symbol, 'actual')
for d in decoder.action_filter(actual_actions).decoders():
# Note: 'actual-not-baseline' sets actual to None if same as baseline.
if d.actual():
_install_action(decoder, d, values);
sym_name = (actual_symbol % values)
if sym_name not in generated_symbols:
out.write(actual_code % values)
generated_symbols.add(sym_name)
# Defines the header for decoder_bases.h
NAMED_BASES_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/actual_classes.h"
#include "native_client/src/trusted/validator_arm/baseline_classes.h"
#include "native_client/src/trusted/validator_arm/named_class_decoder.h"
#include "%(FILENAME_BASE)s_baselines.h"
namespace nacl_arm_test {
"""
GENERATED_BASELINE_HEADER="""
/*
* Define named class decoders for each automatically generated baseline
* decoder.
*/
"""
NAMED_GEN_BASE_DECLARE="""class Named%(gen_base)s
: public NamedClassDecoder {
public:
Named%(gen_base)s()
: NamedClassDecoder(decoder_, "%(gen_base)s")
{}
private:
nacl_arm_dec::%(gen_base)s decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(Named%(gen_base)s);
};
"""
NAMED_BASES_H_FOOTER="""
} // namespace nacl_arm_test
#endif // %(IFDEF_NAME)s
"""
NAMED_BASES_H_SUFFIX = '_named_bases.h'
def generate_named_bases_h(decoder, decoder_name, filename, out, cl_args):
"""Defines named classes needed for testing generated baselines.
Args:
    decoder: The decoder (defined by the parsed tables) to process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith(NAMED_BASES_H_SUFFIX)
_cl_args = cl_args
decoder = dgen_baselines.AddBaselinesToDecoder(decoder)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'IFDEF_NAME' : dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len(NAMED_BASES_H_SUFFIX)],
'decoder_name': decoder_name,
}
out.write(NAMED_BASES_H_HEADER % values)
_generate_generated_baseline(decoder, out)
out.write(NAMED_BASES_H_FOOTER % values)
def _generate_generated_baseline(decoder, out):
""" Generates code to define the given symbol. Does so for
the generated baseline decoders, filtering using actions.
"""
generated_symbols = set()
values = {}
out.write(GENERATED_BASELINE_HEADER % values)
for d in decoder.action_filter(['generated_baseline']).decoders():
gen_base = d.find('generated_baseline')
if gen_base and gen_base not in generated_symbols:
values['gen_base'] = gen_base
out.write(NAMED_GEN_BASE_DECLARE % values)
generated_symbols.add(gen_base)
# Defines the header for decoder_named_classes.h
NAMED_CLASSES_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/actual_classes.h"
#include "native_client/src/trusted/validator_arm/baseline_classes.h"
#include "native_client/src/trusted/validator_arm/named_class_decoder.h"
#include "%(FILENAME_BASE)s_actuals.h"
#include "%(FILENAME_BASE)s_named_bases.h"
"""
RULE_CLASSES_HEADER="""
/*
* Define rule decoder classes.
*/
namespace nacl_arm_dec {
"""
RULE_CLASS="""class %(DECODER_class)s
: public %(DECODER)s {
};
"""
RULE_CLASS_SYM="%(DECODER_class)s"
NAMED_DECODERS_HEADER="""} // nacl_arm_dec
namespace nacl_arm_test {
/*
* Define named class decoders for each class decoder.
* The main purpose of these classes is to introduce
* instances that are named specifically to the class decoder
* and/or rule that was used to parse them. This makes testing
* much easier in that error messages use these named classes
* to clarify what row in the corresponding table was used
* to select this decoder. Without these names, debugging the
* output of the test code would be nearly impossible
*/
"""
NAMED_CLASS_DECLARE="""class %(named_DECODER_class)s
: public NamedClassDecoder {
public:
%(named_DECODER_class)s()
: NamedClassDecoder(decoder_, "%(DECODER)s %(rule)s")
{}
private:
nacl_arm_dec::%(DECODER_class)s decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(%(named_DECODER_class)s);
};
"""
NAMED_CLASS_DECLARE_SYM="%(named_DECODER_class)s"
NAMED_CLASSES_H_FOOTER="""
// Defines the default parse action if the table doesn't define
// an action.
class NotImplementedNamed : public NamedClassDecoder {
public:
NotImplementedNamed()
: NamedClassDecoder(decoder_, "not implemented")
{}
private:
nacl_arm_dec::NotImplemented decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(NotImplementedNamed);
};
} // namespace nacl_arm_test
#endif // %(IFDEF_NAME)s
"""
def generate_named_classes_h(decoder, decoder_name, filename, out, cl_args):
"""Defines named classes needed for decoder testing.
Args:
    decoder: The decoder (defined by the parsed tables) to process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith('_named_classes.h')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'IFDEF_NAME' : dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len('_named_classes.h')],
'decoder_name': decoder_name,
}
out.write(NAMED_CLASSES_H_HEADER % values)
out.write(RULE_CLASSES_HEADER)
_generate_baseline_and_actual(RULE_CLASS, RULE_CLASS_SYM,
decoder, values, out)
out.write(NAMED_DECODERS_HEADER)
_generate_baseline_and_actual(NAMED_CLASS_DECLARE, NAMED_CLASS_DECLARE_SYM,
decoder, values, out)
out.write(NAMED_CLASSES_H_FOOTER % values)
NAMED_DECODER_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/decode.h"
#include "%(FILENAME_BASE)s_named_classes.h"
#include "native_client/src/trusted/validator_arm/named_class_decoder.h"
namespace nacl_arm_test {
// Defines a (named) decoder class selector for instructions
class Named%(decoder_name)s : nacl_arm_dec::DecoderState {
public:
explicit Named%(decoder_name)s();
// Parses the given instruction, returning the named class
// decoder to use.
const NamedClassDecoder& decode_named(
const nacl_arm_dec::Instruction) const;
// Parses the given instruction, returning the class decoder
// to use.
virtual const nacl_arm_dec::ClassDecoder& decode(
const nacl_arm_dec::Instruction) const;
// The following fields define the set of class decoders
// that can be returned by the API function "decode_named". They
// are created once as instance fields, and then returned
// by the table methods above. This speeds up the code since
  // the class decoders need to only be built once (and reused
// for each call to "decode_named")."""
DECODER_STATE_FIELD="""
const %(named_DECODER_class)s %(DECODER_instance)s;"""
DECODER_STATE_FIELD_NAME="%(named_DECODER_class)s"
DECODER_STATE_DECODER_COMMENTS="""
private:
// The following list of methods correspond to each decoder table,
// and implements the pattern matching of the corresponding bit
// patterns. After matching the corresponding bit patterns, they
// either call other methods in this list (corresponding to another
// decoder table), or they return the instance field that implements
// the class decoder that should be used to decode the particular
// instruction."""
DECODER_STATE_DECODER="""
inline const NamedClassDecoder& decode_%(table)s(
const nacl_arm_dec::Instruction inst) const;"""
NAMED_DECODER_H_FOOTER="""
// Defines default action if parse tables don't define what action
// to take.
const NotImplementedNamed not_implemented_;
};
} // namespace nacl_arm_test
#endif // %(IFDEF_NAME)s
"""
def generate_named_decoder_h(decoder, decoder_name, filename, out, cl_args):
"""Generates the named decoder for testing.
Args:
    decoder: The decoder (defined by the parsed tables) to process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith('_named_decoder.h')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'IFDEF_NAME' : dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len('_named_decoder.h')],
'decoder_name': decoder_name,
}
out.write(NAMED_DECODER_H_HEADER % values)
_generate_baseline_and_actual(DECODER_STATE_FIELD, DECODER_STATE_FIELD_NAME,
decoder, values, out)
out.write(DECODER_STATE_DECODER_COMMENTS)
for table in decoder.tables():
values['table'] = table.name
out.write(DECODER_STATE_DECODER % values)
out.write(NAMED_DECODER_H_FOOTER % values)
# Defines the source for DECODER_named.cc
NAMED_CC_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#include "%(FILENAME_BASE)s_decoder.h"
using nacl_arm_dec::ClassDecoder;
using nacl_arm_dec::Instruction;
namespace nacl_arm_test {
Named%(decoder_name)s::Named%(decoder_name)s()
{}
"""
PARSE_TABLE_METHOD_HEADER="""
/*
* Implementation of table %(table_name)s.
* Specified by: %(citation)s
*/
const NamedClassDecoder& Named%(decoder_name)s::decode_%(table_name)s(
const nacl_arm_dec::Instruction inst) const {
"""
METHOD_HEADER_TRACE="""
fprintf(stderr, "decode %(table_name)s\\n");
"""
METHOD_DISPATCH_BEGIN="""
if (%s"""
METHOD_DISPATCH_CONTINUE=""" &&
%s"""
METHOD_DISPATCH_END=") {"""
METHOD_DISPATCH_TRACE="""
fprintf(stderr, "count = %s\\n");"""
PARSE_TABLE_METHOD_ROW="""
return %(action)s;
"""
METHOD_DISPATCH_CLOSE=""" }
"""
PARSE_TABLE_METHOD_FOOTER="""
// Catch any attempt to fall through...
return not_implemented_;
}
"""
NAMED_CC_FOOTER="""
const NamedClassDecoder& Named%(decoder_name)s::
decode_named(const nacl_arm_dec::Instruction inst) const {
return decode_%(entry_table_name)s(inst);
}
const nacl_arm_dec::ClassDecoder& Named%(decoder_name)s::
decode(const nacl_arm_dec::Instruction inst) const {
return decode_named(inst).named_decoder();
}
} // namespace nacl_arm_test
"""
def generate_named_cc(decoder, decoder_name, filename, out, cl_args):
"""Implementation of the test decoder in .cc file
Args:
    decoder: The decoder (defined by the parsed tables) to process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith('.cc')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'FILENAME_BASE' : filename[:-len('.cc')],
'decoder_name': decoder_name,
'entry_table_name': decoder.primary.name,
}
out.write(NAMED_CC_HEADER % values)
_generate_decoder_method_bodies(decoder, values, out)
out.write(NAMED_CC_FOOTER % values)
def _generate_decoder_method_bodies(decoder, values, out):
global _cl_args
for table in decoder.tables():
# Add the default row as the last in the optimized row, so that
# it is applied if all other rows do not.
opt_rows = sorted(
dgen_opt.optimize_rows(
table.action_filter(['baseline', 'rule']).rows(False)))
if table.default_row:
opt_rows.append(table.default_row)
opt_rows = table.add_column_to_rows(opt_rows)
print ("Table %s: %d rows minimized to %d"
% (table.name, len(table.rows()), len(opt_rows)))
values['table_name'] = table.name
    values['citation'] = table.citation
out.write(PARSE_TABLE_METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(METHOD_HEADER_TRACE % values)
# Add message to stop compilation warnings if this table
# doesn't require subtables to select a class decoder.
if not table.methods():
out.write(" UNREFERENCED_PARAMETER(inst);")
count = 0
for row in opt_rows:
count = count + 1
if row.action.__class__.__name__ == 'DecoderAction':
_install_action(decoder, row.action, values)
action = '%(baseline_instance)s' % values
elif row.action.__class__.__name__ == 'DecoderMethod':
action = 'decode_%s(inst)' % row.action.name
else:
raise Exception('Bad table action: %s' % row.action)
# Each row consists of a set of bit patterns defining if the row
# is applicable. Convert this into a sequence of anded C test
# expressions. For example, convert the following pair of bit
# patterns:
#
# xxxx1010xxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxx0101
#
# Each instruction is masked to get the the bits, and then
# tested against the corresponding expected bits. Hence, the
# above example is converted to:
#
      #      ((inst & 0x0F000000) == 0x0A000000) &&
      #      ((inst & 0x0000000F) == 0x00000005)
out.write(METHOD_DISPATCH_BEGIN %
row.patterns[0].to_commented_bool())
for p in row.patterns[1:]:
out.write(METHOD_DISPATCH_CONTINUE % p.to_commented_bool())
out.write(METHOD_DISPATCH_END)
if _cl_args.get('trace') == 'True':
out.write(METHOD_DISPATCH_TRACE % count)
values['action'] = action
out.write(PARSE_TABLE_METHOD_ROW % values)
out.write(METHOD_DISPATCH_CLOSE)
out.write(PARSE_TABLE_METHOD_FOOTER % values)
# Define the source for DECODER_tests.cc
TEST_CC_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#include "gtest/gtest.h"
#include "native_client/src/trusted/validator_arm/actual_vs_baseline.h"
#include "native_client/src/trusted/validator_arm/baseline_vs_baseline.h"
#include "native_client/src/trusted/validator_arm/actual_classes.h"
#include "native_client/src/trusted/validator_arm/baseline_classes.h"
#include "native_client/src/trusted/validator_arm/inst_classes_testers.h"
#include "native_client/src/trusted/validator_arm/arm_helpers.h"
#include "native_client/src/trusted/validator_arm/gen/arm32_decode_named_bases.h"
using nacl_arm_dec::Instruction;
using nacl_arm_dec::ClassDecoder;
using nacl_arm_dec::Register;
using nacl_arm_dec::RegisterList;
namespace nacl_arm_test {
// The following classes are derived class decoder testers that
// add row pattern constraints and decoder restrictions to each tester.
// This is done so that it can be used to make sure that the
// corresponding pattern is not tested for cases that would be excluded
// due to row checks, or restrictions specified by the row restrictions.
"""
CONSTRAINT_TESTER_CLASS_HEADER="""
// %(row_comment)s
class %(base_tester)s
: public %(base_base_tester)s {
public:
%(base_tester)s(const NamedClassDecoder& decoder)
: %(base_base_tester)s(decoder) {}"""
CONSTRAINT_TESTER_RESTRICTIONS_HEADER="""
virtual bool PassesParsePreconditions(
nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder);"""
CONSTRAINT_TESTER_SANITY_HEADER="""
virtual bool ApplySanityChecks(nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder);"""
CONSTRAINT_TESTER_CLASS_CLOSE="""
};
"""
CONSTRAINT_TESTER_PARSE_HEADER="""
bool %(base_tester)s
::PassesParsePreconditions(
nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder) {"""
ROW_CONSTRAINTS_HEADER="""
  // Check that row patterns apply to pattern being checked."""
PATTERN_CONSTRAINT_RESTRICTIONS_HEADER="""
// Check pattern restrictions of row."""
CONSTRAINT_CHECK="""
// %(comment)s
if (%(code)s) return false;"""
CONSTRAINT_TESTER_CLASS_FOOTER="""
// Check other preconditions defined for the base decoder.
return %(base_base_tester)s::
PassesParsePreconditions(inst, decoder);
}
"""
SAFETY_TESTER_HEADER="""
bool %(base_tester)s
::ApplySanityChecks(nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder) {
NC_PRECOND(%(base_base_tester)s::
ApplySanityChecks(inst, decoder));"""
SAFETY_TESTER_CHECK="""
// safety: %(comment)s
EXPECT_TRUE(%(code)s);"""
DEFS_SAFETY_CHECK="""
// defs: %(comment)s;
EXPECT_TRUE(decoder.defs(inst).IsSame(%(code)s));"""
SAFETY_TESTER_FOOTER="""
return true;
}
"""
TESTER_CLASS_HEADER="""
// The following are derived class decoder testers for decoder actions
// associated with a pattern of an action. These derived classes introduce
// a default constructor that automatically initializes the expected decoder
// to the corresponding instance in the generated DecoderState.
"""
TESTER_CLASS="""
// %(row_comment)s
class %(decoder_tester)s
: public %(base_tester)s {
public:
%(decoder_tester)s()
: %(base_tester)s(
state_.%(baseline_instance)s)
{}
};
"""
TEST_HARNESS="""
// Defines a gtest testing harness for tests.
class %(decoder_name)sTests : public ::testing::Test {
protected:
%(decoder_name)sTests() {}
};
// The following functions test each pattern specified in parse
// decoder tables.
"""
TEST_FUNCTION_ACTUAL_VS_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s baseline_tester;
%(named_actual_class)s actual;
ActualVsBaselineTester a_vs_b_tester(actual, baseline_tester);
a_vs_b_tester.Test("%(pattern)s");
}
"""
TEST_FUNCTION_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s tester;
tester.Test("%(pattern)s");
}
"""
TEST_FUNCTION_BASELINE_VS_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
BvB_%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s old_baseline_tester;
Named%(gen_decoder)s gen_baseline;
BaselineVsBaselineTester b_vs_b_tester(gen_baseline, old_baseline_tester);
b_vs_b_tester.Test("%(pattern)s");
}
"""
TEST_CC_FOOTER="""
} // namespace nacl_arm_test
int main(int argc, char* argv[]) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
"""
def generate_tests_cc(decoder, decoder_name, out, cl_args, tables):
"""Generates pattern tests for the rows in the given list of tables
in the given decoder."""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
decoder = dgen_baselines.AddBaselinesToDecoder(decoder, tables)
baselines = cl_args.get('test-base')
if not baselines: baselines = []
decoder = _decoder_restricted_to_tables(decoder, tables)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'decoder_name': decoder_name,
}
out.write(TEST_CC_HEADER % values)
_generate_constraint_testers(decoder, values, out)
_generate_rule_testers(decoder, values, out)
out.write(TEST_HARNESS % values)
_generate_test_patterns_with_baseline_tests(decoder, values, out, baselines)
out.write(TEST_CC_FOOTER % values)
def _filter_test_action(action, with_patterns, with_rules):
"""Filters the actions to pull out relavant entries, based on whether we
want to include patterns and rules.
"""
action_fields = ['actual', 'baseline', 'generated_baseline',
'constraints'] + dgen_decoder.METHODS
if with_patterns:
action_fields += ['pattern' ]
if with_rules:
action_fields += ['rule']
return action.action_filter(action_fields)
def _filter_test_row(row, with_patterns=False, with_rules=True):
"""Filters a row t pulll out actions with relavant entries, based on
whether we want to include patterns and rules.
"""
return row.copy_with_action(
_filter_test_action(row.action, with_patterns, with_rules))
def _install_row_cases(row, values):
"""Installs row case names, based on values entries."""
# First define base testers that add row constraints and safety checks.
constraint_rows_map = values.get('constraint_rows')
if constraint_rows_map:
base_row = _filter_test_row(row, with_rules=False)
values['base_test_case'] = (
'Case%s' % constraint_rows_map[dgen_core.neutral_repr(base_row)])
else:
values['base_test_case'] = ''
# Add test decoders associated with the row in the table.
decoder_rows_map = values.get('decoder_rows')
if decoder_rows_map:
decoder_row = _filter_test_row(row)
values['test_case'] = (
'Case%s' % decoder_rows_map[dgen_core.neutral_repr(decoder_row)])
else:
values['test_case'] = ''
  # Incorporate patterns with each row.
pattern_rows_map = values.get('test_rows')
if pattern_rows_map:
pattern_row = _filter_test_row(row, with_patterns=True)
values['test_pattern'] = (
'Case%s' % pattern_rows_map[dgen_core.neutral_repr(pattern_row)])
else:
values['test_pattern'] = ''
def _install_test_row(row, decoder, values,
with_patterns=False, with_rules=True):
"""Installs data associated with the given row into the values map.
Installs the baseline class, rule name, and constraints associated
with the row. If with_patterns is specified, then pattern information and
actual class information is also inserted.
"""
action = _filter_test_action(row.action, with_patterns, with_rules)
values['row_comment'] = dgen_output.commented_string(
repr(row.copy_with_action(action)))
_install_action(decoder, action, values)
return action
def _rows_to_test(decoder, values, with_patterns=False, with_rules=True):
"""Returns the rows of the decoder that define enough information
that testing can be done.
"""
generated_names = set()
rows = []
for table in decoder.tables():
for row in table.rows():
if (isinstance(row.action, dgen_core.DecoderAction) and
row.action.pattern()):
new_row = row.copy_with_action(
_install_test_row(row, decoder, values, with_patterns, with_rules))
constraint_tester = dgen_core.neutral_repr(new_row)
if constraint_tester not in generated_names:
generated_names.add(constraint_tester)
rows.append(new_row)
return sorted(rows)
def _row_filter_interesting_patterns(row):
"""Builds a copy of the row, removing uninteresting column patterns."""
return row.copy_with_patterns(_interesting_patterns(row.patterns))
def _generate_constraint_testers(decoder, values, out):
"""Generates the testers needed to implement the constraints
associated with each row having a pattern.
"""
rows = _rows_to_test(decoder, values, with_rules=False)
values['constraint_rows'] = _index_neutral_map(rows)
for r in rows:
_install_row_cases(r, values)
row = _row_filter_interesting_patterns(r)
action = _install_test_row(row, decoder, values)
safety_to_check = _safety_to_check(action.safety())
defs_to_check = action.defs()
out.write(CONSTRAINT_TESTER_CLASS_HEADER % values)
if row.patterns or action.constraints().restrictions:
out.write(CONSTRAINT_TESTER_RESTRICTIONS_HEADER % values);
if safety_to_check or defs_to_check:
out.write(CONSTRAINT_TESTER_SANITY_HEADER % values)
out.write(CONSTRAINT_TESTER_CLASS_CLOSE % values)
if row.patterns or action.constraints().restrictions:
out.write(CONSTRAINT_TESTER_PARSE_HEADER % values)
if row.patterns:
out.write(ROW_CONSTRAINTS_HEADER % values);
for p in row.patterns:
not_p = p.negate()
values['comment'] = dgen_output.commented_string(repr(not_p), ' ')
values['code'] = not_p.to_bool()
out.write(CONSTRAINT_CHECK % values)
if action.constraints().restrictions:
out.write(PATTERN_CONSTRAINT_RESTRICTIONS_HEADER)
for c in action.constraints().restrictions:
not_c = c.negate()
values['comment'] = dgen_output.commented_string(repr(not_c), ' ')
values['code'] = not_c.to_bool()
out.write(CONSTRAINT_CHECK % values)
out.write(CONSTRAINT_TESTER_CLASS_FOOTER % values)
if safety_to_check or defs_to_check:
out.write(SAFETY_TESTER_HEADER % values)
for check in safety_to_check:
values['comment'] = dgen_output.commented_string(
repr(check), ' ')
values['code'] = check.to_bool()
out.write(SAFETY_TESTER_CHECK % values)
if defs_to_check:
values['comment'] = dgen_output.commented_string(
repr(defs_to_check), ' ')
values['code'] = defs_to_check.to_register_list()
out.write(DEFS_SAFETY_CHECK % values)
out.write(SAFETY_TESTER_FOOTER % values)
def _generate_rule_testers(decoder, values, out):
"""Generates the testers that tests the rule associated with
each row having a pattern.
"""
out.write(TESTER_CLASS_HEADER % values)
rows = _rows_to_test(decoder, values)
values['decoder_rows'] = _index_neutral_map(rows)
for r in rows:
_install_row_cases(r, values)
row = _row_filter_interesting_patterns(r)
_install_test_row(row, decoder, values)
out.write(TESTER_CLASS % values)
def _decoder_restricted_to_tables(decoder, tables):
"""Returns a copy of the decoder, with only the given table names (
or all tables if no names are specified.
"""
if not tables:
return decoder
new_decoder = dgen_core.Decoder()
for tbl in [tbl for tbl in decoder.tables() if tbl.name in tables]:
new_decoder.add(tbl)
new_decoder.set_class_defs(decoder.get_class_defs())
return new_decoder
def _generate_test_patterns_with_baseline_tests(
decoder, values, out, baseline_test_tables):
_generate_test_patterns(decoder, values, out, False)
_generate_test_patterns(
_decoder_restricted_to_tables(decoder, baseline_test_tables),
values, out, True)
def _generate_test_patterns(decoder, values, out, add_baseline_tests):
"""Generates a test function for each row having a pattern associated
with the table row.
"""
rows = _rows_to_test(decoder, values, with_patterns=True)
values['test_rows'] = _index_neutral_map(rows)
for r in rows:
_install_row_cases(r, values)
row = _row_filter_interesting_patterns(r)
action = _install_test_row(row, decoder, values, with_patterns=True)
if add_baseline_tests:
if action.find('generated_baseline'):
values['gen_decoder'] = action.find('generated_baseline')
out.write(TEST_FUNCTION_BASELINE_VS_BASELINE % values)
elif action.actual() == action.baseline():
out.write(TEST_FUNCTION_BASELINE % values)
else:
out.write(TEST_FUNCTION_ACTUAL_VS_BASELINE % values)
def _index_neutral_map(values):
"""Returns a dictionary from each neutral_repr(value) in list
values, to its corresponding index. This is done to reduce the
number of compares to find the index, speeding up code
generation.
"""
lookup_map = {}
index = 0
for v in values:
lookup_map[dgen_core.neutral_repr(v)] = index
index += 1
return lookup_map
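# An illustrative sketch (not used by the generator): how the naming templates
# near the top of this file expand.  _decoder_replace() first substitutes
# 'baseline' or 'actual' for DECODER, then ordinary %-interpolation fills in
# the remaining fields.  The values dict below is a hypothetical example.
def _example_template_expansion():
  values = {'baseline': 'Binary4RegisterShiftedOp',
            'rule': 'Add_Rule_7_A1_P26'}
  # Yields 'Binary4RegisterShiftedOp_Add_Rule_7_A1_P26'.
  return _decoder_replace(CLASS, 'baseline') % values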
| nacl-webkit/native_client | src/trusted/validator_arm/dgen_test_output.py | Python | bsd-3-clause | 40,246 |
from django.conf.urls import patterns, url
from .views import MediaLibraryAPIView, MediaLibraryItemView, AddShelfRelationAPIView
urlpatterns = patterns('',
url(r'^(?P<type>(audio|video|image))/$', MediaLibraryAPIView.as_view(), name='medialibrary'),
url(r'^(?P<pk>\d+)/$', MediaLibraryItemView.as_view(), name='medialibrary-shelf'),
url(r'^(?P<pk>\d+)/add/$', AddShelfRelationAPIView.as_view(), name='medialibrary-shelf-add-relation')
)
| pulilab/django-medialibrary | medialibrary/urls.py | Python | bsd-3-clause | 450 |
#!/usr/bin/env python
"""
This script is a trick to set up a fake Django environment, since this reusable
app will be developed and tested outside any specific Django project.
Via ``settings.configure`` you will be able to set all necessary settings
for your app and run the tests as if you were calling ``./manage.py test``.
Taken from https://github.com/mbrochh/tdd-with-django-reusable-app
"""
import os
import sys
from django.conf import settings
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
]
INTERNAL_APPS = [
'portlet',
'django_nose',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
COVERAGE_MODULE_EXCLUDES = [
'tests$', 'settings$', 'urls$', 'locale$',
'migrations', 'fixtures', 'admin$', 'django_extensions',
]
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
if not settings.configured:
settings.configure(
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
INSTALLED_APPS=INSTALLED_APPS,
ROOT_URLCONF='portlet.urls',
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(__file__), '../templates'),
),
COVERAGE_MODULE_EXCLUDES=COVERAGE_MODULE_EXCLUDES,
COVERAGE_REPORT_HTML_OUTPUT_DIR=os.path.join(
os.path.dirname(__file__), 'coverage')
)
from django_coverage.coverage_runner import CoverageRunner
from django_nose import NoseTestSuiteRunner
class NoseCoverageTestRunner(CoverageRunner, NoseTestSuiteRunner):
"""Custom test runner that uses nose and coverage"""
pass
def runtests(*test_args):
failures = NoseTestSuiteRunner(verbosity=2, interactive=True).run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
| uhuramedia/django-portlet | portlet/tests/runtests.py | Python | bsd-3-clause | 2,032 |
try:
from django.conf.urls import *
except ImportError: # django < 1.4
from django.conf.urls.defaults import *
from .views import EventDetail, EventList, EventCreate, EventCreateJSON, EventDelete, EventUpdate
urlpatterns = patterns("events.views",
url(r"^$", EventList.as_view(template_name='events/event_list_calendar.html'), name='list'),
#url(r"^$", EventList.as_view(), name='list'),
url(r"^create/$", EventCreate.as_view(), name='create'),
url(r"^create/json/$", EventCreateJSON.as_view(), name='create_json'),
url(r"^(?P<pk>\d+)/$", EventDetail.as_view(), name='detail'),
url(r"^(?P<pk>\d+)/update$", EventUpdate.as_view(), name='update'),
url(r"^(?P<pk>\d+)/delete/$", EventDelete.as_view(), name='delete'),
url(r"^(?P<event_id>\d+)/rsvp/$", 'rsvp_event', name='rsvp'),
url(r"^(?P<event_id>\d+)/attend/$", 'attend_event', name='attend'),
#url(r"^calendar/(?P<year>\d+)/(?P<month>\d+)/$", 'calendar', name='calendar'),
#url(r"^calendar/$", CalendarRedirectView.as_view(), name='calendar-redirect'),
)
| goldhand/onegreek | onegreek/events/urls.py | Python | bsd-3-clause | 1,273 |
from copy import copy
import datetime
import time
import urllib2
from nose.tools import assert_equals
from nose.plugins.skip import SkipTest
from autoscalebot import TOO_LOW, JUST_RIGHT, TOO_HIGH
from autoscalebot.conf import AutoscaleSettings
from autoscalebot.models import HerokuAutoscaler
class TestSettings(AutoscaleSettings):
pass
test_settings = TestSettings()
test_settings.HEROKU_APP_NAME = "test-app"
test_settings.HEROKU_API_KEY = "1234567"
test_settings.HEARTBEAT_INTERVAL_IN_SECONDS = 30
test_settings.HEARTBEAT_URL = 'http://www.google.com'
test_settings.MAX_RESPONSE_TIME_IN_MS = 1000
test_settings.MIN_RESPONSE_TIME_IN_MS = 400
test_settings.NUMBER_OF_FAILS_TO_SCALE_UP_AFTER = 3
test_settings.NUMBER_OF_PASSES_TO_SCALE_DOWN_AFTER = 5
test_settings.MAX_DYNOS = 3
test_settings.MIN_DYNOS = 1
test_settings.INCREMENT = 1
test_settings.NOTIFY_IF_SCALE_DIFF_EXCEEDS_THRESHOLD = None
test_settings.NOTIFY_IF_SCALE_DIFF_EXCEEDS_PERIOD_IN_MINUTES = None
test_settings.NOTIFY_IF_NEEDS_EXCEED_MAX = True
test_settings.NOTIFY_IF_NEEDS_BELOW_MIN = True
test_settings.NOTIFICATION_BACKENDS = ["autoscalebot.backends.notification.TestBackend", ]
class MockHerokuProcesses:
def __init__(self):
self.current = 0
self._processes = [1, ]
@property
def processes(self):
if not hasattr(self, "_processes"):
self._processes = [1, ]
return self._processes
def scale(self, new_num):
self._processes = [n + 1 for n in range(0, new_num)]
def __iter__(self):
return self
def next(self):
self.current += 1
if self.current > len(self.processes):
raise StopIteration
else:
return self.processes[self.current - 1]
class MockBrokenHerokuProcesses(MockHerokuProcesses):
    def scale(self, new_num):
raise Exception
class MockHerokuApp:
def __init__(self, *args, **kwargs):
self.processes
@property
def processes(self):
if not hasattr(self, "_processes"):
self._processes = {'web': MockHerokuProcesses(), }
return self._processes
class MockBrokenHerokuApp(MockHerokuApp):
@property
def processes(self):
if not hasattr(self, "_processes"):
self._processes = {'web': MockBrokenHerokuProcesses(), }
return self._processes
class MockHerokuAutoscaler(HerokuAutoscaler):
def __init__(self, *args, **kwargs):
super(MockHerokuAutoscaler, self).__init__(*args, **kwargs)
self.heroku_app
@property
def heroku_app(self):
if not hasattr(self, "_heroku_app"):
self._heroku_app = MockHerokuApp()
return self._heroku_app
def out_of_band_heroku_scale(self, num_dynos):
# Ugly mock out of band scale
self.heroku_app.processes["web"]._processes = [1, 2, 3, 4]
self._num_dynos = len([i for i in self.heroku_app.processes["web"]._processes])
class MockValidResponse:
def read(self, *args, **kwargs):
return "A"
class Mock500Response:
def read(self, *args, **kwargs):
raise Exception
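# The functions below are swapped in for urllib2.urlopen in the tests
# (e.g. urllib2.urlopen = mock_valid_urlopen).  Because they are plain
# module-level functions, the URL passed by the code under test simply binds
# to the first positional parameter (named "self" here) and is ignored.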
def mock_valid_urlopen(self, *args, **kwargs):
time.sleep(0.5)
return MockValidResponse()
def mock_invalid_urlopen(self, *args, **kwargs):
return Mock500Response()
def mock_fast_urlopen(self, *args, **kwargs):
return MockValidResponse()
def mock_slow_urlopen(self, *args, **kwargs):
time.sleep(2)
return MockValidResponse()
class TestHerokuAutoscaler:
def setUp(self):
self.test_scaler
@property
def test_scaler(self):
if not hasattr(self, "_test_scaler"):
self._test_scaler = MockHerokuAutoscaler(test_settings)
return self._test_scaler
def test_heroku_scale(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.heroku_scale(3)
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.heroku_scale(5)
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.heroku_scale(2)
assert_equals(self.test_scaler.num_dynos, 2)
def test_num_dynos(self):
self.test_scaler.heroku_scale(3)
assert_equals(len([i for i in self.test_scaler.heroku_app.processes['web']]), 3)
def test_add_to_history(self):
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(JUST_RIGHT)
assert_equals(self.test_scaler.results, [TOO_LOW, TOO_HIGH, JUST_RIGHT])
def test_add_to_history_caps_length(self):
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
assert_equals(self.test_scaler.results, [TOO_LOW, TOO_LOW, TOO_LOW, TOO_LOW, TOO_LOW])
def test_needs_scale_up_works(self):
self.test_scaler.add_to_history(TOO_LOW)
assert_equals(self.test_scaler.needs_scale_up, False)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
assert_equals(self.test_scaler.needs_scale_up, True)
def test_needs_scale_down_works(self):
self.test_scaler.add_to_history(TOO_HIGH)
assert_equals(self.test_scaler.needs_scale_down, False)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
assert_equals(self.test_scaler.needs_scale_down, True)
def test_scale_up(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 2)
def test_scale_up_stops_at_limit(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 3)
def test_scale_down(self):
self.test_scaler.scale_up()
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.scale_down()
assert_equals(self.test_scaler.num_dynos, 2)
def test_scale_down_stops_at_limit(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
assert_equals(self.test_scaler.num_dynos, 1)
def test_do_autoscale_up_works(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 3)
def test_do_autoscale_down_works(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 1)
def test_max_dynos_from_time_based_settings_works(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.MAX_DYNOS = {
"0:00": 2,
"9:00": 5,
"17:00": 3
}
now_time = datetime.datetime.now()
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
early_morning = datetime.datetime(now_time.year, now_time.month, now_time.day, 1, 0)
mid_day = datetime.datetime(now_time.year, now_time.month, now_time.day, 12, 0)
evening = datetime.datetime(now_time.year, now_time.month, now_time.day, 18, 0)
morning_off_by_minutes = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 5)
morning_exact = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 0)
assert_equals(self.test_scaler.max_num_dynos(when=early_morning), 2)
assert_equals(self.test_scaler.max_num_dynos(when=mid_day), 5)
assert_equals(self.test_scaler.max_num_dynos(when=evening), 3)
assert_equals(self.test_scaler.max_num_dynos(when=morning_off_by_minutes), 5)
assert_equals(self.test_scaler.max_num_dynos(when=morning_exact), 5)
def test_min_dynos_from_time_based_settings_works(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.MIN_DYNOS = {
"0:00": 2,
"9:00": 5,
"17:00": 3
}
now_time = datetime.datetime.now()
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
early_morning = datetime.datetime(now_time.year, now_time.month, now_time.day, 1, 0)
mid_day = datetime.datetime(now_time.year, now_time.month, now_time.day, 12, 0)
evening = datetime.datetime(now_time.year, now_time.month, now_time.day, 18, 0)
morning_off_by_minutes = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 5)
morning_exact = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 0)
assert_equals(self.test_scaler.min_num_dynos(when=early_morning), 2)
assert_equals(self.test_scaler.min_num_dynos(when=mid_day), 5)
assert_equals(self.test_scaler.min_num_dynos(when=evening), 3)
assert_equals(self.test_scaler.min_num_dynos(when=morning_off_by_minutes), 5)
assert_equals(self.test_scaler.min_num_dynos(when=morning_exact), 5)
def test_custom_increments_work(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.INCREMENT = 2
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 3)
def test_if_min_is_changed_to_higher_than_current_scaling_works(self):
self.test_scaler.heroku_scale(1)
one_off_test_settings = copy(test_settings)
one_off_test_settings.MIN_DYNOS = 2
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
def test_if_max_is_changed_to_lower_than_current_scaling_works(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.MAX_DYNOS = 2
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.out_of_band_heroku_scale(4)
assert_equals(self.test_scaler.num_dynos, 4)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
def test_scaling_clears_the_results_queue(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
assert_equals(self.test_scaler.results, [])
def test_a_mixed_stack_of_low_high_scales_to_the_min_needed_for_the_condition(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
def test_ping_and_store_for_valid_url(self):
urllib2.urlopen = mock_valid_urlopen
assert_equals(self.test_scaler.results, [])
self.test_scaler.ping_and_store()
assert_equals(self.test_scaler.results, [JUST_RIGHT])
def test_ping_and_store_for_invalid_url(self):
urllib2.urlopen = mock_invalid_urlopen
assert_equals(self.test_scaler.results, [])
self.test_scaler.ping_and_store()
assert_equals(self.test_scaler.results, [TOO_HIGH])
def test_ping_and_store_for_slow_url(self):
urllib2.urlopen = mock_slow_urlopen
assert_equals(self.test_scaler.results, [])
self.test_scaler.ping_and_store()
assert_equals(self.test_scaler.results, [TOO_HIGH])
def test_ping_and_store_for_fast_url(self):
urllib2.urlopen = mock_fast_urlopen
assert_equals(self.test_scaler.results, [])
self.test_scaler.ping_and_store()
assert_equals(self.test_scaler.results, [TOO_LOW])
def test_notify_if_scale_diff_exceeds_threshold_works(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 3)
print "Feature not written"
raise SkipTest
def test_notify_if_scale_diff_exceeds_period_in_minutes_works(self):
print "Feature not written"
raise SkipTest
def test_notify_if_needs_exceed_max_works(self):
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.backends[0].clear_messages()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
assert_equals(len(self.test_scaler.backends[0].messages), 1)
assert "max" in self.test_scaler.backends[0].messages[0]
def test_notify_if_needs_below_min_does_not_notify_on_one_dyno_works(self):
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.backends[0].clear_messages()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
def test_notify_if_needs_below_min_works(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.MIN_DYNOS = 2
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.backends[0].clear_messages()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
assert_equals(len(self.test_scaler.backends[0].messages), 1)
assert "min" in self.test_scaler.backends[0].messages[0]
def test_notify_if_needs_exceed_max_disabled_works(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.NOTIFY_IF_NEEDS_EXCEED_MAX = False
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.backends[0].clear_messages()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
def test_notify_if_needs_below_min_disabled_works(self):
one_off_test_settings = copy(test_settings)
        one_off_test_settings.NOTIFY_IF_NEEDS_BELOW_MIN = False
        # The one-off settings must be applied, or the scaler under test keeps the defaults.
        self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.backends[0].clear_messages()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
def test_notify_on_scale_fails_works(self):
self.test_scaler._heroku_app = MockBrokenHerokuApp()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.scale_up()
assert_equals(len(self.test_scaler.backends[0].messages), 1)
assert "fail" in self.test_scaler.backends[0].messages[0]
def test_notify_on_every_scale_works(self):
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.scale_up()
assert_equals(len(self.test_scaler.backends[0].messages), 1)
def test_all_backends_are_called_on_notification(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.NOTIFICATION_BACKENDS = [
"autoscalebot.backends.notification.TestBackend",
"autoscalebot.backends.notification.TestBackend"
]
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals([len(b.messages) for b in self.test_scaler.backends], [0, 0])
self.test_scaler.scale_up()
assert_equals([len(b.messages) for b in self.test_scaler.backends], [1, 1])
# TODO: django tests
| wieden-kennedy/autoscalebot | autoscalebot/tests.py | Python | bsd-3-clause | 21,789 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import guidanceresponse
from .fhirdate import FHIRDate
class GuidanceResponseTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("GuidanceResponse", js["resourceType"])
return guidanceresponse.GuidanceResponse(js)
def testGuidanceResponse1(self):
inst = self.instantiate_from("guidanceresponse-example.json")
self.assertIsNotNone(inst, "Must have instantiated a GuidanceResponse instance")
self.implGuidanceResponse1(inst)
js = inst.as_json()
self.assertEqual("GuidanceResponse", js["resourceType"])
inst2 = guidanceresponse.GuidanceResponse(js)
self.implGuidanceResponse1(inst2)
def implGuidanceResponse1(self, inst):
self.assertEqual(inst.contained[0].id, "outputParameters1")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier.system, "http://example.org")
self.assertEqual(inst.identifier.value, "guidanceResponse1")
self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2017-03-10T16:02:00Z").date)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2017-03-10T16:02:00Z")
self.assertEqual(inst.reasonCodeableConcept.text, "Guideline Appropriate Ordering Assessment")
self.assertEqual(inst.requestId, "guidanceRequest1")
self.assertEqual(inst.status, "success")
self.assertEqual(inst.text.status, "generated")
| all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/guidanceresponse_tests.py | Python | bsd-3-clause | 1,811 |
from distutils.core import setup
import os
import glob
setup(
name = 'pyspecfit',
url = 'http://justincely.github.io',
version = '0.0.1',
description = 'interact with IRAF task specfit I/O products',
author = 'Justin Ely',
author_email = '[email protected]',
keywords = ['astronomy'],
classifiers = ['Programming Language :: Python',
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Software Development :: Libraries :: Python Modules'],
packages = ['pyspecfit']
)
| justincely/pyspecfit | setup.py | Python | bsd-3-clause | 731 |
from django.conf import settings
from .locals import get_cid
DEFAULT_CID_SQL_COMMENT_TEMPLATE = 'cid: {cid}'
class CidCursorWrapper:
"""
A cursor wrapper that attempts to add a cid comment to each query
"""
def __init__(self, cursor):
self.cursor = cursor
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def add_comment(self, sql):
cid_sql_template = getattr(
settings, 'CID_SQL_COMMENT_TEMPLATE', DEFAULT_CID_SQL_COMMENT_TEMPLATE
)
cid = get_cid()
if not cid:
return sql
# FIXME (dbaty): we could use "--" prefixed comments so that
# we would not have to bother with escaping the cid (assuming
# it does not contain newline characters).
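        # Escape the comment delimiters so a malformed or malicious cid value
        # cannot terminate the SQL comment early (comment injection).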
cid = cid.replace('/*', r'\/\*').replace('*/', r'\*\/')
return "/* {} */\n{}".format(cid_sql_template.format(cid=cid), sql)
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None):
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
sql = self.add_comment(sql)
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
sql = self.add_comment(sql)
return self.cursor.executemany(sql, param_list)
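# Minimal usage sketch (hypothetical, not part of this module): wrap a DB-API
# cursor and every execute()/executemany() call is prefixed with the
# correlation-id comment, e.g.
#
#     cursor = CidCursorWrapper(connection.cursor())
#     cursor.execute("SELECT 1")
#     # runs roughly: /* cid: <current cid> */\nSELECT 1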
| snowball-one/cid | cid/cursor.py | Python | bsd-3-clause | 1,697 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la volonté RelacherRames"""
import re
from secondaires.navigation.equipage.ordres.relacher_rames import \
RelacherRames as OrdreRelacherRames
from secondaires.navigation.equipage.ordres.long_deplacer import LongDeplacer
from secondaires.navigation.equipage.volonte import Volonte
class RelacherRames(Volonte):
"""Classe représentant une volonté.
Cette volonté demande à ceux qui tiennent les rames de les lâcher.
"""
cle = "relacher_rames"
ordre_court = re.compile(r"^rr$", re.I)
ordre_long = re.compile(r"^relacher\s+rames?$", re.I)
def choisir_matelots(self, exception=None):
"""Retourne le matelot le plus apte à accomplir la volonté."""
equipage = self.navire.equipage
objectifs = []
rames = self.navire.rames
rames = [r for r in rames if r.tenu]
for paire in rames:
matelot = equipage.get_matelot_depuis_personnage(paire.tenu)
if matelot:
objectifs.append((matelot, paire))
return objectifs
def executer(self, objectifs):
"""Exécute la volonté."""
for sequence in objectifs:
matelot, rames = sequence
matelot.invalider_ordres("ramer")
navire = self.navire
ordres = []
relacher = OrdreRelacherRames(matelot, navire, rames)
ordres.append(relacher)
self.ajouter_ordres(matelot, ordres)
def crier_ordres(self, personnage):
"""On fait crier l'ordre au personnage."""
msg = "{} s'écrie : rameurs, laissez courir !".format(
personnage.distinction_audible)
self.navire.envoyer(msg)
@classmethod
def extraire_arguments(cls, navire):
"""Extrait les arguments de la volonté."""
return ()
| stormi/tsunami | src/secondaires/navigation/equipage/volontes/relacher_rames.py | Python | bsd-3-clause | 3,392 |
#!/usr/bin/env python
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
pass # It's ok to not import because this is only necessary to upload results to BQ.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
'linux': ['epollsig', 'poll', 'poll-cv'],
# TODO(ctiller, sreecha): enable epoll1, epollex, epoll-thread-pool
'mac': ['poll'],
}
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
try:
subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
except subprocess.CalledProcessError as e:
logging.exception("Error while running command '%s'. Exit status %d. Output:\n%s",
e.cmd, e.returncode, e.output)
raise
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=[], iomgr_platform='native'):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ['CONFIG'] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
self.iomgr_platform = iomgr_platform
def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
shortname=None, environ={}, cpu_cost=1.0, flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
for k, v in environ.items():
actual_environ[k] = v
return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
flake_retries=5 if flaky or args.allow_flakes else 0,
timeout_retries=3 if args.allow_flakes else 0)
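# Usage sketch (hypothetical target): a Config built from configs.json turns a
# test binary into a runnable job for jobset, e.g.
#   spec = _CONFIGS['opt'].job_spec(['bins/opt/foo_test'], shortname='foo_test')
# which merges the config's environment with any per-test environment and
# applies the config's timeout multiplier.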
def get_c_tests(travis, test_lang):
platforms_str = 'ci_platforms' if travis else 'platforms'
with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [tgt
for tgt in js
if tgt['language'] == test_lang and
platform_string() in tgt[platforms_str] and
not (travis and tgt['flaky'])]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
_PythonConfigVars = collections.namedtuple(
'_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
'venv_relative_python', 'toolchain', 'runner'])
def _python_config_generator(name, major, minor, bits, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_python_pattern_function(major=major, minor=minor, bits=bits)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_pypy_pattern_function(major=major)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
if self.args.compiler == 'cmake':
_check_arch(self.args.arch, ['default'])
self._use_cmake = True
self._docker_distro = 'jessie'
self._make_options = []
elif self.platform == 'windows':
self._use_cmake = False
self._make_options = [_windows_toolset_option(self.args.compiler),
_windows_arch_option(self.args.arch)]
else:
self._use_cmake = False
self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
self.args.compiler)
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV '
try:
cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
pass
try:
ldflags = subprocess.check_output(['pkg-config', '--libs', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
ldflags = '-luv '
self._make_options += ['EXTRA_CPPFLAGS={}'.format(cflags),
'EXTRA_LDLIBS={}'.format(ldflags)]
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
if self._use_cmake and target.get('boringssl', False):
# cmake doesn't build boringssl tests
continue
polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
if target.get('uses_polling', True)
else ['all'])
if self.args.iomgr_platform == 'uv':
polling_strategies = ['all']
for polling_strategy in polling_strategies:
env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY': polling_strategy,
'GRPC_VERBOSITY': 'DEBUG'}
        resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
if resolver:
env['GRPC_DNS_RESOLVER'] = resolver
shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
timeout_scaling = 1
if polling_strategy == 'poll-cv':
timeout_scaling *= 5
if polling_strategy in target.get('excluded_poll_engines', []):
continue
# Scale overall test timeout if running under various sanitizers.
config = self.args.config
if ('asan' in config
or config == 'msan'
or config == 'tsan'
or config == 'ubsan'
or config == 'helgrind'
or config == 'memcheck'):
timeout_scaling *= 20
if self.config.build_config in target['exclude_configs']:
continue
if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
continue
if self.platform == 'windows':
if self._use_cmake:
binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[self.config.build_config], target['name'])
else:
binary = 'vsprojects/%s%s/%s.exe' % (
'x64/' if self.args.arch == 'x64' else '',
_MSBUILD_CONFIG[self.config.build_config],
target['name'])
else:
if self._use_cmake:
binary = 'cmake/build/%s' % target['name']
else:
binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
if 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a
# complete list of the tests contained in a binary
# for each test, we then add a job to run, filtering for just that
# test
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output([binary, '--gtest_list_tests'],
stderr=fnull)
base = None
for line in tests.split('\n'):
i = line.find('#')
if i >= 0: line = line[:i]
if not line: continue
if line[0] != ' ':
base = line.strip()
else:
assert base is not None
assert line[1] == ' '
test = base + line.strip()
cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
out.append(self.config.job_spec(cmdline,
shortname='%s %s' % (' '.join(cmdline), shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
environ=env))
else:
cmdline = [binary] + target['args']
out.append(self.config.job_spec(cmdline,
shortname=' '.join(
pipes.quote(arg)
for arg in cmdline) +
shortname_ext,
cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
'check_epollexclusive']
def make_options(self):
    return self._make_options
def pre_build_steps(self):
if self._use_cmake:
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
else:
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_c.bat']]
else:
return []
def build_steps(self):
return []
def post_tests_steps(self):
if self.platform == 'windows':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
if self._use_cmake:
return 'cmake/build/Makefile'
else:
return 'Makefile'
def _clang_make_options(self, version_suffix=''):
return ['CC=clang%s' % version_suffix,
'CXX=clang++%s' % version_suffix,
'LD=clang%s' % version_suffix,
'LDXX=clang++%s' % version_suffix]
def _gcc_make_options(self, version_suffix):
return ['CC=gcc%s' % version_suffix,
'CXX=g++%s' % version_suffix,
'LD=gcc%s' % version_suffix,
'LDXX=g++%s' % version_suffix]
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and make options to use for given compiler."""
if not use_docker and not _is_use_docker_child():
_check_compiler(compiler, ['default'])
if compiler == 'gcc4.9' or compiler == 'default':
return ('jessie', [])
elif compiler == 'gcc4.8':
return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'gcc_musl':
return ('alpine', [])
elif compiler == 'clang3.4':
# on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
return ('ubuntu1404', self._clang_make_options())
elif compiler == 'clang3.5':
return ('jessie', self._clang_make_options(version_suffix='-3.5'))
elif compiler == 'clang3.6':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
elif compiler == 'clang3.7':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
else:
raise Exception('Compiler %s not supported.' % compiler)
def dockerfile_dir(self):
return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return self.make_target
class NodeLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
# Note: electron ABI only depends on major and minor version, so that's all
# we should specify in the compiler argument
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6',
'node7', 'node8',
'electron1.3', 'electron1.6'])
if self.args.compiler == 'default':
self.runtime = 'node'
self.node_version = '8'
else:
if self.args.compiler.startswith('electron'):
self.runtime = 'electron'
self.node_version = self.args.compiler[8:]
else:
self.runtime = 'node'
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
if self.platform == 'windows':
return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
else:
run_script = 'run_node'
if self.runtime == 'electron':
run_script += '_electron'
return [self.config.job_spec(['tools/run_tests/helper_scripts/{}.sh'.format(run_script),
self.node_version],
None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
build_script = 'pre_build_node'
if self.runtime == 'electron':
build_script += '_electron'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script),
self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
if self.platform == 'windows':
      if self.config.build_config == 'dbg':
config_flag = '--debug'
else:
config_flag = '--release'
return [['tools\\run_tests\\helper_scripts\\build_node.bat',
config_flag]]
else:
build_script = 'build_node'
if self.runtime == 'electron':
build_script += '_electron'
# building for electron requires a patch version
self.node_version += '.0'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script),
self.node_version]]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node'
class PhpLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php'
class Php7Language(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php7'
class PythonConfig(collections.namedtuple('PythonConfig', [
'name', 'build', 'run'])):
"""Tuple of commands (named s.t. 'what it says on the tin' applies)"""
class PythonLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
self.pythons = self._get_pythons(self.args)
def test_specs(self):
# load list of known test suites
with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
tests_json = json.load(tests_json_file)
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
return [self.config.job_spec(
config.run,
timeout_seconds=5*60,
environ=dict(list(environment.items()) +
[('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
shortname='%s.test.%s' % (config.name, suite_name),)
for suite_name in tests_json
for config in self.pythons]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [config.build for config in self.pythons]
def post_tests_steps(self):
    if self.config.build_config != 'gcov':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
def python_manager_name(self):
if self.args.compiler in ['python3.5', 'python3.6']:
return 'pyenv'
elif self.args.compiler == 'python_alpine':
return 'alpine'
else:
return 'jessie'
def _get_pythons(self, args):
if args.arch == 'x86':
bits = '32'
else:
bits = '64'
if os.name == 'nt':
shell = ['bash']
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
builder_prefix_arguments = ['MINGW{}'.format(bits)]
venv_relative_python = ['Scripts/python.exe']
toolchain = ['mingw32']
else:
shell = []
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
builder_prefix_arguments = []
venv_relative_python = ['bin/python']
toolchain = ['unix']
runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
venv_relative_python, toolchain, runner)
python27_config = _python_config_generator(name='py27', major='2',
minor='7', bits=bits,
config_vars=config_vars)
python34_config = _python_config_generator(name='py34', major='3',
minor='4', bits=bits,
config_vars=config_vars)
python35_config = _python_config_generator(name='py35', major='3',
minor='5', bits=bits,
config_vars=config_vars)
python36_config = _python_config_generator(name='py36', major='3',
minor='6', bits=bits,
config_vars=config_vars)
pypy27_config = _pypy_config_generator(name='pypy', major='2',
config_vars=config_vars)
pypy32_config = _pypy_config_generator(name='pypy3', major='3',
config_vars=config_vars)
if args.compiler == 'default':
if os.name == 'nt':
return (python27_config,)
else:
return (python27_config, python34_config,)
elif args.compiler == 'python2.7':
return (python27_config,)
elif args.compiler == 'python3.4':
return (python34_config,)
elif args.compiler == 'python3.5':
return (python35_config,)
elif args.compiler == 'python3.6':
return (python36_config,)
elif args.compiler == 'pypy':
return (pypy27_config,)
elif args.compiler == 'pypy3':
return (pypy32_config,)
elif args.compiler == 'python_alpine':
return (python27_config,)
else:
raise Exception('Compiler %s not supported.' % args.compiler)
def __str__(self):
return 'python'
class RubyLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
tests = [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
tests.append(self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return tests
def pre_build_steps(self):
return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'ruby'
class CSharpLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
_check_compiler(self.args.compiler, ['coreclr', 'default'])
_check_arch(self.args.arch, ['default'])
self._cmake_arch_option = 'x64'
self._make_options = []
else:
_check_compiler(self.args.compiler, ['default', 'coreclr'])
self._docker_distro = 'jessie'
if self.platform == 'mac':
# TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
self._make_options = ['EMBED_OPENSSL=true']
if self.args.compiler != 'coreclr':
# On Mac, official distribution of mono is 32bit.
self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
else:
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
with open('src/csharp/tests.json') as f:
tests_by_assembly = json.load(f)
msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
nunit_args = ['--labels=All', '--noresult', '--workers=1']
assembly_subdir = 'bin/%s' % msbuild_config
assembly_extension = '.exe'
if self.args.compiler == 'coreclr':
assembly_subdir += '/netcoreapp1.0'
runtime_cmd = ['dotnet', 'exec']
assembly_extension = '.dll'
else:
assembly_subdir += '/net45'
if self.platform == 'windows':
runtime_cmd = []
else:
runtime_cmd = ['mono']
specs = []
for assembly in six.iterkeys(tests_by_assembly):
assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
assembly_subdir,
assembly,
assembly_extension)
if self.config.build_config != 'gcov' or self.platform != 'windows':
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
specs.append(self.config.job_spec(cmdline,
shortname='csharp.%s' % test,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
else:
# For C# test coverage, run all tests from the same assembly at once
# using OpenCover.Console (only works on Windows).
cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
'-target:%s' % assembly_file,
'-targetdir:src\\csharp',
'-targetargs:%s' % ' '.join(nunit_args),
'-filter:+[Grpc.Core]*',
'-register:user',
'-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
# set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
# to prevent problems with registering the profiler.
run_exclusive = 1000000
specs.append(self.config.job_spec(cmdline,
shortname='csharp.coverage.%s' % assembly,
cpu_cost=run_exclusive,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return specs
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat', self._cmake_arch_option]]
else:
return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
def make_options(self):
    return self._make_options
def build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def makefile_name(self):
if self.platform == 'windows':
return 'cmake/build/%s/Makefile' % self._cmake_arch_option
else:
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return 'csharp'
class ObjCLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [
self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
timeout_seconds=60*60,
shortname='objc-tests',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(['src/objective-c/tests/build_example_test.sh'],
timeout_seconds=30*60,
shortname='objc-examples-build',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['interop_server']
def make_options(self):
return []
def build_steps(self):
return [['src/objective-c/tests/build_tests.sh']]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return None
def __str__(self):
return 'objc'
class Sanity(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
environ={'TEST': 'true'}
if _is_use_docker_child():
environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
return [self.config.job_spec(cmd['script'].split(),
timeout_seconds=30*60,
environ=environ,
cpu_cost=cmd.get('cpu_cost', 1))
for cmd in yaml.load(f)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['run_dep_checks']
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/sanity'
def __str__(self):
return 'sanity'
class NodeExpressLanguage(object):
"""Dummy Node express test target to enable running express performance
benchmarks"""
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6'])
if self.args.compiler == 'default':
self.node_version = '4'
else:
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
return []
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node_express'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
_CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
'node': NodeLanguage(),
'node_express': NodeExpressLanguage(),
'php': PhpLanguage(),
'php7': Php7Language(),
'python': PythonLanguage(),
'ruby': RubyLanguage(),
'csharp': CSharpLanguage(),
'objc' : ObjCLanguage(),
'sanity': Sanity()
}
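# Maps our build config names to the MSBuild configuration used for Windows builds.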
_MSBUILD_CONFIG = {
'dbg': 'Debug',
'opt': 'Release',
'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
"""Checks that architecture option is valid."""
if platform_string() == 'windows':
_windows_arch_option(arch)
elif platform_string() == 'linux':
# On linux, we need to be running under docker with the right architecture.
runtime_arch = platform.architecture()[0]
if arch == 'default':
return
elif runtime_arch == '64bit' and arch == 'x64':
return
elif runtime_arch == '32bit' and arch == 'x86':
return
else:
print('Architecture %s does not match current runtime architecture.' % arch)
sys.exit(1)
else:
if args.arch != 'default':
print('Architecture %s not supported on current platform.' % args.arch)
sys.exit(1)
def _windows_build_bat(compiler):
"""Returns name of build.bat for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013':
return 'vsprojects\\build_vs2013.bat'
elif compiler == 'vs2015':
return 'vsprojects\\build_vs2015.bat'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _windows_toolset_option(compiler):
"""Returns msbuild PlatformToolset for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
return '/p:PlatformToolset=v120'
elif compiler == 'vs2015':
return '/p:PlatformToolset=v140'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
"""Auxilary function to parse the "runs_per_test" flag.
Returns:
A positive integer or 0, the latter indicating an infinite number of
runs.
Raises:
argparse.ArgumentTypeError: Upon invalid input.
"""
if arg_str == 'inf':
return 0
try:
n = int(arg_str)
if n <= 0: raise ValueError
return n
  except ValueError:
msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
raise argparse.ArgumentTypeError(msg)
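# Examples: runs_per_test_type('3') == 3, runs_per_test_type('inf') == 0 (loop forever).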
def percent_type(arg_str):
pct = float(arg_str)
if pct > 100 or pct < 0:
raise argparse.ArgumentTypeError(
"'%f' is not a valid percentage in the [0, 100] range" % pct)
return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
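# Used below to detect whether --sample_percent was left at its default value of 100.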
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c', '--config',
choices=sorted(_CONFIGS.keys()),
default='opt')
argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
help='A positive integer or "inf". If "inf", all tests will run in an '
'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-p', '--sample_percent', default=100.0, type=percent_type,
help='Run a random sample with that percentage of tests')
argp.add_argument('-f', '--forever',
default=False,
action='store_const',
const=True)
argp.add_argument('-t', '--travis',
default=False,
action='store_const',
const=True)
argp.add_argument('--newline_on_success',
default=False,
action='store_const',
const=True)
argp.add_argument('-l', '--language',
choices=['all'] + sorted(_LANGUAGES.keys()),
nargs='+',
default=['all'])
argp.add_argument('-S', '--stop_on_failure',
default=False,
action='store_const',
const=True)
argp.add_argument('--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
argp.add_argument('--allow_flakes',
default=False,
action='store_const',
const=True,
help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
argp.add_argument('--arch',
choices=['default', 'x86', 'x64'],
default='default',
help='Selects architecture to target. For some platforms "default" is the only supported choice.')
argp.add_argument('--compiler',
choices=['default',
'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
'vs2013', 'vs2015',
'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine',
'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
'electron1.3', 'electron1.6',
'coreclr',
'cmake'],
default='default',
help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--iomgr_platform',
choices=['native', 'uv'],
default='native',
help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
help='Perform all the build steps but don\'t run any tests.')
argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
help='Measure the cpu costs of tests')
argp.add_argument('--update_submodules', default=[], nargs='*',
help='Update some submodules before building. If any are updated, also run generate_projects. ' +
'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x', '--xml_report', default=None, type=str,
help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name', default='tests', type=str,
help='Test suite name to use in generated JUnit XML report')
argp.add_argument('--quiet_success',
default=False,
action='store_const',
const=True,
help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. ' +
'Useful when running many iterations of each test (argument -n).')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument('--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument('--bq_result_table',
default='',
type=str,
nargs='?',
help='Upload test results to a specified BQ table.')
args = argp.parse_args()
if args.force_default_poller:
_POLLING_STRATEGIES = {}
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
spec = spec.split(':', 1)
if len(spec) == 1:
submodule = spec[0]
branch = 'master'
elif len(spec) == 2:
submodule = spec[0]
branch = spec[1]
cwd = 'third_party/%s' % submodule
def git(cmd, cwd=cwd):
print('in %s: git %s' % (cwd, cmd))
run_shell_command('git %s' % cmd, cwd=cwd)
git('fetch')
git('checkout %s' % branch)
git('pull origin %s' % branch)
if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
need_to_regenerate_projects = True
if need_to_regenerate_projects:
if jobset.platform_string() == 'linux':
run_shell_command('tools/buildgen/generate_projects.sh')
else:
print('WARNING: may need to regenerate projects, but since we are not on')
print(' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
if args.travis:
_FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
if 'all' in args.language:
lang_list = _LANGUAGES.keys()
else:
lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
for bad in ['objc', 'sanity']:
if bad in lang_list:
lang_list.remove(bad)
languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
l.configure(run_config, args)
language_make_options=[]
if any(language.make_options() for language in languages):
if not 'gcov' in args.config and len(languages) != 1:
print('languages with custom make options cannot be built simultaneously with other languages')
sys.exit(1)
else:
# Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
# together, and is only used under gcov. All other configs should build languages individually.
language_make_options = list(set([make_option for lang in languages for make_option in lang.make_options()]))
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run tests under docker.')
print('')
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment.')
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
if len(dockerfile_dirs) > 1:
if 'gcov' in args.config:
dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
print ('Using multilang_jessie_x64 docker image for code coverage for '
'all languages.')
else:
print ('Languages to be tested require running under different docker '
'images.')
sys.exit(1)
else:
dockerfile_dir = next(iter(dockerfile_dirs))
child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
env = os.environ.copy()
env['RUN_TESTS_COMMAND'] = run_tests_cmd
env['DOCKERFILE_DIR'] = dockerfile_dir
env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
if args.xml_report:
env['XML_REPORT'] = args.xml_report
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
subprocess.check_call('tools/run_tests/dockerize/build_docker_and_run_tests.sh',
shell=True,
env=env)
sys.exit(0)
_check_arch_option(args.arch)
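# Produce the JobSpecs that perform the actual compilation for the given config and
# make targets: 'cmake --build' or MSBuild on Windows, make (or cmake + make) elsewhere.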
def make_jobspec(cfg, targets, makefile='Makefile'):
if platform_string() == 'windows':
if makefile.startswith('cmake/build/'):
return [jobset.JobSpec(['cmake', '--build', '.',
'--target', '%s' % target,
'--config', _MSBUILD_CONFIG[cfg]],
cwd=os.path.dirname(makefile),
timeout_seconds=None) for target in targets]
extra_args = []
# better do parallel compilation
# empirically /m:2 gives the best performance/price and should prevent
# overloading the windows workers.
extra_args.extend(['/m:2'])
# disable PDB generation: it's broken, and we don't need it during CI
extra_args.extend(['/p:Jenkins=true'])
return [
jobset.JobSpec([_windows_build_bat(args.compiler),
'vsprojects\\%s.sln' % target,
'/p:Configuration=%s' % _MSBUILD_CONFIG[cfg]] +
extra_args +
language_make_options,
shell=True, timeout_seconds=None)
for target in targets]
else:
if targets and makefile.startswith('cmake/build/'):
# With cmake, we've passed all the build configuration in the pre-build step already
return [jobset.JobSpec([os.getenv('MAKE', 'make'),
'-j', '%d' % args.jobs] +
targets,
cwd='cmake/build',
timeout_seconds=None)]
if targets:
return [jobset.JobSpec([os.getenv('MAKE', 'make'),
'-f', makefile,
'-j', '%d' % args.jobs,
'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
'CONFIG=%s' % cfg,
'Q='] +
language_make_options +
([] if not args.travis else ['JENKINS_BUILD=1']) +
targets,
timeout_seconds=None)]
else:
return []
make_targets = {}
for l in languages:
makefile = l.makefile_name()
make_targets[makefile] = make_targets.get(makefile, set()).union(
set(l.make_targets()))
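# Environment variables shared by every build step; MSBUILD_CONFIG is only set for
# configs that map to an MSBuild configuration.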
def build_step_environ(cfg):
environ = {'CONFIG': cfg}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ['MSBUILD_CONFIG'] = msbuild_cfg
return environ
build_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=5)
for l in languages
for cmdline in l.pre_build_steps()))
if make_targets:
make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
build_steps.extend(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
for l in languages
for cmdline in l.build_steps()))
post_tests_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
for l in languages
for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
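# Ask a server left running from an older checkout (listening on legacy_server_port)
# to shut itself down; if it cannot be reached the request is silently ignored.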
def _shut_down_legacy_server(legacy_server_port):
try:
version = int(urllib.request.urlopen(
'http://localhost:%d/version_number' % legacy_server_port,
timeout=10).read())
except:
pass
else:
urllib.request.urlopen(
'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
BUILD = object()
TEST = object()
POST_TEST = object()
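# True only when the check_epollexclusive helper binary exists for the current config
# and exits successfully (used to decide whether the 'epollex' polling strategy can be tested).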
def _has_epollexclusive():
try:
subprocess.check_call('bins/%s/check_epollexclusive' % args.config)
return True
  except subprocess.CalledProcessError:
return False
  except OSError:
# For languages other than C and Windows the binary won't exist
return False
# returns a list of things that failed (or an empty list on success)
def _build_and_run(
check_cancelled, newline_on_success, xml_report=None, build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(
build_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
return []
if not args.travis and not _has_epollexclusive() and platform_string() in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string()]:
print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
_POLLING_STRATEGIES[platform_string()].remove('epollex')
# start antagonists
antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)]
start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(
spec
for language in languages
for spec in language.test_specs()
if (re.search(args.regex, spec.shortname) and
(args.regex_exclude == '' or
not re.search(args.regex_exclude, spec.shortname))))
    # When running on travis, we want our test runs to be as similar as possible
# for reproducibility purposes.
if args.travis and args.max_time <= 0:
massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(one_run) # random.sample needs an indexable seq.
num_jobs = len(massaged_one_run)
# for a random sample, get as many as indicated by the 'sample_percent'
# argument. By default this arg is 100, resulting in a shuffle of all
# jobs.
sample_size = int(num_jobs * args.sample_percent/100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
print("Running %d tests out of %d (~%d%%)" %
(sample_size, num_jobs, args.sample_percent))
if infinite_runs:
assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=args.travis, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
quiet_success=args.quiet_success, max_time=args.max_time)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
else:
jobset.message(
'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
if args.bq_result_table and resultset:
upload_results_to_bq(resultset, args.bq_result_table, args, platform_string())
if xml_report and resultset:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
number_failures, _ = jobset.run(
post_tests_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
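# In --forever mode, keep rebuilding and rerunning whenever a change is detected under
# the watched source directories; otherwise do a single build-and-run pass.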
if forever:
success = True
while True:
dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
initial_time = dw.most_recent_change()
have_files_changed = lambda: dw.most_recent_change() != initial_time
previous_success = success
    errors = _build_and_run(check_cancelled=have_files_changed,
                            newline_on_success=False,
                            build_only=args.build_only)
    success = not errors
    if not previous_success and success:
jobset.message('SUCCESS',
'All tests are now passing properly',
do_newline=True)
jobset.message('IDLE', 'No change detected')
while not have_files_changed():
time.sleep(1)
else:
errors = _build_and_run(check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
xml_report=args.xml_report,
build_only=args.build_only)
if not errors:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
else:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
exit_code = 0
if BuildAndRunError.BUILD in errors:
exit_code |= 1
if BuildAndRunError.TEST in errors:
exit_code |= 2
if BuildAndRunError.POST_TEST in errors:
exit_code |= 4
sys.exit(exit_code)
| vsco/grpc | tools/run_tests/run_tests.py | Python | bsd-3-clause | 56,812 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
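# Organization-independent saved searches that this migration creates as global defaults.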
DEFAULT_SAVED_SEARCHES = [
{
'name': 'Unresolved Issues',
'query': 'is:unresolved',
},
{
'name': 'Needs Triage',
'query': 'is:unresolved is:unassigned'
},
{
'name': 'Assigned To Me',
'query': 'is:unresolved assigned:me'
},
{
'name': 'My Bookmarks',
'query': 'is:unresolved bookmarks:me'
},
{
'name': 'New Today',
'query': 'is:unresolved age:-24h'
},
]
class Migration(DataMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = True
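    # forwards() closes the currently open transaction, runs the backfill outside of
    # it, and re-opens a transaction afterwards (and on error) so South can clean up.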
def forwards(self, orm):
db.commit_transaction()
try:
self._forwards(orm)
except Exception:
db.start_transaction()
raise
db.start_transaction()
def _forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
SavedSearch = orm['sentry.SavedSearch']
for search in RangeQuerySetWrapperWithProgressBar(
SavedSearch.objects.filter(is_global__isnull=True)
):
search.is_global = False
search.save()
default_searches = []
for search in DEFAULT_SAVED_SEARCHES:
default_searches.append(
SavedSearch(
name=search['name'],
query=search['query'],
is_global=True,
)
)
SavedSearch.objects.bulk_create(default_searches)
def backwards(self, orm):
"Write your backwards methods here."
# These will be the only rows with a null `project_id`, so we can safely
# make the column `not null` after deleting them.
SavedSearch = orm['sentry.SavedSearch']
SavedSearch.objects.filter(is_global=True).delete()
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'82029f091b094a2ca18ef45d3958513c683b4643c65f4fbfacfbd1cdee187a51'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'74093ba9478e4d41ae25dfcb036bd062ea58b43d394140a4989d6ec19f179b6a'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Proper Crawdad'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'12d42834d62142d4beaecf34588354dd'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 1, 18, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 2, 17, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'85185474cfa44548852999d7e605898adf44aa5a15b04d0da501445694f622a7'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'7ff1f4918fe84acfa4172b59731cc504d6faae0cc6234271a6dcbc8c332cd65a'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.assistantactivity': {
'Meta': {'unique_together': "(('user', 'guide_id'),)", 'object_name': 'AssistantActivity', 'db_table': "'sentry_assistant_activity'"},
'dismissed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'guide_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'useful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'viewed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 1, 25, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dashboard': {
'Meta': {'unique_together': "(('organization', 'title'),)", 'object_name': 'Dashboard'},
'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sentry.deletedorganization': {
'Meta': {'object_name': 'DeletedOrganization'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedproject': {
'Meta': {'object_name': 'DeletedProject'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedteam': {
'Meta': {'object_name': 'DeletedTeam'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.discoversavedquery': {
'Meta': {'object_name': 'DiscoverSavedQuery'},
'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.DiscoverSavedQueryProject']", 'symmetrical': 'False'}),
'query': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.discoversavedqueryproject': {
'Meta': {'unique_together': "(('project', 'discover_saved_query'),)", 'object_name': 'DiscoverSavedQueryProject'},
'discover_saved_query': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DiscoverSavedQuery']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.email': {
'Meta': {'object_name': 'Email'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('organization_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_hidden': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventattachment': {
'Meta': {'unique_together': "(('project_id', 'event_id', 'file'),)", 'object_name': 'EventAttachment', 'index_together': "(('project_id', 'date_added'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.externalissue': {
'Meta': {'unique_together': "(('organization_id', 'integration_id', 'key'),)", 'object_name': 'ExternalIssue'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'metadata': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.fileblobowner': {
'Meta': {'unique_together': "(('blob', 'organization'),)", 'object_name': 'FileBlobOwner'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.Team']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.groupenvironment': {
'Meta': {'unique_together': "[('group_id', 'environment_id')]", 'object_name': 'GroupEnvironment', 'index_together': "[('environment_id', 'first_release_id')]"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'first_release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouplink': {
'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupshare': {
'Meta': {'object_name': 'GroupShare'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'971a077057f54734809e8dc332408db4'", 'unique': 'True', 'max_length': '32'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'), ('idp', 'user'))", 'object_name': 'Identity'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'external_id'),)", 'object_name': 'IdentityProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'})
},
'sentry.integrationexternalproject': {
'Meta': {'unique_together': "(('organization_integration_id', 'external_id'),)", 'object_name': 'IntegrationExternalProject'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'organization_integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'resolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'unresolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.latestrelease': {
'Meta': {'unique_together': "(('repository_id', 'environment_id'),)", 'object_name': 'LatestRelease'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'deploy_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'token_expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectavatar': {
'Meta': {'object_name': 'ProjectAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Project']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectcficachefile': {
'Meta': {'unique_together': "(('project', 'debug_file'),)", 'object_name': 'ProjectCfiCacheFile'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'debug_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']", 'on_delete': 'models.DO_NOTHING', 'db_column': "'dsym_file_id'"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectdebugfile': {
'Meta': {'object_name': 'ProjectDebugFile', 'db_table': "'sentry_projectdsymfile'", 'index_together': "(('project', 'debug_id'),)"},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'debug_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_column': "'uuid'"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectownership': {
'Meta': {'object_name': 'ProjectOwnership'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'fallthrough': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'schema': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectredirect': {
'Meta': {'unique_together': "(('organization', 'redirect_slug'),)", 'object_name': 'ProjectRedirect'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'redirect_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'sentry.projectsymcachefile': {
'Meta': {'unique_together': "(('project', 'debug_file'),)", 'object_name': 'ProjectSymCacheFile'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'debug_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']", 'on_delete': 'models.DO_NOTHING', 'db_column': "'dsym_file_id'"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.promptsactivity': {
'Meta': {'unique_together': "(('user', 'feature', 'organization_id', 'project_id'),)", 'object_name': 'PromptsActivity'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.pullrequest': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'PullRequest', 'db_table': "'sentry_pull_request'", 'index_together': "(('repository_id', 'date_added'), ('organization_id', 'merge_commit_sha'))"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'merge_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.pullrequestcommit': {
'Meta': {'unique_together': "(('pull_request', 'commit'),)", 'object_name': 'PullRequestCommit', 'db_table': "'sentry_pullrequest_commit'"},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'pull_request': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.PullRequest']"})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.relay': {
'Meta': {'object_name': 'Relay'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'relay_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization', 'release', 'environment'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile', 'index_together': "(('release', 'name'),)"},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseprojectenvironment': {
'Meta': {'unique_together': "(('project', 'release', 'environment'),)", 'object_name': 'ReleaseProjectEnvironment'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'new_issues_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_global': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 2, 17, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'872e507076244783876be9f74901e6ae'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.sentryapp': {
'Meta': {'object_name': 'SentryApp'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiApplication']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_alertable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.TextField', [], {}),
'overview': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'owned_sentry_apps'", 'to': "orm['sentry.Organization']"}),
'proxy_user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.User']"}),
'redirect_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'3e3a2f6f-9136-4a26-8155-31829e66ecc4'", 'max_length': '64'}),
'webhook_url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'sentry.sentryappavatar': {
'Meta': {'object_name': 'SentryAppAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.SentryApp']"})
},
'sentry.sentryappinstallation': {
'Meta': {'object_name': 'SentryAppInstallation'},
'api_grant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiGrant']"}),
'authorization': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiAuthorization']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_app_installations'", 'to': "orm['sentry.Organization']"}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'installations'", 'to': "orm['sentry.SentryApp']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'227713f5-78ab-401b-a90d-80720b8ed80e'", 'max_length': '64'})
},
'sentry.servicehook': {
'Meta': {'object_name': 'ServiceHook'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'f9f6f76988ea45d5b1029e2e604e95ace68b3d9e8c2743b9a7755a347e488f74'"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.teamavatar': {
'Meta': {'object_name': 'TeamAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Team']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sentry_app': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'flfk8HwwLBodD7uLZ8Jy6PhjawDBeBKT'", 'max_length': '32'})
},
'sentry.userip': {
'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'region_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userpermission': {
'Meta': {'unique_together': "(('user', 'permission'),)", 'object_name': 'UserPermission'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']", 'null': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.widget': {
'Meta': {'unique_together': "(('dashboard', 'order'), ('dashboard', 'title'))", 'object_name': 'Widget'},
'dashboard': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Dashboard']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_options': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'display_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sentry.widgetdatasource': {
'Meta': {'unique_together': "(('widget', 'name'), ('widget', 'order'))", 'object_name': 'WidgetDataSource'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'widget': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Widget']"})
}
}
complete_apps = ['sentry']
symmetrical = True
| mvaled/sentry | src/sentry/south_migrations/0458_global_searches_data_migration.py | Python | bsd-3-clause | 120,420 |
# pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant"
import warnings
import pickle
import operator
from functools import reduce
import numpy as np
import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import TestCase, run_module_suite, assert_raises
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
assert_mask_equal,
)
from numpy.ma.core import (
MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,
allclose, allequal, alltrue, angle, anom, arange, arccos, arctan2,
arcsin, arctan, argsort, array, asarray, choose, concatenate,
conjugate, cos, cosh, count, default_fill_value, diag, divide, empty,
empty_like, equal, exp, flatten_mask, filled, fix_invalid,
flatten_structured_array, fromflex, getmask, getmaskarray, greater,
greater_equal, identity, inner, isMaskedArray, less, less_equal, log,
log10, make_mask, make_mask_descr, mask_or, masked, masked_array,
masked_equal, masked_greater, masked_greater_equal, masked_inside,
masked_less, masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, max, maximum,
maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
mvoid, nomask, not_equal, ones, outer, power, product, put, putmask,
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
pi = np.pi
class TestMaskedArray(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
def test_basicattributes(self):
# Tests some basic array attributes.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.ndim, 1)
assert_equal(b.ndim, 1)
assert_equal(a.size, 3)
assert_equal(b.size, 3)
assert_equal(a.shape, (3,))
assert_equal(b.shape, (3,))
def test_basic0d(self):
# Checks masking a scalar
x = masked_array(0)
assert_equal(str(x), '0')
x = masked_array(0, mask=True)
assert_equal(str(x), str(masked_print_option))
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
self.assertTrue(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertTrue((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(zm.dtype, z.dtype)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_array_equal(xm, xf)
assert_array_equal(filled(xm, 1.e20), xf)
assert_array_equal(x, xm)
def test_basic2d(self):
# Test of basic array creation and properties in 2 dimensions.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_equal(xm, xf)
assert_equal(filled(xm, 1.e20), xf)
assert_equal(x, xm)
def test_concatenate_basic(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# basic concatenation
assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
assert_equal(np.concatenate((x, y)), concatenate((x, y)))
assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
def test_concatenate_alongaxis(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# Concatenation along an axis
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
assert_equal(xm.mask, np.reshape(m1, s))
assert_equal(ym.mask, np.reshape(m2, s))
xmym = concatenate((xm, ym), 1)
assert_equal(np.concatenate((x, y), 1), xmym)
assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
x = zeros(2)
y = array(ones(2), mask=[False, True])
z = concatenate((x, y))
assert_array_equal(z, [0, 0, 1, 1])
assert_array_equal(z.mask, [False, False, False, True])
z = concatenate((y, x))
assert_array_equal(z, [1, 1, 0, 0])
assert_array_equal(z.mask, [False, True, False, False])
def test_concatenate_flexible(self):
# Tests the concatenation on flexible arrays.
data = masked_array(list(zip(np.random.rand(10),
np.arange(10))),
dtype=[('a', float), ('b', int)])
test = concatenate([data[:5], data[5:]])
assert_equal_records(test, data)
def test_creation_ndmin(self):
# Check the use of ndmin
x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
assert_equal(x.shape, (1, 3))
assert_equal(x._data, [[1, 2, 3]])
assert_equal(x._mask, [[1, 0, 0]])
def test_creation_ndmin_from_maskedarray(self):
# Make sure we're not losing the original mask w/ ndmin
x = array([1, 2, 3])
x[-1] = masked
xx = array(x, ndmin=2, dtype=float)
assert_equal(x.shape, x._mask.shape)
assert_equal(xx.shape, xx._mask.shape)
def test_creation_maskcreation(self):
        # Tests how masks are initialized at the creation of MaskedArrays.
data = arange(24, dtype=float)
data[[3, 6, 15]] = masked
dma_1 = MaskedArray(data)
assert_equal(dma_1.mask, data.mask)
dma_2 = MaskedArray(dma_1)
assert_equal(dma_2.mask, dma_1.mask)
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
def test_creation_with_list_of_maskedarrays(self):
        # Tests creating a masked array from a list of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
self.assertTrue(data.mask is nomask)
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xm.fill_value = -9999
xm._hardmask = True
xmm = asarray(xm)
assert_equal(xmm._data, xm._data)
assert_equal(xmm._mask, xm._mask)
assert_equal(xmm.fill_value, xm.fill_value)
assert_equal(xmm._hardmask, xm._hardmask)
def test_fix_invalid(self):
# Checks fix_invalid.
with np.errstate(invalid='ignore'):
data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
data_fixed = fix_invalid(data)
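            # The unmasked NaN gets masked, and its slot in ._data is replaced
            # by the fill value.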
assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
assert_equal(data_fixed._mask, [1., 0., 1.])
def test_maskedelement(self):
# Test of masked element
x = arange(6)
x[1] = masked
self.assertTrue(str(masked) == '--')
self.assertTrue(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
# don't know why these should raise an exception...
#self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
#self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
#self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
#self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
def test_set_element_as_object(self):
# Tests setting elements with object
a = empty(1, dtype=object)
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
self.assertTrue(a[0] is x)
import datetime
dt = datetime.datetime.now()
a[0] = dt
self.assertTrue(a[0] is dt)
def test_indexing(self):
# Tests conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
assert_equal(np.sort(x1), sort(x2, endwith=False))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_equal(x1[2], x2[2])
assert_equal(x1[2:5], x2[2:5])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[2] = 9
x2[2] = 9
assert_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
assert_equal(x1, x2)
x2[1] = masked
assert_equal(x1, x2)
x2[1:3] = masked
assert_equal(x1, x2)
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert_equal(type(s2), str)
assert_equal(type(s1), str)
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_matrix_indexing(self):
# Tests conversions and indexing
x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])
x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
# tests of indexing
assert_(type(x2[1, 0]) is type(x1[1, 0]))
assert_(x1[1, 0] == x2[1, 0])
assert_(x2[1, 1] is masked)
assert_equal(x1[0, 2], x2[0, 2])
assert_equal(x1[0, 1:], x2[0, 1:])
assert_equal(x1[:, 2], x2[:, 2])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[0, 2] = 9
x2[0, 2] = 9
assert_equal(x1, x2)
x1[0, 1:] = 99
x2[0, 1:] = 99
assert_equal(x1, x2)
x2[0, 1] = masked
assert_equal(x1, x2)
x2[0, 1:] = masked
assert_equal(x1, x2)
x2[0, :] = x1[0, :]
x2[0, 1] = masked
assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x3)[1], array([1, 1, 0])))
assert_(allequal(getmask(x3[1]), array([1, 1, 0])))
x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x4[1]), array([1, 1, 0])))
assert_(allequal(x4[1], array([1, 2, 3])))
x1 = np.matrix(np.arange(5) * 1.0)
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
def test_copy(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
self.assertTrue(m is m2)
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
#self.assertTrue( y1._data is x1)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
self.assertTrue(allequal(x1, y1.data))
#self.assertTrue( y1.mask is m)
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
self.assertTrue(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
self.assertTrue(y1a.mask is y1.mask)
y2 = array(x1, mask=m)
self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)
#self.assertTrue( y2.mask is m)
self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
#self.assertTrue( y2.mask is not m)
self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)
self.assertTrue(allequal(y2.mask, 0))
y3 = array(x1 * 1.0, mask=m)
self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
assert_equal(concatenate([x4, x4]), y4)
assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = repeat(x4, 2, axis=0)
assert_equal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert_equal(y5, y7)
y8 = x4.repeat(2, 0)
assert_equal(y5, y8)
y9 = x4.copy()
assert_equal(y9._data, x4._data)
assert_equal(y9._mask, x4._mask)
x = masked_array([1, 2, 3], mask=[0, 1, 0])
# Copy is False by default
y = masked_array(x)
assert_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)
y = masked_array(x, copy=True)
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
def test_deepcopy(self):
from copy import deepcopy
a = array([0, 1, 2], mask=[False, True, False])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
assert_not_equal(id(a._mask), id(copied._mask))
copied[1] = 1
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
copied.mask[1] = False
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
def test_str_repr(self):
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(str(a), '[0 -- 2]')
assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
' mask = [False True False],\n'
' fill_value = 999999)\n')
def test_pickling(self):
# Tests pickling
a = arange(10)
a[::3] = masked
a.fill_value = 999
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled._data, a._data)
assert_equal(a_pickled.fill_value, 999)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
self.assertTrue(isinstance(a_pickled._data, np.matrix))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
mc = np.ma.masked
mc_pickled = pickle.loads(mc.dumps())
assert_equal(mc_pickled._baseclass, mc._baseclass)
assert_equal(mc_pickled._mask, mc._mask)
assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
# Tests pickling w/ structured array
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
# Tests pickling w/ F_CONTIGUOUS arrays
a = arange(10)
a.shape = (-1, 2)
b = a.T
test = pickle.loads(pickle.dumps(b))
assert_equal(test, b)
def test_single_element_subscript(self):
        # Tests single element subscripts of MaskedArrays.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a[0].shape, ())
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
def test_topython(self):
# Tests some communication issues with Python.
assert_equal(1, int(array(1)))
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
self.assertRaises(TypeError, float, array([1, 1]))
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_(np.isnan(float(array([1], mask=[1]))))
a = array([1, 2, 3], mask=[1, 0, 0])
self.assertRaises(TypeError, lambda:float(a))
assert_equal(float(a[-1]), 3.)
self.assertTrue(np.isnan(float(a[0])))
self.assertRaises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
self.assertRaises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_equal(z.real, x)
assert_equal(z.imag, 10 * x)
assert_equal((z * conjugate(z)).real, 101 * x * x)
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_equal(x, z)
def test_oddfeatures_2(self):
# Tests some more features.
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_oddfeatures_3(self):
# Tests some generic features
atest = array([10], mask=True)
btest = array([20])
idx = atest.mask
atest[idx] = btest[idx]
assert_equal(atest, [20])
def test_filled_w_object_dtype(self):
a = np.ma.masked_all(1, dtype='O')
assert_equal(a.filled('x')[0], 'x')
def test_filled_w_flexible_dtype(self):
# Test filled w/ flexible dtype
flexi = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
flexi[0] = masked
assert_equal(flexi.filled(),
np.array([(default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),)], dtype=flexi.dtype))
flexi[0] = masked
assert_equal(flexi.filled(1),
np.array([(1, '1', 1.)], dtype=flexi.dtype))
def test_filled_w_mvoid(self):
# Test filled w/ mvoid
ndtype = [('a', int), ('b', float)]
a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)
# Filled using default
test = a.filled()
assert_equal(tuple(test), (1, default_fill_value(1.)))
# Explicit fill_value
test = a.filled((-1, -1))
assert_equal(tuple(test), (1, -1))
# Using predefined filling values
a.fill_value = (-999, -999)
assert_equal(tuple(a.filled()), (1, -999))
def test_filled_w_nested_dtype(self):
# Test filled w/ nested dtype
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([(1, (1, 1)), (2, (2, 2))],
mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
test = a.filled(0)
control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
assert_equal(test, control)
test = a['B'].filled(0)
control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
assert_equal(test, control)
def test_filled_w_f_order(self):
# Test filled w/ F-contiguous array
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
order='F') # this is currently ignored
self.assertTrue(a.flags['F_CONTIGUOUS'])
self.assertTrue(a.filled(0).flags['F_CONTIGUOUS'])
def test_optinfo_propagation(self):
# Checks that _optinfo dictionary isn't back-propagated
x = array([1, 2, 3, ], dtype=float)
x._optinfo['info'] = '???'
y = x.copy()
assert_equal(y._optinfo['info'], '???')
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
def test_fancy_printoptions(self):
# Test printing a masked array w/ fancy dtype.
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = array([(1, (2, 3.0)), (4, (5, 6.0))],
mask=[(1, (0, 1)), (0, (1, 0))],
dtype=fancydtype)
control = "[(--, (2, --)) (4, (--, 6.0))]"
assert_equal(str(test), control)
def test_flatten_structured_array(self):
# Test flatten_structured_array on arrays
# On ndarray
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[1., 1.], [2., 2.]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
mask=[[0, 1], [1, 0]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# On masked array with nested structure
ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
a = array([(1, (1, 1.1)), (2, (2, 2.2))],
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# Keeping the initial shape
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
def test_void0d(self):
# Test creating a mvoid object
ndtype = [('a', int), ('b', int)]
a = np.array([(1, 2,)], dtype=ndtype)[0]
f = mvoid(a)
assert_(isinstance(f, mvoid))
a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]
assert_(isinstance(a, mvoid))
a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
f = mvoid(a._data[0], a._mask[0])
assert_(isinstance(f, mvoid))
def test_mvoid_getitem(self):
# Test mvoid.__getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
f = a[0]
self.assertTrue(isinstance(f, mvoid))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
self.assertTrue(isinstance(f, mvoid))
self.assertTrue(f[0] is masked)
self.assertTrue(f['a'] is masked)
assert_equal(f[1], 4)
def test_mvoid_iter(self):
# Test iteration on __getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
assert_equal(list(a[0]), [1, 2])
# w/ mask
assert_equal(list(a[1]), [masked, 4])
def test_mvoid_print(self):
# Test printing a mvoid
mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
assert_equal(str(mx[0]), "(1, 1)")
mx['b'][0] = masked
ini_display = masked_print_option._display
masked_print_option.set_display("-X-")
try:
assert_equal(str(mx[0]), "(1, -X-)")
assert_equal(repr(mx[0]), "(1, -X-)")
finally:
masked_print_option.set_display(ini_display)
def test_mvoid_multidim_print(self):
# regression test for gh-6019
t_ma = masked_array(data = [([1, 2, 3],)],
mask = [([False, True, False],)],
fill_value = ([999999, 999999, 999999],),
dtype = [('a', '<i8', (3,))])
assert str(t_ma[0]) == "([1, --, 3],)"
assert repr(t_ma[0]) == "([1, --, 3],)"
        # additional tests with structured arrays
t_2d = masked_array(data = [([[1, 2], [3,4]],)],
mask = [([[False, True], [True, False]],)],
dtype = [('a', '<i8', (2,2))])
assert str(t_2d[0]) == "([[1, --], [--, 4]],)"
assert repr(t_2d[0]) == "([[1, --], [--, 4]],)"
t_0d = masked_array(data = [(1,2)],
mask = [(True,False)],
dtype = [('a', '<i8'), ('b', '<i8')])
assert str(t_0d[0]) == "(--, 2)"
assert repr(t_0d[0]) == "(--, 2)"
t_2d = masked_array(data = [([[1, 2], [3,4]], 1)],
mask = [([[False, True], [True, False]], False)],
dtype = [('a', '<i8', (2,2)), ('b', float)])
assert str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
assert repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
t_ne = masked_array(data=[(1, (1, 1))],
mask=[(True, (True, False))],
dtype = [('a', '<i8'), ('b', 'i4,i4')])
assert str(t_ne[0]) == "(--, (--, 1))"
assert repr(t_ne[0]) == "(--, (--, 1))"
def test_object_with_array(self):
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
mx = masked_array([mx1, mx2], mask=[False, True])
assert mx[0] is mx1
assert mx[1] is not mx2
assert np.all(mx[1].data == mx2.data)
assert np.all(mx[1].mask)
# check that we return a view.
mx[1].data[0] = 0.
assert mx2[0] == 0.
class TestMaskedArrayArithmetic(TestCase):
    # Test class for MaskedArray arithmetic.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_basic_arithmetic(self):
# Test of basic arithmetic.
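        # Note: assert_equal from numpy.ma.testutils only compares entries that
        # are unmasked in both operands, which is why x op y matches xm op ym
        # below even though xm and ym carry masks.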
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
assert_equal(a2d * a2d, a2d * a2dm)
assert_equal(a2d + a2d, a2d + a2dm)
assert_equal(a2d - a2d, a2d - a2dm)
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
assert_equal(-x, -xm)
assert_equal(x + y, xm + ym)
assert_equal(x - y, xm - ym)
assert_equal(x * y, xm * ym)
assert_equal(x / y, xm / ym)
assert_equal(a10 + y, a10 + ym)
assert_equal(a10 - y, a10 - ym)
assert_equal(a10 * y, a10 * ym)
assert_equal(a10 / y, a10 / ym)
assert_equal(x + a10, xm + a10)
assert_equal(x - a10, xm - a10)
assert_equal(x * a10, xm * a10)
assert_equal(x / a10, xm / a10)
assert_equal(x ** 2, xm ** 2)
assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
assert_equal(x ** y, xm ** ym)
assert_equal(np.add(x, y), add(xm, ym))
assert_equal(np.subtract(x, y), subtract(xm, ym))
assert_equal(np.multiply(x, y), multiply(xm, ym))
assert_equal(np.divide(x, y), divide(xm, ym))
def test_divide_on_different_shapes(self):
x = arange(6, dtype=float)
x.shape = (2, 3)
y = arange(3, dtype=float)
z = x / y
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
z = x / y[None,:]
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
y = arange(2, dtype=float)
z = x / y[:, None]
assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
        # Tests mixed arithmetic.
na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
a = array([tiny, 1. / tiny, 0.])
assert_equal(getmaskarray(a / 2), [0, 0, 0])
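        # Dividing by tiny would overflow and dividing by zero is invalid, so
        # the domained division masks the first and last entries below.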
assert_equal(getmaskarray(2 / a), [1, 0, 1])
def test_masked_singleton_arithmetic(self):
        # Tests some scalar arithmetic on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
self.assertTrue((1 / array(0)).mask)
self.assertTrue((1 + xm).mask)
self.assertTrue((-xm).mask)
self.assertTrue(maximum(xm, xm).mask)
self.assertTrue(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
        # Tests (in)equality on the masked singleton
a = array([1, 2, 3], mask=[1, 1, 0])
assert_((a[0] == 0) is masked)
assert_((a[0] != 0) is masked)
assert_equal((a[-1] == 0), False)
assert_equal((a[-1] != 0), True)
def test_arithmetic_with_masked_singleton(self):
# Checks that there's no collapsing to masked
x = masked_array([1, 2])
y = x * masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
y = x[0] * masked
assert_(y is masked)
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
# Check that we're not losing the shape of a singleton
x = masked_array([1, ])
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y.mask, [True, ])
def test_scalar_arithmetic(self):
x = array(0, mask=0)
assert_equal(x.filled().ctypes.data, x.ctypes.data)
# Make sure we don't lose the shape in some circumstances
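        # (0, 0) / 0. is an invalid division, so both entries come back masked
        # while the (2,) shape is preserved.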
xm = array((0, 0)) / 0.
assert_equal(xm.shape, (2,))
assert_equal(xm.mask, [1, 1])
def test_basic_ufuncs(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.cos(x), cos(xm))
assert_equal(np.cosh(x), cosh(xm))
assert_equal(np.sin(x), sin(xm))
assert_equal(np.sinh(x), sinh(xm))
assert_equal(np.tan(x), tan(xm))
assert_equal(np.tanh(x), tanh(xm))
assert_equal(np.sqrt(abs(x)), sqrt(xm))
assert_equal(np.log(abs(x)), log(xm))
assert_equal(np.log10(abs(x)), log10(xm))
assert_equal(np.exp(x), exp(xm))
assert_equal(np.arcsin(z), arcsin(zm))
assert_equal(np.arccos(z), arccos(zm))
assert_equal(np.arctan(z), arctan(zm))
assert_equal(np.arctan2(x, y), arctan2(xm, ym))
assert_equal(np.absolute(x), absolute(xm))
assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
assert_equal(np.equal(x, y), equal(xm, ym))
assert_equal(np.not_equal(x, y), not_equal(xm, ym))
assert_equal(np.less(x, y), less(xm, ym))
assert_equal(np.greater(x, y), greater(xm, ym))
assert_equal(np.less_equal(x, y), less_equal(xm, ym))
assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
assert_equal(np.conjugate(x), conjugate(xm))
def test_count_func(self):
# Tests count
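        # count() returns the number of unmasked entries, optionally per axis.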
assert_equal(1, count(1))
assert_equal(0, array(1, mask=[1]))
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
res = count(ott)
self.assertTrue(res.dtype.type is np.intp)
assert_equal(3, res)
ott = ott.reshape((2, 2))
res = count(ott)
assert_(res.dtype.type is np.intp)
assert_equal(3, res)
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_equal([1, 2], res)
assert_(getmask(res) is nomask)
ott = array([0., 1., 2., 3.])
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_(res.dtype.type is np.intp)
assert_raises(IndexError, ott.count, 1)
def test_minmax_func(self):
# Tests minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# max doesn't work if shaped
xr = np.ravel(x)
xmr = ravel(xm)
# following are true because of careful selection of data
assert_equal(max(xr), maximum(xmr))
assert_equal(min(xr), minimum(xmr))
assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_equal(minimum(x, y), where(less(x, y), x, y))
assert_equal(maximum(x, y), where(greater(x, y), x, y))
assert_(minimum(x) == 0)
assert_(maximum(x) == 4)
x = arange(4).reshape(2, 2)
x[-1, -1] = masked
assert_equal(maximum(x), 2)
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
# Test np.min/maximum.reduce on array w/ full False mask
a = array([1, 2, 3], mask=[False, False, False])
b = np.maximum.reduce(a)
assert_equal(b, 3)
def test_minmax_funcs_with_output(self):
# Tests the min/max functions with explicit outputs
mask = np.random.rand(12).round()
xm = array(np.random.uniform(0, 10, 12), mask=mask)
xm.shape = (3, 4)
for funcname in ('min', 'max'):
# Initialize
npfunc = getattr(np, funcname)
mafunc = getattr(numpy.ma.core, funcname)
# Use the np version
nout = np.empty((4,), dtype=int)
try:
result = npfunc(xm, axis=0, out=nout)
except MaskError:
pass
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
def test_minmax_methods(self):
# Additional tests on max/min
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
self.assertTrue(xm[0].max() is masked)
self.assertTrue(xm[0].max(0) is masked)
self.assertTrue(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
self.assertTrue(xm[0].min() is masked)
self.assertTrue(xm[0].min(0) is masked)
self.assertTrue(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
self.assertTrue(xm[0].ptp() is masked)
self.assertTrue(xm[0].ptp(0) is masked)
self.assertTrue(xm[0].ptp(-1) is masked)
x = array([1, 2, 3], mask=True)
self.assertTrue(x.min() is masked)
self.assertTrue(x.max() is masked)
self.assertTrue(x.ptp() is masked)
def test_addsumprod(self):
# Tests add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.add.reduce(x), add.reduce(x))
assert_equal(np.add.accumulate(x), add.accumulate(x))
assert_equal(4, sum(array(4), axis=0))
assert_equal(4, sum(array(4), axis=0))
assert_equal(np.sum(x, axis=0), sum(x, axis=0))
assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
assert_equal(np.sum(x, 0), sum(x, 0))
assert_equal(np.product(x, axis=0), product(x, axis=0))
assert_equal(np.product(x, 0), product(x, 0))
assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
if len(s) > 1:
assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
assert_equal(np.sum(x, 1), sum(x, 1))
assert_equal(np.product(x, 1), product(x, 1))
def test_binops_d2D(self):
# Test binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a * b
control = array([[2., 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a * b
control = array([[2, 3], [8, 10], [18, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2, 3], [8, 10], [18, 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_domained_binops_d2D(self):
# Test domained binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a / b
control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a / b
control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_noshrinking(self):
# Check that we don't shrink a mask when not wanted
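        # shrink=False keeps an explicit all-False mask instead of collapsing
        # it to nomask; that should survive binary and in-place operations.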
# Binary operations
a = masked_array([1., 2., 3.], mask=[False, False, False],
shrink=False)
b = a + 1
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a += 1
assert_equal(a.mask, [0, 0, 0])
# Domained binary operation
b = a / 1.
assert_equal(b.mask, [0, 0, 0])
        # In place domained binary operation
a /= 1.
assert_equal(a.mask, [0, 0, 0])
    def test_noshrink_on_creation(self):
# Check that the mask is not shrunk on array creation when not wanted
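        # 1.5 never occurs in the data, so the mask is all-False, but with
        # shrink=False it must stay an explicit array rather than nomask.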
a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
assert_equal(a.mask, [0, 0, 0])
def test_mod(self):
# Tests mod
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(mod(x, y), mod(xm, ym))
test = mod(ym, xm)
assert_equal(test, np.mod(ym, xm))
assert_equal(test.mask, mask_or(xm.mask, ym.mask))
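        # mod is a domained operation: besides the combined input masks,
        # entries where the divisor (ym) is zero are masked as well.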
test = mod(xm, ym)
assert_equal(test, np.mod(xm, ym))
assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
def test_TakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
assert_equal(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y))
assert_equal(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_imag_real(self):
# Check complex
xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
assert_equal(xx.imag, [10, 2])
assert_equal(xx.imag.filled(), [1e+20, 2])
assert_equal(xx.imag.dtype, xx._data.imag.dtype)
assert_equal(xx.real, [1, 20])
assert_equal(xx.real.filled(), [1e+20, 20])
assert_equal(xx.real.dtype, xx._data.real.dtype)
def test_methods_with_output(self):
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
for funcname in funclist:
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty(4, dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty(4, dtype=int)
result = xmmeth(axis=0, out=output)
assert_(result is output)
assert_(output[0] is masked)
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a == a)
assert_equal(test, [True, True])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [False, True])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [True, False])
assert_equal(test.mask, [False, False])
def test_ne_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a != a)
assert_equal(test, [False, False])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [True, False])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [False, True])
assert_equal(test.mask, [False, False])
def test_eq_w_None(self):
# Really, comparisons with None should not be done, but check them
# anyway. Note that pep8 will flag these tests.
# With partial mask
a = array([1, 2], mask=[0, 1])
assert_equal(a == None, False)
assert_equal(a.data == None, False)
assert_equal(a.mask == None, False)
assert_equal(a != None, True)
# With nomask
a = array([1, 2], mask=False)
assert_equal(a == None, False)
assert_equal(a != None, True)
# With complete mask
a = array([1, 2], mask=True)
assert_equal(a == None, False)
assert_equal(a != None, True)
# Fully masked, even comparison to None should return "masked"
a = masked
assert_equal(a == None, masked)
def test_eq_w_scalar(self):
a = array(1)
assert_equal(a == 1, True)
assert_equal(a == 0, False)
assert_equal(a != 1, False)
assert_equal(a != 0, True)
def test_numpyarithmetics(self):
# Check that the mask is not back-propagated when using numpy functions
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
mask=[1, 1, 0, 0, 1])
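        # log(-1) and log(0) are invalid, so those entries are masked in the
        # result; the mask of the input a itself must stay [0, 0, 0, 0, 1].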
test = log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
test = np.log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
class TestMaskedArrayAttributes(TestCase):
def test_keepmask(self):
# Tests the keep mask flag
x = masked_array([1, 2, 3], mask=[1, 0, 0])
mx = masked_array(x)
assert_equal(mx.mask, x.mask)
mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
assert_equal(mx.mask, [0, 1, 0])
mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
assert_equal(mx.mask, [1, 1, 0])
        # keep_mask defaults to True, so the new mask is OR-ed with the original
mx = masked_array(x, mask=[0, 1, 0])
assert_equal(mx.mask, [1, 1, 0])
def test_hardmask(self):
# Test hard_mask
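        # With hard_mask=True, assignments can never unmask an entry: writes to
        # masked slots are silently ignored, unlike with a soft mask.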
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
        # We need to copy, to avoid updating d in xh!
xs = array(d, mask=m, hard_mask=False, copy=True)
xh[[1, 4]] = [10, 40]
xs[[1, 4]] = [10, 40]
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, [0, 0, 0, 1, 0])
self.assertTrue(xh._hardmask)
self.assertTrue(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
assert_equal(xs._data, [0, 10, 20, 30, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, nomask)
xh[0] = masked
xs[0] = masked
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, [1, 0, 0, 0, 0])
xh[:] = 1
xs[:] = 1
assert_equal(xh._data, [0, 1, 1, 3, 4])
assert_equal(xs._data, [1, 1, 1, 1, 1])
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, nomask)
# Switch to soft mask
xh.soften_mask()
xh[:] = arange(5)
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh.mask, nomask)
# Switch back to hard mask
xh.harden_mask()
xh[xh < 3] = masked
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh[filled(xh > 1, False)] = 5
assert_equal(xh._data, [0, 1, 2, 5, 5])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
xh[0] = 0
assert_equal(xh._data, [[1, 0], [3, 4]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[-1, -1] = 5
assert_equal(xh._data, [[1, 0], [3, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[filled(xh < 5, False)] = 2
assert_equal(xh._data, [[1, 2], [2, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
def test_hardmask_again(self):
# Another test of hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
xh[4:5] = 999
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
xh[0:1] = 999
assert_equal(xh._data, [999, 1, 2, 3, 4])
def test_hardmask_oncemore_yay(self):
# OK, yet another test of hardmask
        # Make sure that harden_mask/soften_mask/unshare_mask returns self
a = array([1, 2, 3], mask=[1, 0, 0])
b = a.harden_mask()
assert_equal(a, b)
b[0] = 0
assert_equal(a, b)
assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
a = b.soften_mask()
a[0] = 0
assert_equal(a, b)
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
def test_smallmask(self):
# Checks the behaviour of _smallmask
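        # With _smallmask (the default), a mask that becomes all-False collapses
        # back to nomask; with _smallmask = False the full boolean mask is kept.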
a = arange(10)
a[1] = masked
a[1] = 1
assert_equal(a._mask, nomask)
a = arange(10)
a._smallmask = False
a[1] = masked
a[1] = 1
assert_equal(a._mask, zeros(10))
def test_shrink_mask(self):
# Tests .shrink_mask()
a = array([1, 2, 3], mask=[0, 0, 0])
b = a.shrink_mask()
assert_equal(a, b)
assert_equal(a.mask, nomask)
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
# test simple access
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
assert_equal(test.flat[1], 2)
assert_equal(test.flat[2], masked)
self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2]))
# Test flat on masked_matrices
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
assert_equal(test, control)
# Test setting
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
testflat = test.flat
testflat[:] = testflat[[2, 1, 0]]
assert_equal(test, control)
testflat[0] = 9
assert_equal(test[0, 0], 9)
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
[(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],
dtype=[('a', int), ('b', float), ('c', '|S8')])
x['a'][0, 1] = masked
x['b'][1, 0] = masked
x['c'][0, 2] = masked
x[-1, -1] = masked
xflat = x.flat
assert_equal(xflat[0], x[0, 0])
assert_equal(xflat[1], x[0, 1])
assert_equal(xflat[2], x[0, 2])
assert_equal(xflat[:3], x[0])
assert_equal(xflat[3], x[1, 0])
assert_equal(xflat[4], x[1, 1])
assert_equal(xflat[5], x[1, 2])
assert_equal(xflat[3:], x[1])
assert_equal(xflat[-1], x[-1, -1])
i = 0
j = 0
for xf in xflat:
assert_equal(xf, x[j, i])
i += 1
if i >= x.shape[-1]:
i = 0
j += 1
# test that matrices keep the correct shape (#4615)
a = masked_array(np.matrix(np.eye(2)), mask=0)
b = a.flat
b01 = b[:2]
assert_equal(b01.data, array([[1., 0.]]))
assert_equal(b01.mask, array([[False, False]]))
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
a = np.zeros(4, dtype='f4,i4')
m = np.ma.array(a)
m.dtype = np.dtype('f4')
repr(m) # raises?
assert_equal(m.dtype, np.dtype('f4'))
# check that dtype changes that change shape of mask too much
# are not allowed
def assign():
m = np.ma.array(a)
m.dtype = np.dtype('f8')
assert_raises(ValueError, assign)
b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises?
assert_equal(b.dtype, np.dtype('f4'))
# check that nomask is preserved
a = np.zeros(4, dtype='f4')
m = np.ma.array(a)
m.dtype = np.dtype('f4,i4')
assert_equal(m.dtype, np.dtype('f4,i4'))
assert_equal(m._mask, np.ma.nomask)
class TestFillingValues(TestCase):
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
_check_fill_value = np.ma.core._check_fill_value
fval = _check_fill_value(0, int)
assert_equal(fval, 0)
fval = _check_fill_value(None, int)
assert_equal(fval, default_fill_value(0))
fval = _check_fill_value(0, "|S3")
assert_equal(fval, asbytes("0"))
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value(b"camelot!"))
self.assertRaises(TypeError, _check_fill_value, 1e+20, int)
self.assertRaises(TypeError, _check_fill_value, 'stuff', int)
def test_check_on_fields(self):
# Tests _check_fill_value with records
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
#.....Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using a flexible type w/ a different type shouldn't matter
# BEHAVIOR in 1.5 and earlier: match structured types by position
#fill_val = np.array((-999, -12345678.9, "???"),
# dtype=[("A", int), ("B", float), ("C", "|S3")])
# BEHAVIOR in 1.6 and later: match structured types by name
fill_val = np.array(("???", -999, -12345678.9),
dtype=[("c", "|S3"), ("a", int), ("b", float), ])
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using an object-array shouldn't matter either
fill_val = np.ndarray(shape=(1,), dtype=object)
fill_val[0] = (-999, -12345678.9, asbytes("???"))
fval = _check_fill_value(fill_val, object)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# NOTE: This test was never run properly as "fill_value" rather than
# "fill_val" was assigned. Written properly, it fails.
#fill_val = np.array((-999, -12345678.9, "???"))
#fval = _check_fill_value(fill_val, ndtype)
#self.assertTrue(isinstance(fval, ndarray))
#assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
# Tests the behavior of fill_value during conversion
# We had a tailored comment to make sure special attributes are
# properly dealt with
a = array(asbytes_nested(['3', '4', '5']))
a._optinfo.update({'comment':"updated!"})
b = array(a, dtype=int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
b = array(a, dtype=float)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0.))
b = a.astype(int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
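        # astype should carry the _optinfo dict (the tailored comment) along.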
assert_equal(b._optinfo['comment'], "updated!")
b = a.astype([('a', '|S3')])
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
def test_fillvalue(self):
# Yet more fun with the fill_value
data = masked_array([1, 2, 3], fill_value=-999)
series = data[[0, 2, 1]]
assert_equal(series._fill_value, data._fill_value)
mtype = [('f', float), ('s', '|S3')]
x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
x.fill_value = 999
assert_equal(x.fill_value.item(), [999., asbytes('999')])
assert_equal(x['f'].fill_value, 999)
assert_equal(x['s'].fill_value, asbytes('999'))
x.fill_value = (9, '???')
assert_equal(x.fill_value.item(), (9, asbytes('???')))
assert_equal(x['f'].fill_value, 9)
assert_equal(x['s'].fill_value, asbytes('???'))
x = array([1, 2, 3.1])
x.fill_value = 999
assert_equal(np.asarray(x.fill_value).dtype, float)
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
def test_fillvalue_exotic_dtype(self):
# Tests yet more exotic flexible dtypes
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('i', int), ('s', '|S8'), ('f', float)]
control = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),),
dtype=ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
# The shape shouldn't matter
ndtype = [('f0', float, (2, 2))]
control = np.array((default_fill_value(0.),),
dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
ndtype = np.dtype("int, (2,3)float, float")
control = np.array((default_fill_value(0),
default_fill_value(0.),
default_fill_value(0.),),
dtype="int, float, float").astype(ndtype)
test = _check_fill_value(None, ndtype)
assert_equal(test, control)
control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
def test_fillvalue_datetime_timedelta(self):
# Test default fillvalue for datetime64 and timedelta64 types.
# See issue #4476, this would return '?' which would cause errors
# elsewhere
for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
"h", "D", "W", "M", "Y"):
control = numpy.datetime64("NaT", timecode)
test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
assert_equal(test, control)
control = numpy.timedelta64("NaT", timecode)
test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
assert_equal(test, control)
def test_extremum_fill_value(self):
# Tests extremum fill values for flexible type.
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
test = minimum_fill_value(a)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
test = maximum_fill_value(a)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
def test_fillvalue_individual_fields(self):
# Test setting fill_value on individual fields
ndtype = [('a', int), ('b', int)]
# Explicit fill_value
a = array(list(zip([1, 2, 3], [4, 5, 6])),
fill_value=(-999, -999), dtype=ndtype)
aa = a['a']
aa.set_fill_value(10)
assert_equal(aa._fill_value, np.array(10))
assert_equal(tuple(a.fill_value), (10, -999))
a.fill_value['b'] = -10
assert_equal(tuple(a.fill_value), (10, -10))
# Implicit fill_value
t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)
tt = t['a']
tt.set_fill_value(10)
assert_equal(tt._fill_value, np.array(10))
assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
def test_fillvalue_implicit_structured_array(self):
# Check that fill_value is always defined for structured arrays
ndtype = ('b', float)
adtype = ('a', float)
a = array([(1.,), (2.,)], mask=[(False,), (False,)],
fill_value=(np.nan,), dtype=np.dtype([adtype]))
b = empty(a.shape, dtype=[adtype, ndtype])
b['a'] = a['a']
b['a'].set_fill_value(a['a'].fill_value)
f = b._fill_value[()]
assert_(np.isnan(f[0]))
assert_equal(f[-1], default_fill_value(1.))
def test_fillvalue_as_arguments(self):
# Test adding a fill_value parameter to empty/ones/zeros
a = empty(3, fill_value=999.)
assert_equal(a.fill_value, 999.)
a = ones(3, fill_value=999., dtype=float)
assert_equal(a.fill_value, 999.)
a = zeros(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
def test_shape_argument(self):
        # Test that shape can be provided as an argument
# GH issue 6106
a = empty(shape=(3, ))
assert_equal(a.shape, (3, ))
a = ones(shape=(3, ), dtype=float)
assert_equal(a.shape, (3, ))
a = zeros(shape=(3, ), dtype=complex)
assert_equal(a.shape, (3, ))
def test_fillvalue_in_view(self):
# Test the behavior of fill_value in view
# Create initial masked array
x = array([1, 2, 3], fill_value=1, dtype=np.int64)
# Check that fill_value is preserved by default
y = x.view()
assert_(y.fill_value == 1)
# Check that fill_value is preserved if dtype is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute
y = x.view(MaskedArray)
assert_(y.fill_value == 1)
# Check that fill_value is preserved if type is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute (by
# default, the first argument is dtype, not type)
y = x.view(type=MaskedArray)
assert_(y.fill_value == 1)
# Check that code does not crash if passed an ndarray sub-class that
# does not have a _fill_value attribute
y = x.view(np.ndarray)
y = x.view(type=np.ndarray)
        # Check that fill_value can be overridden with view
y = x.view(MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
        # Check that fill_value can be overridden with view (using type=)
y = x.view(type=MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value gets reset if passed a dtype but not a
# fill_value. This is because even though in some cases one can safely
# cast the fill_value, e.g. if taking an int64 view of an int32 array,
# in other cases, this cannot be done (e.g. int32 view of an int64
# array with a large fill_value).
y = x.view(dtype=np.int32)
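        # 999999 is the default fill_value for int32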
assert_(y.fill_value == 999999)
class TestUfuncs(TestCase):
# Test class for the application of ufuncs on MaskedArrays.
def setUp(self):
# Base data definition.
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
# Tests new ufuncs on MaskedArrays.
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
# 'nonzero', 'around',
'floor', 'ceil',
# 'sometrue', 'alltrue',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma.core, f)
args = self.d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
assert_equal(ur.filled(0), mr.filled(0), f)
assert_mask_equal(ur.mask, mr.mask, err_msg=f)
def test_reduce(self):
# Tests reduce on MaskedArrays.
a = self.d[0]
self.assertTrue(not alltrue(a, axis=0))
self.assertTrue(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
def test_minmax(self):
# Tests extrema on MaskedArrays.
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
self.assertTrue(amask.max(1)[0].mask)
self.assertTrue(amask.min(1)[0].mask)
def test_ndarray_mask(self):
# Check that the mask of the result is a ndarray (not a MaskedArray...)
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
test = np.sqrt(a)
control = masked_array([-1, 0, 1, np.sqrt(2), -1],
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
self.assertTrue(not isinstance(test.mask, MaskedArray))
def test_treatment_of_NotImplemented(self):
# Check that NotImplemented is returned at appropriate places
a = masked_array([1., 2.], mask=[1, 0])
self.assertRaises(TypeError, operator.mul, a, "abc")
self.assertRaises(TypeError, operator.truediv, a, "abc")
class MyClass(object):
__array_priority__ = a.__array_priority__ + 1
def __mul__(self, other):
return "My mul"
def __rmul__(self, other):
return "My rmul"
me = MyClass()
assert_(me * a == "My mul")
assert_(a * me == "My rmul")
# and that __array_priority__ is respected
class MyClass2(object):
__array_priority__ = 100
def __mul__(self, other):
return "Me2mul"
def __rmul__(self, other):
return "Me2rmul"
def __rdiv__(self, other):
return "Me2rdiv"
__rtruediv__ = __rdiv__
me_too = MyClass2()
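        # a.__mul__ returns NotImplemented here, so Python falls back to
        # MyClass2's reflected methods.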
assert_(a.__mul__(me_too) is NotImplemented)
assert_(all(multiply.outer(a, me_too) == "Me2rmul"))
assert_(a.__truediv__(me_too) is NotImplemented)
assert_(me_too * a == "Me2mul")
assert_(a * me_too == "Me2rmul")
assert_(a / me_too == "Me2rdiv")
class TestMaskedArrayInPlaceArithmetics(TestCase):
# Test MaskedArray Arithmetics
def setUp(self):
x = arange(10)
y = arange(10)
xm = arange(10)
xm[2] = masked
self.intdata = (x, y, xm)
self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
self.othertypes = [np.dtype(_).type for _ in self.othertypes]
self.uint8data = (
x.astype(np.uint8),
y.astype(np.uint8),
xm.astype(np.uint8)
)
def test_inplace_addition_scalar(self):
# Test of inplace additions
(x, y, xm) = self.intdata
xm[2] = masked
x += 1
assert_equal(x, y + 1)
xm += 1
assert_equal(xm, y + 1)
(x, _, xm) = self.floatdata
id1 = x.data.ctypes._data
x += 1.
assert_(id1 == x.data.ctypes._data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
# Test of inplace additions
(x, y, xm) = self.intdata
m = xm.mask
a = arange(10, dtype=np.int16)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar(self):
# Test of inplace subtractions
(x, y, xm) = self.intdata
x -= 1
assert_equal(x, y - 1)
xm -= 1
assert_equal(xm, y - 1)
def test_inplace_subtraction_array(self):
# Test of inplace subtractions
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
x *= 2.0
assert_equal(x, y * 2)
xm *= 2.0
assert_equal(xm, y * 2)
def test_inplace_multiplication_array(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_division_scalar_int(self):
# Test of inplace division
(x, y, xm) = self.intdata
x = arange(10) * 2
xm = arange(10) * 2
xm[2] = masked
x //= 2
assert_equal(x, y)
xm //= 2
assert_equal(xm, y)
def test_inplace_division_scalar_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
x /= 2.0
assert_equal(x, y / 2.0)
xm /= arange(10)
assert_equal(xm, ones((10,)))
def test_inplace_division_array_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x /= a
xm /= a
assert_equal(x, y / a)
assert_equal(xm, y / a)
assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
def test_inplace_division_misc(self):
x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = xm / ym
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
#assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
#assert_equal(xm._data,
# [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
def test_datafriendly_add(self):
# Test keeping data w/ (inplace) addition
x = array([1, 2, 3], mask=[0, 0, 1])
# Test add w/ scalar
xx = x + 1
assert_equal(xx.data, [2, 3, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test iadd w/ scalar
x += 1
assert_equal(x.data, [2, 3, 3])
assert_equal(x.mask, [0, 0, 1])
# Test add w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x + array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 4, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test iadd w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x += array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 4, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_sub(self):
# Test keeping data w/ (inplace) subtraction
# Test sub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - 1
assert_equal(xx.data, [0, 1, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test isub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x -= 1
assert_equal(x.data, [0, 1, 3])
assert_equal(x.mask, [0, 0, 1])
# Test sub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 0, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test isub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x -= array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 0, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_mul(self):
# Test keeping data w/ (inplace) multiplication
# Test mul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * 2
assert_equal(xx.data, [2, 4, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test imul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x *= 2
assert_equal(x.data, [2, 4, 3])
assert_equal(x.mask, [0, 0, 1])
# Test mul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * array([10, 20, 30], mask=[1, 0, 0])
assert_equal(xx.data, [1, 40, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test imul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x *= array([10, 20, 30], mask=[1, 0, 0])
assert_equal(x.data, [1, 40, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_div(self):
# Test keeping data w/ (inplace) division
# Test div on scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x / 2.
assert_equal(xx.data, [1 / 2., 2 / 2., 3])
assert_equal(xx.mask, [0, 0, 1])
# Test idiv on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= 2.
assert_equal(x.data, [1 / 2., 2 / 2., 3])
assert_equal(x.mask, [0, 0, 1])
# Test div on array
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x / array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(xx.data, [1., 2. / 20., 3.])
assert_equal(xx.mask, [1, 0, 1])
# Test idiv on array
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(x.data, [1., 2 / 20., 3.])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_pow(self):
# Test keeping data w/ (inplace) power
# Test pow on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x ** 2.5
assert_equal(xx.data, [1., 2. ** 2.5, 3.])
assert_equal(xx.mask, [0, 0, 1])
# Test ipow on scalar
x **= 2.5
assert_equal(x.data, [1., 2. ** 2.5, 3])
assert_equal(x.mask, [0, 0, 1])
def test_datafriendly_add_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a += b
assert_equal(a, [[2, 2], [4, 4]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a += b
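        # b's 1-D mask broadcasts across both rows of a.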
assert_equal(a, [[2, 2], [4, 4]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_sub_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_mul_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_inplace_addition_scalar_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
xm[2] = masked
x += t(1)
assert_equal(x, y + t(1))
xm += t(1)
assert_equal(xm, y + t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_addition_array_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_scalar_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x -= t(1)
assert_equal(x, y - t(1))
xm -= t(1)
assert_equal(xm, y - t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_array_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_scalar_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x *= t(2)
assert_equal(x, y * t(2))
xm *= t(2)
assert_equal(xm, y * t(2))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_array_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
x //= t(2)
xm //= t(2)
assert_equal(x, y)
assert_equal(xm, y)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x //= a
xm //= a
assert_equal(x, y // a)
assert_equal(xm, y // a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= t(2)
assert_equal(x, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= t(2)
assert_equal(xm, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= a
assert_equal(x, y / a)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= a
assert_equal(xm, y / a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_pow_type(self):
# Test keeping data w/ (inplace) power
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
# Test pow on scalar
x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
xx = x ** t(2)
xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
assert_equal(xx.data, xx_r.data)
assert_equal(xx.mask, xx_r.mask)
# Test ipow on scalar
x **= t(2)
assert_equal(x.data, xx_r.data)
assert_equal(x.mask, xx_r.mask)
assert_equal(len(w), 0, "Failed on type=%s." % t)
class TestMaskedArrayMethods(TestCase):
# Test class for miscellaneous MaskedArrays methods.
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_generic_methods(self):
# Tests some MaskedArray methods.
a = array([1, 3, 2])
assert_equal(a.any(), a._data.any())
assert_equal(a.all(), a._data.all())
assert_equal(a.argmax(), a._data.argmax())
assert_equal(a.argmin(), a._data.argmin())
assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
assert_equal(a.conj(), a._data.conj())
assert_equal(a.conjugate(), a._data.conjugate())
m = array([[1, 2], [3, 4]])
assert_equal(m.diagonal(), m._data.diagonal())
assert_equal(a.sum(), a._data.sum())
assert_equal(a.take([1, 2]), a._data.take([1, 2]))
assert_equal(m.transpose(), m._data.transpose())
def test_allclose(self):
# Tests allclose on arrays
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
self.assertTrue(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
self.assertTrue(not allclose(a, b))
b[0] = np.inf
self.assertTrue(allclose(a, b))
        # Test allclose w/ masked
a = masked_array(a)
a[-1] = masked
self.assertTrue(allclose(a, b, masked_equal=True))
self.assertTrue(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
self.assertTrue(allclose(a, 0, masked_equal=True))
# Test that the function works for MIN_INT integer typed arrays
a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
self.assertTrue(allclose(a, a))
def test_allany(self):
# Checks the any/all methods/functions.
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mx = masked_array(x, mask=m)
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
self.assertFalse(mxbig.all())
self.assertTrue(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
self.assertFalse(mxsmall.all())
self.assertTrue(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
def test_allany_onmatrices(self):
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
self.assertFalse(mXbig.all())
self.assertTrue(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
self.assertFalse(mXsmall.all())
self.assertTrue(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def test_allany_oddities(self):
# Some fun with all and any
store = empty((), dtype=bool)
full = array([1, 2, 3], mask=True)
self.assertTrue(full.all() is masked)
full.all(out=store)
self.assertTrue(store)
        assert_equal(store._mask, True)
self.assertTrue(store is not masked)
store = empty((), dtype=bool)
self.assertTrue(full.any() is masked)
full.any(out=store)
self.assertTrue(not store)
        assert_equal(store._mask, True)
self.assertTrue(store is not masked)
def test_argmax_argmin(self):
# Tests argmin & argmax on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_equal(mx.argmin(), 35)
assert_equal(mX.argmin(), 35)
assert_equal(m2x.argmin(), 4)
assert_equal(m2X.argmin(), 4)
assert_equal(mx.argmax(), 28)
assert_equal(mX.argmax(), 28)
assert_equal(m2x.argmax(), 31)
assert_equal(m2X.argmax(), 31)
assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
def test_clip(self):
# Tests clip on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
mx = array(x, mask=m)
clipped = mx.clip(2, 8)
assert_equal(clipped.mask, mx.mask)
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
def test_compress(self):
# test compress
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
condition = (a > 1.5) & (a < 3.5)
assert_equal(a.compress(condition), [2., 3.])
a[[2, 3]] = masked
b = a.compress(condition)
assert_equal(b._data, [2., 3.])
assert_equal(b._mask, [0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
condition = (a < 4.)
b = a.compress(condition)
assert_equal(b._data, [1., 2., 3.])
assert_equal(b._mask, [0, 0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
a = masked_array([[10, 20, 30], [40, 50, 60]],
mask=[[0, 0, 1], [1, 0, 0]])
b = a.compress(a.ravel() >= 22)
assert_equal(b._data, [30, 40, 50, 60])
assert_equal(b._mask, [1, 1, 0, 0])
x = np.array([3, 1, 2])
b = a.compress(x >= 2, axis=1)
assert_equal(b._data, [[10, 30], [40, 60]])
assert_equal(b._mask, [[0, 1], [1, 0]])
def test_compressed(self):
# Tests compressed
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
self.assertTrue(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
def test_empty(self):
# Tests empty/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
b = empty_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
b = empty(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
# check empty_like mask handling
a = masked_array([1, 2, 3], mask=[False, True, False])
b = empty_like(a)
assert_(not np.may_share_memory(a.mask, b.mask))
b = a.view(masked_array)
assert_(np.may_share_memory(a.mask, b.mask))
def test_put(self):
# Tests put.
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
#self.assertTrue(x.mask is not m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
i = [0, 2, 4, 6]
x.put(i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
put(x, i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
def test_put_nomask(self):
# GitHub issue 6425
x = zeros(10)
z = array([3., -1.], mask=[False, True])
x.put([1, 2], z)
self.assertTrue(x[0] is not masked)
assert_equal(x[0], 0)
self.assertTrue(x[1] is not masked)
assert_equal(x[1], 3)
self.assertTrue(x[2] is masked)
self.assertTrue(x[3] is not masked)
assert_equal(x[3], 0)
def test_put_hardmask(self):
# Tests put on hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d + 1, mask=m, hard_mask=True, copy=True)
xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
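        # The hard-masked entries (indices 3 and 4) keep their original data.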
assert_equal(xh._data, [3, 4, 2, 4, 5])
def test_putmask(self):
x = arange(6) + 1
mx = array(x, mask=[0, 0, 0, 1, 1, 1])
mask = [0, 0, 1, 0, 0, 1]
# w/o mask, w/o masked values
xx = x.copy()
putmask(xx, mask, 99)
assert_equal(xx, [1, 2, 99, 4, 5, 99])
# w/ mask, w/o masked values
mxx = mx.copy()
putmask(mxx, mask, 99)
assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
# w/o mask, w/ masked values
values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
xx = x.copy()
putmask(xx, mask, values)
assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
# w/ mask, w/ masked values
mxx = mx.copy()
putmask(mxx, mask, values)
assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
# w/ mask, w/ masked values + hardmask
mxx = mx.copy()
mxx.harden_mask()
putmask(mxx, mask, values)
assert_equal(mxx, [1, 2, 30, 4, 5, 60])
def test_ravel(self):
# Tests ravel
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel._mask.shape, aravel.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel.shape, (1, 5))
assert_equal(aravel._mask.shape, a.shape)
# Checks that small_mask is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
# Test index ordering
assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
def test_reshape(self):
# Tests reshape
x = arange(4)
x[0] = masked
y = x.reshape(2, 2)
assert_equal(y.shape, (2, 2,))
assert_equal(y._mask.shape, (2, 2,))
assert_equal(x.shape, (4,))
assert_equal(x._mask.shape, (4,))
def test_sort(self):
# Test sort
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
sortedx = sort(x)
assert_equal(sortedx._data, [1, 2, 3, 4])
assert_equal(sortedx._mask, [0, 0, 0, 1])
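        # endwith=False moves masked entries to the front instead of the end.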
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [4, 1, 2, 3])
assert_equal(sortedx._mask, [1, 0, 0, 0])
x.sort()
assert_equal(x._data, [1, 2, 3, 4])
assert_equal(x._mask, [0, 0, 0, 1])
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
x.sort(endwith=False)
assert_equal(x._data, [4, 1, 2, 3])
assert_equal(x._mask, [1, 0, 0, 0])
x = [1, 4, 2, 3]
sortedx = sort(x)
        self.assertTrue(not isinstance(sortedx, MaskedArray))
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [1, 2, -2, -1, 0])
assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
def test_sort_2d(self):
# Check sort of 2D array.
# 2D array w/o mask
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
# 2D array w/mask
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
# 3D
a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
[[1, 2, 3], [7, 8, 9], [4, 5, 6]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
[[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
a[a % 4 == 0] = masked
am = a.copy()
an = a.filled(99)
am.sort(0)
an.sort(0)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(1)
an.sort(1)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(2)
an.sort(2)
assert_equal(am, an)
def test_sort_flexible(self):
# Test sort on flexible dtype.
a = array(
data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
test = sort(a)
b = array(
data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
test = sort(a, endwith=False)
b = array(
data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
def test_argsort(self):
# Test argsort
a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
assert_equal(np.argsort(a), argsort(a))
def test_squeeze(self):
# Check squeeze
data = masked_array([[1, 2, 3]])
assert_equal(data.squeeze(), [1, 2, 3])
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
data = masked_array([[1]], mask=True)
self.assertTrue(data.squeeze() is masked)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mX = array(x, mask=m).reshape(6, 6)
mXX = mX.reshape(3, 2, 2, 3)
mXswapped = mX.swapaxes(0, 1)
assert_equal(mXswapped[-1], mX[:, -1])
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_take(self):
# Tests take
x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
assert_equal(x.take([[0, 1], [0, 1]]),
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
assert_equal(take(x, [0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
def test_take_masked_indices(self):
# Test take w/ masked indices
a = np.array((40, 18, 37, 9, 22))
        indices = np.arange(3)[None, :] + np.arange(5)[:, None]
mindices = array(indices, mask=(indices >= len(a)))
# No mask
test = take(a, mindices, mode='clip')
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 22],
[22, 22, 22]])
assert_equal(test, ctrl)
# Masked indices
test = take(a, mindices)
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 40],
[22, 40, 40]])
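        # Indices >= len(a) were masked above, so the corresponding results
        # are masked as well.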
ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# Masked input + masked indices
a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
test = take(a, mindices)
ctrl[0, 1] = ctrl[1, 0] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_tolist(self):
        # Tests tolist
# ... on 1D
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
self.assertTrue(xlist[1] is None)
self.assertTrue(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
assert_equal(xlist[0], [0, None, 2, 3])
assert_equal(xlist[1], [4, 5, 6, 7])
assert_equal(xlist[2], [8, 9, None, 11])
assert_equal(xlist, ctrl)
# ... on structured array w/ masked records
x = array(list(zip([1, 2, 3],
[1.1, 2.2, 3.3],
['one', 'two', 'thr'])),
dtype=[('a', int), ('b', float), ('c', '|S8')])
x[-1] = masked
assert_equal(x.tolist(),
[(1, 1.1, asbytes('one')),
(2, 2.2, asbytes('two')),
(None, None, None)])
# ... on structured array w/ masked fields
a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
dtype=[('a', int), ('b', int)])
test = a.tolist()
assert_equal(test, [[1, None], [3, 4]])
# ... on mvoid
a = a[0]
test = a.tolist()
assert_equal(test, [1, None])
def test_tolist_specialcase(self):
# Test mvoid.tolist: make sure we return a standard Python object
a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
# w/o mask: each entry is a np.void whose elements are standard Python
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
# w/ mask: each entry is a ma.void whose elements should be
# standard Python
a.mask[0] = (0, 1)
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
def test_toflex(self):
# Test the conversion to records
data = arange(10)
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = [('i', int), ('s', '|S3'), ('f', float)]
data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
'ABCDEFGHIJKLM',
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = np.dtype("int, (2,3)float, float")
data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
np.random.rand(10),
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal_records(record['_data'], data._data)
assert_equal_records(record['_mask'], data._mask)
def test_fromflex(self):
# Test the reconstruction of a masked_array from a record
a = array([1, 2, 3])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([1, 2, 3], mask=[0, 0, 1])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
dtype=[('A', int), ('B', float)])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.data, a.data)
def test_arraymethod(self):
# Test a _arraymethod w/ n argument
marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
control = masked_array([[1], [2], [3], [4], [5]],
mask=[0, 0, 1, 0, 0])
assert_equal(marray.T, control)
assert_equal(marray.transpose(), control)
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
class TestMaskedArrayMathMethods(TestCase):
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_cumsumprod(self):
# Tests cumsum & cumprod on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
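        # Masked entries behave as 0 for cumsum and as 1 for cumprod.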
mXcp = mX.cumsum(0)
assert_equal(mXcp._data, mX.filled(0).cumsum(0))
mXcp = mX.cumsum(1)
assert_equal(mXcp._data, mX.filled(0).cumsum(1))
mXcp = mX.cumprod(0)
assert_equal(mXcp._data, mX.filled(1).cumprod(0))
mXcp = mX.cumprod(1)
assert_equal(mXcp._data, mX.filled(1).cumprod(1))
def test_cumsumprod_with_output(self):
# Tests cumsum/cumprod w/ output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
for funcname in ('cumsum', 'cumprod'):
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
self.assertTrue(result is output)
def test_ptp(self):
# Tests ptp on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
        rows = np.zeros(n, float)
        cols = np.zeros(m, float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
assert_equal(mX.ptp(0), cols)
assert_equal(mX.ptp(1), rows)
def test_add_object(self):
x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
y = x + 'x'
assert_equal(y[1], 'bx')
assert_(y.mask[0])
def test_sum_object(self):
# Test sum on object dtype
        a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
# Test prod on object dtype
        a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
# Test mean/anom on object dtype
        a = masked_array([1, 2, 3], dtype=object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
def test_trace(self):
# Tests trace on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_almost_equal(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0))
def test_dot(self):
# Tests dot on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
fx = mx.filled(0)
r = mx.dot(mx)
assert_almost_equal(r.filled(0), fx.dot(fx))
assert_(r.mask is nomask)
fX = mX.filled(0)
r = mX.dot(mX)
assert_almost_equal(r.filled(0), fX.dot(fX))
        assert_(r.mask[1, 3])
r1 = empty_like(r)
mX.dot(mX, r1)
assert_almost_equal(r, r1)
mYY = mXX.swapaxes(-1, -2)
fXX, fYY = mXX.filled(0), mYY.filled(0)
r = mXX.dot(mYY)
assert_almost_equal(r.filled(0), fXX.dot(fYY))
r1 = empty_like(r)
mXX.dot(mYY, r1)
assert_almost_equal(r, r1)
def test_dot_shape_mismatch(self):
# regression test
        x = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
        y = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
        z = masked_array([[0, 1], [3, 3]])
x.dot(y, out=z)
assert_almost_equal(z.filled(0), [[1, 0], [15, 16]])
assert_almost_equal(z.mask, [[0, 1], [0, 0]])
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_almost_equal(mX.std(axis=None, ddof=1),
mX.compressed().std(ddof=1))
assert_almost_equal(mX.var(axis=None, ddof=1),
mX.compressed().var(ddof=1))
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
def test_varstd_specialcases(self):
# Test a special case for var
nout = np.array(-1, dtype=float)
mout = array(-1, dtype=float)
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method() is masked)
self.assertTrue(method(0) is masked)
self.assertTrue(method(-1) is masked)
# Using a masked array as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=mout)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=nout)
self.assertTrue(np.isnan(nout))
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method(ddof=1) is masked)
self.assertTrue(method(0, ddof=1) is masked)
self.assertTrue(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
method(out=mout, ddof=1)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout, ddof=1)
self.assertTrue(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
test = a.std(axis=0, ddof=0)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=1)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
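        # With only two unmasked values per column, ddof=2 leaves no degrees
        # of freedom, so every column is masked.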
test = a.std(axis=0, ddof=2)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [1, 1, 1])
def test_diag(self):
# Test diag
x = arange(9).reshape((3, 3))
x[1, 1] = masked
out = np.diag(x)
assert_equal(out, [0, 4, 8])
out = diag(x)
assert_equal(out, [0, 4, 8])
assert_equal(out.mask, [0, 1, 0])
out = diag(out)
control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(out, control)
def test_axis_methods_nomask(self):
# Test the combination nomask & methods w/ axis
a = array([[1, 2, 3], [4, 5, 6]])
assert_equal(a.sum(0), [5, 7, 9])
assert_equal(a.sum(-1), [6, 15])
assert_equal(a.sum(1), [6, 15])
assert_equal(a.prod(0), [4, 10, 18])
assert_equal(a.prod(-1), [6, 120])
assert_equal(a.prod(1), [6, 120])
assert_equal(a.min(0), [1, 2, 3])
assert_equal(a.min(-1), [1, 4])
assert_equal(a.min(1), [1, 4])
assert_equal(a.max(0), [4, 5, 6])
assert_equal(a.max(-1), [3, 6])
assert_equal(a.max(1), [3, 6])
class TestMaskedArrayMathMethodsComplex(TestCase):
# Test class for miscellaneous MaskedArrays methods.
def setUp(self):
# Base data definition.
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
class TestMaskedArrayFunctions(TestCase):
# Test class for miscellaneous functions.
def setUp(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
self.info = (xm, ym)
def test_masked_where_bool(self):
x = [1, 2]
y = masked_where(False, x)
assert_equal(y, [1, 2])
assert_equal(y[1], 2)
def test_masked_equal_wlist(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [0, 0, 1])
mx = masked_not_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [1, 1, 0])
def test_masked_equal_fill_value(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx._mask, [0, 0, 1])
assert_equal(mx.fill_value, 3)
def test_masked_where_condition(self):
# Tests masking functions.
x = array([1., 2., 3., 4., 5.])
x[2] = masked
assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
assert_equal(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2))
assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
assert_equal(masked_where(less_equal(x, 2), x),
masked_less_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5])
def test_masked_where_oddities(self):
# Tests some generic features.
atest = ones((10, 10, 10), dtype=float)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_equal(atest, ctest)
def test_masked_where_shape_constraint(self):
a = arange(10)
try:
test = masked_equal(1, a)
except IndexError:
pass
else:
raise AssertionError("Should have failed...")
test = masked_equal(a, 1)
assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_masked_where_structured(self):
# test that masked_where on a structured array sets a structured
# mask (see issue #2972)
a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
am = np.ma.masked_where(a["A"] < 5, a)
assert_equal(am.mask.dtype.names, am.dtype.names)
assert_equal(am["A"],
np.ma.masked_array(np.zeros(10), np.ones(10)))
def test_masked_otherfunctions(self):
assert_equal(masked_inside(list(range(5)), 1, 3),
[0, 199, 199, 199, 4])
assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
assert_equal(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0])
assert_equal(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1])
assert_equal(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0])
assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1])
def test_round(self):
a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
mask=[0, 1, 0, 0, 0])
assert_equal(a.round(), [1., 2., 3., 5., 6.])
assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
b = empty_like(a)
a.round(out=b)
assert_equal(b, [1., 2., 3., 5., 6.])
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_round_with_output(self):
# Testing round with an explicit output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
self.assertTrue(result is output)
def test_round_with_scalar(self):
# Testing round with scalar/zero dimension input
# GH issue 2244
a = array(1.1, mask=[False])
assert_equal(a.round(), 1)
a = array(1.1, mask=[True])
assert_(a.round() is masked)
a = array(1.1, mask=[False])
output = np.empty(1, dtype=float)
output.fill(-9999)
a.round(out=output)
assert_equal(output, 1)
a = array(1.1, mask=[False])
output = array(-9999., mask=[True])
a.round(out=output)
assert_equal(output[()], 1)
a = array(1.1, mask=[True])
output = array(-9999., mask=[False])
a.round(out=output)
assert_(output[()] is masked)
def test_identity(self):
a = identity(5)
self.assertTrue(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
self.assertTrue(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
y = power(x, b)
assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])
assert_equal(y._mask, [1, 0, 0, 0, 1])
b.mask = nomask
y = power(x, b)
assert_equal(y._mask, [1, 0, 0, 0, 1])
z = x ** b
assert_equal(z._mask, y._mask)
assert_almost_equal(z, y)
assert_almost_equal(z._data, y._data)
x **= b
assert_equal(x._mask, y._mask)
assert_almost_equal(x, y)
assert_almost_equal(x._data, y._data)
def test_power_w_broadcasting(self):
# Test power w/ broadcasting
a2 = np.array([[1., 2., 3.], [4., 5., 6.]])
a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])
b1 = np.array([2, 4, 3])
b2 = np.array([b1, b1])
b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])
ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],
mask=[[1, 1, 0], [0, 1, 1]])
# No broadcasting, base & exp w/ mask
test = a2m ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# No broadcasting, base w/ mask, exp w/o mask
test = a2m ** b2
assert_equal(test, ctrl)
assert_equal(test.mask, a2m.mask)
# No broadcasting, base w/o mask, exp w/ mask
test = a2 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, b2m.mask)
ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],
mask=[[0, 1, 0], [0, 1, 0]])
test = b1 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
test = b2m ** b1
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_where(self):
# Test the where function
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
d = where(xm > 2, xm, -9)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
assert_equal(d._mask, xm._mask)
d = where(xm > 2, -9, ym)
assert_equal(d, [5., 0., 3., 2., -1., -9.,
-9., -10., -9., 1., 0., -9.])
assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
d = where(xm > 2, xm, masked)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
tmp = xm._mask.copy()
tmp[(xm <= 2).filled(True)] = True
assert_equal(d._mask, tmp)
ixm = xm.astype(int)
d = where(ixm > 2, ixm, masked)
assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
assert_equal(d.dtype, ixm.dtype)
def test_where_object(self):
a = np.array(None)
b = masked_array(None)
r = b.copy()
assert_equal(np.ma.where(True, a, a), r)
assert_equal(np.ma.where(True, b, b), r)
def test_where_with_masked_choice(self):
x = arange(10)
x[3] = masked
c = x >= 8
# Set False to masked
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_equal(x, z)
# Set True to masked
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
def test_where_with_masked_condition(self):
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(1, 6)
x[-1] = masked
y = arange(1, 6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_equal(z, zm)
assert_(getmask(zm) is nomask)
assert_equal(zm, [1, 2, 3, 40, 50])
z = where(c, masked, 1)
assert_equal(z, [99, 99, 99, 1, 1])
z = where(c, 1, masked)
assert_equal(z, [99, 1, 1, 99, 99])
def test_where_type(self):
# Test the type conservation with where
x = np.arange(4, dtype=np.int32)
y = np.arange(4, dtype=np.float32) * 2.2
test = where(x > 1.5, y, x).dtype
control = np.find_common_type([np.int32, np.float32], [])
assert_equal(test, control)
def test_choose(self):
# Test choose
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
chosen = choose([2, 3, 1, 0], choices)
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='clip')
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='wrap')
assert_equal(chosen, array([20, 1, 12, 3]))
# Check with some masked indices
indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([99, 1, 12, 99]))
assert_equal(chosen.mask, [1, 0, 0, 1])
# Check with some masked choices
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([20, 31, 12, 3]))
assert_equal(chosen.mask, [1, 0, 0, 1])
def test_choose_with_out(self):
# Test choose with an explicit out keyword
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
self.assertTrue(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([99, 31, 12, 99]))
assert_equal(store.mask, [1, 0, 0, 1])
        # Check with some masked choices + out in an ndarray
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
store = empty(4, dtype=int).view(ndarray)
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([999999, 31, 12, 999999]))
def test_reshape(self):
a = arange(10)
a[0] = masked
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ arguments as list instead of tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
# Try w/ order
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
c = np.reshape(a, (2, 5))
self.assertTrue(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
self.assertTrue(c[0, 0] is masked)
self.assertTrue(c.flags['C'])
def test_make_mask_descr(self):
# Test make_mask_descr
# Flexible
ntype = [('a', np.float), ('b', np.float)]
test = make_mask_descr(ntype)
assert_equal(test, [('a', np.bool), ('b', np.bool)])
# Standard w/ shape
ntype = (np.float, 2)
test = make_mask_descr(ntype)
assert_equal(test, (np.bool, 2))
# Standard standard
ntype = np.float
test = make_mask_descr(ntype)
assert_equal(test, np.dtype(np.bool))
# Nested
ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
        # Named + shape
ntype = [('a', (np.float, 2))]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([('a', (np.bool, 2))]))
# 2 names
ntype = [(('A', 'a'), float)]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([(('A', 'a'), bool)]))
def test_make_mask(self):
# Test make_mask
# w/ a list as an input
mask = [0, 1]
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
mask = np.array([0, 1], dtype=np.bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.float), ('b', np.float)]
bdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
def test_mask_or(self):
# Initialize
mtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
assert_equal(test, mask)
test = mask_or(nomask, mask)
assert_equal(test, mask)
# Using False as input
test = mask_or(mask, False)
assert_equal(test, mask)
# Using True as input. Won't work, but keep it for the kicks
# test = mask_or(mask, True)
# control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype)
# assert_equal(test, control)
        # Using another array w/ the same dtype
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
test = mask_or(mask, other)
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
        # Using another array w/ a different dtype
othertype = [('A', np.bool), ('B', np.bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
try:
test = mask_or(mask, other)
except ValueError:
pass
# Using nested arrays
dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
assert_equal(mask_or(amask, bmask), cntrl)
def test_flatten_mask(self):
# Tests flatten mask
        # Standard dtype
mask = np.array([0, 0, 1], dtype=np.bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
test = flatten_mask(mask)
control = np.array([0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
data = [(0, (0, 0)), (0, (0, 1))]
mask = np.array(data, dtype=mdtype)
test = flatten_mask(mask)
control = np.array([0, 0, 0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
def test_on_ndarray(self):
# Test functions on ndarrays
a = np.array([1, 2, 3, 4])
m = array(a, mask=False)
test = anom(a)
assert_equal(test, m.anom())
test = reshape(a, (2, 2))
assert_equal(test, m.reshape(2, 2))
def test_compress(self):
# Test compress function on ndarray and masked array
# Address Github #2495.
arr = np.arange(8)
arr.shape = 4, 2
cond = np.array([True, False, True, True])
control = arr[[0, 2, 3]]
test = np.ma.compress(cond, arr, axis=0)
assert_equal(test, control)
marr = np.ma.array(arr)
test = np.ma.compress(cond, marr, axis=0)
assert_equal(test, control)
def test_compressed(self):
# Test ma.compressed function.
# Address gh-4026
a = np.ma.array([1, 2])
test = np.ma.compressed(a)
assert_(type(test) is np.ndarray)
# Test case when input data is ndarray subclass
class A(np.ndarray):
pass
a = np.ma.array(A(shape=0))
test = np.ma.compressed(a)
assert_(type(test) is A)
        # Test that compressed flattens
test = np.ma.compressed([[1],[2]])
assert_equal(test.ndim, 1)
test = np.ma.compressed([[[[[1]]]]])
assert_equal(test.ndim, 1)
# Test case when input is MaskedArray subclass
class M(MaskedArray):
pass
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test.ndim, 1)
        # with .compressed() overridden
class M(MaskedArray):
def compressed(self):
return 42
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test, 42)
class TestMaskedFields(TestCase):
def setUp(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
def test_set_records_masks(self):
base = self.data['base']
mdtype = self.data['mdtype']
# Set w/ nomask or masked
base.mask = nomask
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = masked
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ simple boolean
base.mask = False
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = True
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ list
base.mask = [0, 0, 0, 1, 1]
assert_equal_records(base._mask,
np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
dtype=mdtype))
def test_set_record_element(self):
        # Check setting an element of a record
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 2, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'two', 'three', 'four', 'five']))
def test_set_record_slice(self):
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[:3] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'pi', 'pi', 'four', 'five']))
def test_mask_element(self):
"Check record access"
base = self.data['base']
base[0] = masked
for n in ('a', 'b', 'c'):
assert_equal(base[n].mask, [1, 1, 0, 0, 1])
assert_equal(base[n]._data, base._data[n])
def test_getmaskarray(self):
# Test getmaskarray on flexible dtype
ndtype = [('a', int), ('b', float)]
test = empty(3, dtype=ndtype)
assert_equal(getmaskarray(test),
np.array([(0, 0), (0, 0), (0, 0)],
dtype=[('a', '|b1'), ('b', '|b1')]))
test[:] = masked
assert_equal(getmaskarray(test),
np.array([(1, 1), (1, 1), (1, 1)],
dtype=[('a', '|b1'), ('b', '|b1')]))
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
        # Transform globally to a compound dtype (float, 2)
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
dtype=[('a', bool), ('b', bool)])
# No mask
self.assertTrue(isinstance(a[1], MaskedArray))
# One element masked
self.assertTrue(isinstance(a[0], MaskedArray))
assert_equal_records(a[0]._data, a._data[0])
assert_equal_records(a[0]._mask, a._mask[0])
# All element masked
self.assertTrue(isinstance(a[-2], MaskedArray))
assert_equal_records(a[-2]._data, a._data[-2])
assert_equal_records(a[-2]._mask, a._mask[-2])
def test_setitem(self):
# Issue 4866: check that one can set individual items in [record][col]
# and [col][record] order
ndtype = np.dtype([('a', float), ('b', int)])
ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
ma['a'][1] = 3.0
assert_equal(ma['a'], np.array([1.0, 3.0]))
ma[1]['a'] = 4.0
assert_equal(ma['a'], np.array([1.0, 4.0]))
# Issue 2403
mdtype = np.dtype([('a', bool), ('b', bool)])
# soft mask
control = np.array([(False, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a[0]['a'] = 2
assert_equal(a.mask, control)
# hard mask
control = np.array([(True, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a[0]['a'] = 2
assert_equal(a.mask, control)
def test_element_len(self):
# check that len() works for mvoid (Github issue #576)
for rec in self.data['base']:
assert_equal(len(rec), len(self.data['ddtype']))
class TestMaskedView(TestCase):
def setUp(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
self.data = (data, a, controlmask)
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
self.assertTrue(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
def test_view_to_simple_dtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
def test_view_to_flexible_dtype(self):
(data, a, controlmask) = self.data
test = a.view([('A', float), ('B', float)])
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'])
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
def test_view_to_subdtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
self.assertTrue(not isinstance(test, MaskedArray))
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
assert_equal(np.argwhere(a), [[1], [3]])
def test_append_masked_array():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_equal([4,3,2], value=2)
result = np.ma.append(a, b)
expected_data = [1, 2, 3, 4, 3, 2]
expected_mask = [False, True, False, False, False, True]
assert_array_equal(result.data, expected_data)
assert_array_equal(result.mask, expected_mask)
a = np.ma.masked_all((2,2))
b = np.ma.ones((3,1))
result = np.ma.append(a, b)
expected_data = [1] * 3
expected_mask = [True] * 4 + [False] * 3
assert_array_equal(result.data[-3], expected_data)
assert_array_equal(result.mask, expected_mask)
result = np.ma.append(a, b, axis=None)
assert_array_equal(result.data[-3], expected_data)
assert_array_equal(result.mask, expected_mask)
def test_append_masked_array_along_axis():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
# When `axis` is specified, `values` must have the correct shape.
assert_raises(ValueError, np.ma.append, a, b, axis=0)
result = np.ma.append(a[np.newaxis,:], b, axis=0)
expected = np.ma.arange(1, 10)
expected[[1, 6]] = np.ma.masked
expected = expected.reshape((3,3))
assert_array_equal(result.data, expected.data)
assert_array_equal(result.mask, expected.mask)
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
assert default_fill_value(1 + 1j) == 1.e20 + 0.0j
###############################################################################
if __name__ == "__main__":
run_module_suite()
| pyparallel/numpy | numpy/ma/tests/test_core.py | Python | bsd-3-clause | 161,025 |
import torch
import pickle
import logging
from .baseclasses import ScalarMonitor
from .meta import Regurgitate
class Saver(ScalarMonitor):
def __init__(self, save_monitor, model_file, settings_file, **kwargs):
self.saved = False
self.save_monitor = save_monitor
self.model_file = model_file
self.settings_file = settings_file
super().__init__('save', **kwargs)
def call(self, model=None, settings=None, **kwargs):
if self.value is None:
self.value = self.save_monitor.value
if self.save_monitor.changed:
self.save(model, settings)
self.value = self.save_monitor.value
return self.value
def save(self, model, settings):
with open(self.model_file, 'wb') as f:
torch.save(model.cpu().state_dict(), f)
if torch.cuda.is_available():
model.cuda()
with open(self.settings_file, "wb") as f:
pickle.dump(settings, f)
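# --- Usage sketch (illustrative; not part of the original module) ---
# Saver persists the model and settings whenever its companion monitor reports
# a change (e.g. a new best validation score). The stub below only mimics the
# two attributes Saver actually reads (`value` and `changed`); the real
# companion monitors live elsewhere in this package. The sketch also assumes
# ScalarMonitor.__init__ accepts just a name and leaves `self.value` as None,
# which Saver.call relies on above.
def _saver_usage_sketch(model_file='model.pt', settings_file='settings.pkl'):
    import torch.nn as nn

    class _StubMonitor(object):
        value = 0.123   # whatever scalar the real monitor tracks
        changed = True  # True makes Saver.call trigger a save

    model = nn.Linear(4, 2)
    saver = Saver(_StubMonitor(), model_file, settings_file)
    # Writes both files because the stub reports a change; note that save()
    # temporarily moves the model to CPU before serializing the state dict.
    saver.call(model=model, settings={'hidden_dim': 4})
    return saver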
| isaachenrion/jets | src/monitors/saver.py | Python | bsd-3-clause | 985 |
from distutils.core import setup
import toolkit_library
from toolkit_library import inspector
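# read_modules gathers the docstring of every module in toolkit_library (one
# per line) so it can be substituted for the "{{ modules }}" placeholder in
# README_template below.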
def read_modules():
result = ''
package = inspector.PackageInspector(toolkit_library)
for module in package.get_all_modules():
exec('from toolkit_library import {0}'.format(module))
result = '{0}{1}\n'.format(result, eval('{0}.__doc__'.format(module)))
return result.rstrip()
readme = ''
with open('README_template', 'r') as file:
readme = file.read()
readme = readme.replace('{{ modules }}', read_modules())
with open('README.rst', 'w') as file:
file.write(readme)
setup(
name = toolkit_library.__name__,
version = toolkit_library.__version__,
url = 'https://github.com/tylerlong/toolkit_library',
license = 'BSD',
author = toolkit_library.__author__,
author_email = '[email protected]',
description = 'Toolkit Library, full of useful toolkits',
long_description = readme,
packages = ['toolkit_library', ],
platforms = 'any',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| tylerlong/toolkit_library | setup.py | Python | bsd-3-clause | 1,418 |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
# Nelson Liu <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_split,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # np.reshape preserves the data contiguity of y, whereas indexing
            # with [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than or equal "
"to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth, self.min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes, self.min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
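# --- Illustrative sketch (not part of the original module) ---
# The fit() docstring above notes that ``X_idx_sorted`` lets several trees
# grown on the same dataset share a single presorted index matrix instead of
# re-sorting per tree. The helper below sketches that pattern; it refers to
# DecisionTreeClassifier, which is defined further down in this module and is
# resolved at call time. Ensemble code in scikit-learn does the equivalent
# internally.
def _shared_presort_sketch(X, y, n_trees=3):
    X = np.asfortranarray(X, dtype=DTYPE)
    X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0), dtype=np.int32)
    trees = []
    for seed in range(n_trees):
        tree = DecisionTreeClassifier(random_state=seed, presort=True)
        tree.fit(X, y, X_idx_sorted=X_idx_sorted)
        trees.append(tree)
    return trees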
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
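# --- Illustrative sketch (not part of the original module) ---
# A minimal walk-through of the classifier methods documented above:
# predict_proba returns per-leaf class fractions, apply returns the leaf index
# each sample lands in, and decision_path returns a sparse node-indicator
# matrix. It assumes scikit-learn's bundled iris dataset is available; nothing
# here runs at import time.
def _decision_tree_classifier_sketch():
    from sklearn.datasets import load_iris

    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3, random_state=0)
    clf.fit(iris.data, iris.target)
    proba = clf.predict_proba(iris.data[:5])   # shape (5, n_classes)
    leaves = clf.apply(iris.data[:5])          # leaf index per sample
    path = clf.decision_path(iris.data[:5])    # CSR node-indicator matrix
    return proba, leaves, path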
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. If the impurity
of a node is below the threshold, the node is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
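# --- Illustrative sketch (not part of the original module) ---
# A small regression example on synthetic data: ``criterion`` selects the
# impurity measure described above ("mse" or "mae"), and
# ``feature_importances_`` exposes the normalized impurity reduction per
# feature once the tree is fitted.
def _decision_tree_regressor_sketch():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    y = 2.0 * X[:, 0] + 0.1 * rng.rand(100)
    reg = DecisionTreeRegressor(criterion="mse", max_depth=4, random_state=0)
    reg.fit(X, y)
    return reg.predict(X[:5]), reg.feature_importances_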
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_split=min_impurity_split,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
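Examples
--------
A minimal illustrative sketch (added for clarity, not from the upstream
docstring); in practice extra-trees are used inside an ensemble such as
ExtraTreesRegressor:
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import ExtraTreeRegressor
>>> boston = load_boston()
>>> regressor = ExtraTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
array([...])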
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_split=1e-7,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=random_state)
| sonnyhu/scikit-learn | sklearn/tree/tree.py | Python | bsd-3-clause | 41,818 |
#!/usr/bin/env python3
#
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Adds the code parts to a resource APK."""
import argparse
import logging
import os
import shutil
import sys
import tempfile
import zipfile
import zlib
import finalize_apk
from util import build_utils
from util import diff_utils
from util import zipalign
# Input dex.jar files are zipaligned.
zipalign.ApplyZipFileZipAlignFix()
# Taken from aapt's Package.cpp:
_NO_COMPRESS_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.gif', '.wav', '.mp2',
'.mp3', '.ogg', '.aac', '.mpg', '.mpeg', '.mid',
'.midi', '.smf', '.jet', '.rtttl', '.imy', '.xmf',
'.mp4', '.m4a', '.m4v', '.3gp', '.3gpp', '.3g2',
'.3gpp2', '.amr', '.awb', '.wma', '.wmv', '.webm')
def _ParseArgs(args):
parser = argparse.ArgumentParser()
build_utils.AddDepfileOption(parser)
parser.add_argument(
'--assets',
help='GYP-list of files to add as assets in the form '
'"srcPath:zipPath", where ":zipPath" is optional.')
parser.add_argument(
'--java-resources', help='GYP-list of java_resources JARs to include.')
parser.add_argument('--write-asset-list',
action='store_true',
help='Whether to create an assets/assets_list file.')
parser.add_argument(
'--uncompressed-assets',
help='Same as --assets, except disables compression.')
parser.add_argument('--resource-apk',
help='An .ap_ file built using aapt',
required=True)
parser.add_argument('--output-apk',
help='Path to the output file',
required=True)
parser.add_argument('--format', choices=['apk', 'bundle-module'],
default='apk', help='Specify output format.')
parser.add_argument('--dex-file',
help='Path to the classes.dex to use')
parser.add_argument(
'--jdk-libs-dex-file',
help='Path to classes.dex created by dex_jdk_libs.py')
parser.add_argument('--uncompress-dex', action='store_true',
help='Store .dex files uncompressed in the APK')
parser.add_argument('--native-libs',
action='append',
help='GYP-list of native libraries to include. '
'Can be specified multiple times.',
default=[])
parser.add_argument('--secondary-native-libs',
action='append',
help='GYP-list of native libraries for secondary '
'android-abi. Can be specified multiple times.',
default=[])
parser.add_argument('--android-abi',
help='Android architecture to use for native libraries')
parser.add_argument('--secondary-android-abi',
help='The secondary Android architecture to use for '
'secondary native libraries')
parser.add_argument(
'--is-multi-abi',
action='store_true',
help='Will add a placeholder for the missing ABI if no native libs or '
'placeholders are set for either the primary or secondary ABI. Can only '
'be set if both --android-abi and --secondary-android-abi are set.')
parser.add_argument(
'--native-lib-placeholders',
help='GYP-list of native library placeholders to add.')
parser.add_argument(
'--secondary-native-lib-placeholders',
help='GYP-list of native library placeholders to add '
'for the secondary ABI')
parser.add_argument('--uncompress-shared-libraries', default='False',
choices=['true', 'True', 'false', 'False'],
help='Whether to uncompress native shared libraries. Argument must be '
'a boolean value.')
parser.add_argument(
'--apksigner-jar', help='Path to the apksigner executable.')
parser.add_argument('--zipalign-path',
help='Path to the zipalign executable.')
parser.add_argument('--key-path',
help='Path to keystore for signing.')
parser.add_argument('--key-passwd',
help='Keystore password')
parser.add_argument('--key-name',
help='Keystore name')
parser.add_argument(
'--min-sdk-version', required=True, help='Value of APK\'s minSdkVersion')
parser.add_argument(
'--best-compression',
action='store_true',
help='Use zip -9 rather than zip -1')
parser.add_argument(
'--library-always-compress',
action='append',
help='The list of library files that we always compress.')
parser.add_argument(
'--library-renames',
action='append',
help='The list of library files that we prepend crazy. to their names.')
parser.add_argument('--warnings-as-errors',
action='store_true',
help='Treat all warnings as errors.')
diff_utils.AddCommandLineFlags(parser)
options = parser.parse_args(args)
options.assets = build_utils.ParseGnList(options.assets)
options.uncompressed_assets = build_utils.ParseGnList(
options.uncompressed_assets)
options.native_lib_placeholders = build_utils.ParseGnList(
options.native_lib_placeholders)
options.secondary_native_lib_placeholders = build_utils.ParseGnList(
options.secondary_native_lib_placeholders)
options.java_resources = build_utils.ParseGnList(options.java_resources)
options.native_libs = build_utils.ParseGnList(options.native_libs)
options.secondary_native_libs = build_utils.ParseGnList(
options.secondary_native_libs)
options.library_always_compress = build_utils.ParseGnList(
options.library_always_compress)
options.library_renames = build_utils.ParseGnList(options.library_renames)
# --apksigner-jar, --zipalign-path, --key-xxx arguments are
# required when building an APK, but not a bundle module.
if options.format == 'apk':
required_args = [
'apksigner_jar', 'zipalign_path', 'key_path', 'key_passwd', 'key_name'
]
for required in required_args:
if not vars(options)[required]:
raise Exception('Argument --%s is required for APKs.' % (
required.replace('_', '-')))
options.uncompress_shared_libraries = \
options.uncompress_shared_libraries in ['true', 'True']
if not options.android_abi and (options.native_libs or
options.native_lib_placeholders):
raise Exception('Must specify --android-abi with --native-libs')
if not options.secondary_android_abi and (options.secondary_native_libs or
options.secondary_native_lib_placeholders):
raise Exception('Must specify --secondary-android-abi with'
' --secondary-native-libs')
if options.is_multi_abi and not (options.android_abi
and options.secondary_android_abi):
raise Exception('Must specify --is-multi-abi with both --android-abi '
'and --secondary-android-abi.')
return options
def _SplitAssetPath(path):
"""Returns (src, dest) given an asset path in the form src[:dest]."""
path_parts = path.split(':')
src_path = path_parts[0]
if len(path_parts) > 1:
dest_path = path_parts[1]
else:
dest_path = os.path.basename(src_path)
return src_path, dest_path
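# Illustrative behaviour of _SplitAssetPath (hypothetical inputs):
#   'images/logo.png'               -> ('images/logo.png', 'logo.png')
#   'images/logo.png:icons/a.png'   -> ('images/logo.png', 'icons/a.png')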
def _ExpandPaths(paths):
"""Converts src:dst into tuples and enumerates files within directories.
Args:
paths: Paths in the form "src_path:dest_path"
Returns:
A list of (src_path, dest_path) tuples sorted by dest_path (for stable
ordering within output .apk).
"""
ret = []
for path in paths:
src_path, dest_path = _SplitAssetPath(path)
if os.path.isdir(src_path):
for f in build_utils.FindInDirectory(src_path, '*'):
ret.append((f, os.path.join(dest_path, f[len(src_path) + 1:])))
else:
ret.append((src_path, dest_path))
ret.sort(key=lambda t:t[1])
return ret
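# Illustrative call (hypothetical paths):
#   _ExpandPaths(['res/a.txt:assets/a.txt', 'some_dir'])
# where some_dir holds x.png and y.png yields, sorted by dest path:
#   [('res/a.txt', 'assets/a.txt'),
#    ('some_dir/x.png', 'some_dir/x.png'),
#    ('some_dir/y.png', 'some_dir/y.png')]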
def _GetAssetsToAdd(path_tuples,
fast_align,
disable_compression=False,
allow_reads=True):
"""Returns the list of file_detail tuples for assets in the apk.
Args:
path_tuples: List of src_path, dest_path tuples to add.
fast_align: Whether to perform alignment in python zipfile (alternatively
alignment can be done using the zipalign utility out of band).
disable_compression: Whether to disable compression.
allow_reads: If false, we do not try to read the files from disk (to find
their size for example).
Returns: A list of (apk_path, src_path, compress, alignment) tuples
representing what and how assets are added.
"""
assets_to_add = []
# Group all uncompressed assets together in the hope that it will increase
# locality of mmap'ed files.
for target_compress in (False, True):
for src_path, dest_path in path_tuples:
compress = not disable_compression and (
os.path.splitext(src_path)[1] not in _NO_COMPRESS_EXTENSIONS)
if target_compress == compress:
# AddToZipHermetic() uses this logic to avoid growing small files.
# We need it here in order to set alignment correctly.
if allow_reads and compress and os.path.getsize(src_path) < 16:
compress = False
apk_path = 'assets/' + dest_path
alignment = 0 if compress and not fast_align else 4
assets_to_add.append((apk_path, src_path, compress, alignment))
return assets_to_add
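# Illustrative results (hypothetical inputs): with fast_align=False, a
# non-compressible 'images/foo.png' destined for 'foo.png' yields
# ('assets/foo.png', 'images/foo.png', False, 4), while a compressible
# 'data/foo.json' yields ('assets/foo.json', 'data/foo.json', True, 0).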
def _AddFiles(apk, details):
"""Adds files to the apk.
Args:
apk: path to APK to add to.
details: A list of file detail tuples (apk_path, src_path, compress,
alignment) representing what and how files are added to the APK.
"""
for apk_path, src_path, compress, alignment in details:
# This check is only relevant for assets, but it should not matter if it is
# checked for the whole list of files.
try:
apk.getinfo(apk_path)
# Should never happen since write_build_config.py handles merging.
raise Exception(
'Multiple targets specified the asset path: %s' % apk_path)
except KeyError:
zipalign.AddToZipHermetic(
apk,
apk_path,
src_path=src_path,
compress=compress,
alignment=alignment)
def _GetNativeLibrariesToAdd(native_libs, android_abi, uncompress, fast_align,
lib_always_compress, lib_renames):
"""Returns the list of file_detail tuples for native libraries in the apk.
Returns: A list of (apk_path, src_path, compress, alignment) tuples
representing what and how native libraries are added.
"""
libraries_to_add = []
for path in native_libs:
basename = os.path.basename(path)
compress = not uncompress or any(lib_name in basename
for lib_name in lib_always_compress)
rename = any(lib_name in basename for lib_name in lib_renames)
if rename:
basename = 'crazy.' + basename
lib_android_abi = android_abi
if path.startswith('android_clang_arm64_hwasan/'):
lib_android_abi = 'arm64-v8a-hwasan'
apk_path = 'lib/%s/%s' % (lib_android_abi, basename)
alignment = 0 if compress and not fast_align else 0x1000
libraries_to_add.append((apk_path, path, compress, alignment))
return libraries_to_add
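# Illustrative result (hypothetical inputs): with uncompress=True,
# fast_align=False and the library not listed in lib_always_compress,
# 'out/libfoo.so' for android_abi='armeabi-v7a' yields
# ('lib/armeabi-v7a/libfoo.so', 'out/libfoo.so', False, 0x1000).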
def _CreateExpectationsData(native_libs, assets):
"""Creates list of native libraries and assets."""
native_libs = sorted(native_libs)
assets = sorted(assets)
ret = []
for apk_path, _, compress, alignment in native_libs + assets:
ret.append('apk_path=%s, compress=%s, alignment=%s\n' %
(apk_path, compress, alignment))
return ''.join(ret)
def main(args):
build_utils.InitLogging('APKBUILDER_DEBUG')
args = build_utils.ExpandFileArgs(args)
options = _ParseArgs(args)
# Until Python 3.7, there's no better way to set compression level.
# The default is 6.
if options.best_compression:
# Compresses about twice as slow as the default.
zlib.Z_DEFAULT_COMPRESSION = 9
else:
# Compresses about twice as fast as the default.
zlib.Z_DEFAULT_COMPRESSION = 1
# Manually align only when alignment is necessary.
# Python's zip implementation duplicates file comments in the central
# directory, whereas zipalign does not, so use zipalign for official builds.
fast_align = options.format == 'apk' and not options.best_compression
native_libs = sorted(options.native_libs)
# Include native libs in the depfile_deps since GN doesn't know about the
# dependencies when is_component_build=true.
depfile_deps = list(native_libs)
# For targets that depend on static library APKs, dex paths are created by
# the static library's dexsplitter target and GN doesn't know about these
# paths.
if options.dex_file:
depfile_deps.append(options.dex_file)
secondary_native_libs = []
if options.secondary_native_libs:
secondary_native_libs = sorted(options.secondary_native_libs)
depfile_deps += secondary_native_libs
if options.java_resources:
# Included via .build_config, so need to write it to depfile.
depfile_deps.extend(options.java_resources)
assets = _ExpandPaths(options.assets)
uncompressed_assets = _ExpandPaths(options.uncompressed_assets)
# Included via .build_config, so need to write it to depfile.
depfile_deps.extend(x[0] for x in assets)
depfile_deps.extend(x[0] for x in uncompressed_assets)
# Bundle modules have a structure similar to APKs, except that resources
# are compiled in protobuf format (instead of binary xml), and that some
# files are located into different top-level directories, e.g.:
# AndroidManifest.xml -> manifest/AndroidManifest.xml
# classes.dex -> dex/classes.dex
# res/ -> res/ (unchanged)
# assets/ -> assets/ (unchanged)
# <other-file> -> root/<other-file>
#
# Hence, the following variables are used to control the location of files in
# the final archive.
if options.format == 'bundle-module':
apk_manifest_dir = 'manifest/'
apk_root_dir = 'root/'
apk_dex_dir = 'dex/'
else:
apk_manifest_dir = ''
apk_root_dir = ''
apk_dex_dir = ''
def _GetAssetDetails(assets, uncompressed_assets, fast_align, allow_reads):
ret = _GetAssetsToAdd(assets,
fast_align,
disable_compression=False,
allow_reads=allow_reads)
ret.extend(
_GetAssetsToAdd(uncompressed_assets,
fast_align,
disable_compression=True,
allow_reads=allow_reads))
return ret
libs_to_add = _GetNativeLibrariesToAdd(
native_libs, options.android_abi, options.uncompress_shared_libraries,
fast_align, options.library_always_compress, options.library_renames)
if options.secondary_android_abi:
libs_to_add.extend(
_GetNativeLibrariesToAdd(
secondary_native_libs, options.secondary_android_abi,
options.uncompress_shared_libraries, fast_align,
options.library_always_compress, options.library_renames))
if options.expected_file:
# We compute expectations without reading the files. This allows us to check
# expectations for different targets by just generating their build_configs
# and not have to first generate all the actual files and all their
# dependencies (for example by just passing --only-verify-expectations).
asset_details = _GetAssetDetails(assets,
uncompressed_assets,
fast_align,
allow_reads=False)
actual_data = _CreateExpectationsData(libs_to_add, asset_details)
diff_utils.CheckExpectations(actual_data, options)
if options.only_verify_expectations:
if options.depfile:
build_utils.WriteDepfile(options.depfile,
options.actual_file,
inputs=depfile_deps)
return
# Past this point we are actually creating the final apk, so recompute the
# asset details, this time reading the files on disk so that size-based
# optimizations (e.g. not compressing tiny files) can be applied.
assets_to_add = _GetAssetDetails(
assets, uncompressed_assets, fast_align, allow_reads=True)
# Targets generally do not depend on apks, so no need for only_if_changed.
with build_utils.AtomicOutput(options.output_apk, only_if_changed=False) as f:
with zipfile.ZipFile(options.resource_apk) as resource_apk, \
zipfile.ZipFile(f, 'w') as out_apk:
def add_to_zip(zip_path, data, compress=True, alignment=4):
zipalign.AddToZipHermetic(
out_apk,
zip_path,
data=data,
compress=compress,
alignment=0 if compress and not fast_align else alignment)
def copy_resource(zipinfo, out_dir=''):
add_to_zip(
out_dir + zipinfo.filename,
resource_apk.read(zipinfo.filename),
compress=zipinfo.compress_type != zipfile.ZIP_STORED)
# Make assets come before resources in order to maintain the same file
# ordering as GYP / aapt. http://crbug.com/561862
resource_infos = resource_apk.infolist()
# 1. AndroidManifest.xml
logging.debug('Adding AndroidManifest.xml')
copy_resource(
resource_apk.getinfo('AndroidManifest.xml'), out_dir=apk_manifest_dir)
# 2. Assets
logging.debug('Adding assets/')
_AddFiles(out_apk, assets_to_add)
# 3. Dex files
logging.debug('Adding classes.dex')
if options.dex_file:
with open(options.dex_file, 'rb') as dex_file_obj:
if options.dex_file.endswith('.dex'):
max_dex_number = 1
# This is the case for incremental_install=true.
add_to_zip(
apk_dex_dir + 'classes.dex',
dex_file_obj.read(),
compress=not options.uncompress_dex)
else:
max_dex_number = 0
with zipfile.ZipFile(dex_file_obj) as dex_zip:
for dex in (d for d in dex_zip.namelist() if d.endswith('.dex')):
max_dex_number += 1
add_to_zip(
apk_dex_dir + dex,
dex_zip.read(dex),
compress=not options.uncompress_dex)
if options.jdk_libs_dex_file:
with open(options.jdk_libs_dex_file, 'rb') as dex_file_obj:
add_to_zip(
apk_dex_dir + 'classes{}.dex'.format(max_dex_number + 1),
dex_file_obj.read(),
compress=not options.uncompress_dex)
# 4. Native libraries.
logging.debug('Adding lib/')
_AddFiles(out_apk, libs_to_add)
# Add a placeholder lib if the APK should be multi ABI but is missing libs
# for one of the ABIs.
native_lib_placeholders = options.native_lib_placeholders
secondary_native_lib_placeholders = (
options.secondary_native_lib_placeholders)
if options.is_multi_abi:
if ((secondary_native_libs or secondary_native_lib_placeholders)
and not native_libs and not native_lib_placeholders):
native_lib_placeholders += ['libplaceholder.so']
if ((native_libs or native_lib_placeholders)
and not secondary_native_libs
and not secondary_native_lib_placeholders):
secondary_native_lib_placeholders += ['libplaceholder.so']
# Add placeholder libs.
for name in sorted(native_lib_placeholders):
# Note: Empty libs files are ignored by md5check (can cause issues
# with stale builds when the only change is adding/removing
# placeholders).
apk_path = 'lib/%s/%s' % (options.android_abi, name)
add_to_zip(apk_path, '', alignment=0x1000)
for name in sorted(secondary_native_lib_placeholders):
# Note: Empty libs files are ignored by md5check (can cause issues
# with stale builds when the only change is adding/removing
# placeholders).
apk_path = 'lib/%s/%s' % (options.secondary_android_abi, name)
add_to_zip(apk_path, '', alignment=0x1000)
# 5. Resources
logging.debug('Adding res/')
for info in sorted(resource_infos, key=lambda i: i.filename):
if info.filename != 'AndroidManifest.xml':
copy_resource(info)
# 6. Java resources that should be accessible via
# Class.getResourceAsStream(), in particular parts of Emma jar.
# Prebuilt jars may contain class files which we shouldn't include.
logging.debug('Adding Java resources')
for java_resource in options.java_resources:
with zipfile.ZipFile(java_resource, 'r') as java_resource_jar:
for apk_path in sorted(java_resource_jar.namelist()):
apk_path_lower = apk_path.lower()
if apk_path_lower.startswith('meta-inf/'):
continue
if apk_path_lower.endswith('/'):
continue
if apk_path_lower.endswith('.class'):
continue
add_to_zip(apk_root_dir + apk_path,
java_resource_jar.read(apk_path))
if options.format == 'apk':
zipalign_path = None if fast_align else options.zipalign_path
finalize_apk.FinalizeApk(options.apksigner_jar,
zipalign_path,
f.name,
f.name,
options.key_path,
options.key_passwd,
options.key_name,
int(options.min_sdk_version),
warnings_as_errors=options.warnings_as_errors)
logging.debug('Moving file into place')
if options.depfile:
build_utils.WriteDepfile(options.depfile,
options.output_apk,
inputs=depfile_deps)
if __name__ == '__main__':
main(sys.argv[1:])
| youtube/cobalt | build/android/gyp/apkbuilder.py | Python | bsd-3-clause | 22,278 |
import urllib.request, urllib.parse, urllib.error
from oauth2 import Request as OAuthRequest, SignatureMethod_HMAC_SHA1
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from social_auth.backends import ConsumerBasedOAuth, OAuthBackend, BaseOAuth2
from social_auth.utils import dsa_urlopen
class RdioBaseBackend(OAuthBackend):
def get_user_id(self, details, response):
return response['key']
def get_user_details(self, response):
return {
'username': response['username'],
'first_name': response['firstName'],
'last_name': response['lastName'],
'fullname': response['displayName'],
}
class RdioOAuth1Backend(RdioBaseBackend):
"""Rdio OAuth authentication backend"""
name = 'rdio-oauth1'
EXTRA_DATA = [
('key', 'rdio_id'),
('icon', 'rdio_icon_url'),
('url', 'rdio_profile_url'),
('username', 'rdio_username'),
('streamRegion', 'rdio_stream_region'),
]
@classmethod
def tokens(cls, instance):
token = super(RdioOAuth1Backend, cls).tokens(instance)
if token and 'access_token' in token:
token = dict(tok.split('=')
for tok in token['access_token'].split('&'))
return token
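# Illustrative example (made-up token values): an access_token stored as
# 'oauth_token=abc&oauth_token_secret=xyz' is returned as
# {'oauth_token': 'abc', 'oauth_token_secret': 'xyz'}.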
class RdioOAuth2Backend(RdioBaseBackend):
name = 'rdio-oauth2'
EXTRA_DATA = [
('key', 'rdio_id'),
('icon', 'rdio_icon_url'),
('url', 'rdio_profile_url'),
('username', 'rdio_username'),
('streamRegion', 'rdio_stream_region'),
('refresh_token', 'refresh_token', True),
('token_type', 'token_type', True),
]
class RdioOAuth1(ConsumerBasedOAuth):
AUTH_BACKEND = RdioOAuth1Backend
REQUEST_TOKEN_URL = 'http://api.rdio.com/oauth/request_token'
AUTHORIZATION_URL = 'https://www.rdio.com/oauth/authorize'
ACCESS_TOKEN_URL = 'http://api.rdio.com/oauth/access_token'
RDIO_API_BASE = 'http://api.rdio.com/1/'
SETTINGS_KEY_NAME = 'RDIO_OAUTH1_KEY'
SETTINGS_SECRET_NAME = 'RDIO_OAUTH1_SECRET'
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
params = {
'method': 'currentUser',
'extras': 'username,displayName,streamRegion',
}
request = self.oauth_post_request(access_token, self.RDIO_API_BASE,
params=params)
response = dsa_urlopen(request.url, request.to_postdata())
# Under Python 3 the response body is bytes; decode it before JSON parsing.
json = response.read().decode('utf-8')
try:
return simplejson.loads(json)['result']
except ValueError:
return None
def oauth_post_request(self, token, url, params):
"""Generate OAuth request, setups callback url"""
if 'oauth_verifier' in self.data:
params['oauth_verifier'] = self.data['oauth_verifier']
request = OAuthRequest.from_consumer_and_token(self.consumer,
token=token,
http_url=url,
parameters=params,
http_method='POST')
request.sign_request(SignatureMethod_HMAC_SHA1(), self.consumer, token)
return request
class RdioOAuth2(BaseOAuth2):
AUTH_BACKEND = RdioOAuth2Backend
AUTHORIZATION_URL = 'https://www.rdio.com/oauth2/authorize'
ACCESS_TOKEN_URL = 'https://www.rdio.com/oauth2/token'
RDIO_API_BASE = 'https://www.rdio.com/api/1/'
SETTINGS_KEY_NAME = 'RDIO_OAUTH2_KEY'
SETTINGS_SECRET_NAME = 'RDIO_OAUTH2_SECRET'
SCOPE_VAR_NAME = 'RDIO2_PERMISSIONS'
EXTRA_PARAMS_VAR_NAME = 'RDIO2_EXTRA_PARAMS'
def user_data(self, access_token, *args, **kwargs):
params = {
'method': 'currentUser',
'extras': 'username,displayName,streamRegion',
'access_token': access_token,
}
# POST data must be bytes under Python 3.
response = dsa_urlopen(self.RDIO_API_BASE, urllib.parse.urlencode(params).encode('utf-8'))
try:
return simplejson.load(response)['result']
except ValueError:
return None
# Backend definition
BACKENDS = {
'rdio-oauth1': RdioOAuth1,
'rdio-oauth2': RdioOAuth2
}
| limdauto/django-social-auth | social_auth/backends/contrib/rdio.py | Python | bsd-3-clause | 4,358 |
import csv
import time
from datetime import timedelta
from django.shortcuts import render, redirect
from django.http import Http404, HttpResponse, HttpResponseForbidden, JsonResponse
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.utils.html import format_html
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.db import transaction, IntegrityError
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from django.contrib.auth.forms import PasswordChangeForm
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.timezone import now
from formtools.wizard.views import SessionWizardView
from .models import Petition, Signature, Organization, PytitionUser, PetitionTemplate, Permission
from .models import SlugModel
from .forms import SignatureForm, ContentFormPetition, EmailForm, NewsletterForm, SocialNetworkForm, ContentFormTemplate
from .forms import StyleForm, PetitionCreationStep1, PetitionCreationStep2, PetitionCreationStep3, UpdateInfoForm
from .forms import DeleteAccountForm, OrgCreationForm
from .helpers import get_client_ip, get_session_user, petition_from_id
from .helpers import check_petition_is_accessible
from .helpers import send_confirmation_email, subscribe_to_newsletter
from .helpers import get_update_form, petition_detail_meta
#------------------------------------ Views -----------------------------------
# Path : /
# Depending on the settings.INDEX_PAGE, show a list of petitions or
# redirect to a user/org profile page
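# Example configuration (illustrative values, not project defaults):
#   INDEX_PAGE = 'ORGA_PROFILE'
#   INDEX_PAGE_ORGA = 'my-organization'
# Recognized INDEX_PAGE values: 'ALL_PETITIONS', 'ORGA_PROFILE',
# 'USER_PROFILE', 'LOGIN_REGISTER'; any other value falls back to a list
# of the latest published petitions.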
def index(request):
petitions = Petition.objects.filter(published=True).order_by('-id')[:12]
if not hasattr(settings, 'INDEX_PAGE'):
raise Http404(_("You must set an INDEX_PAGE config in your settings"))
if settings.INDEX_PAGE == 'USER_PROFILE':
try:
user_name = settings.INDEX_PAGE_USER
except AttributeError:
raise Http404(_("You must set an INDEX_PAGE_USER config in your settings"))
elif settings.INDEX_PAGE == 'ORGA_PROFILE':
try:
org_name = settings.INDEX_PAGE_ORGA
except AttributeError:
raise Http404(_("You must set an INDEX_PAGE_ORGA config in your settings"))
if settings.INDEX_PAGE == 'ALL_PETITIONS':
return redirect("all_petitions")
elif settings.INDEX_PAGE == 'ORGA_PROFILE':
org = Organization.objects.get(name=org_name)
return redirect("org_profile", org.slugname)
elif settings.INDEX_PAGE == 'USER_PROFILE':
return redirect("user_profile", user_name)
elif settings.INDEX_PAGE == 'LOGIN_REGISTER':
if request.user.is_authenticated:
return redirect("user_dashboard")
else:
return redirect("login")
else:
authenticated = request.user.is_authenticated
if authenticated:
user = get_session_user(request)
else:
user = request.user
return render(request, 'petition/index.html',
{
'user': user,
'petitions': petitions
}
)
# /all_petitions
# Show all the petitions in the database
def all_petitions(request):
petitions = Petition.objects.filter(published=True).all()
return render(request, 'petition/all_petitions.html',
{'petitions': petitions})
# /search?q=QUERY
# Show results of a search query
def search(request):
q = request.GET.get('q', '')
if q != "":
petitions = Petition.objects.filter(Q(title__icontains=q) | Q(text__icontains=q)).filter(published=True)[:15]
orgs = Organization.objects.filter(name__icontains=q)
else:
petitions = Petition.objects.filter(published=True)[:15]
orgs = []
return render(
request, 'petition/search.html',
{
'petitions': petitions,
'orgs': orgs,
'q': q
}
)
# /<int:petition_id>/
# Show information on a petition
def detail(request, petition_id):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
try:
pytitionuser = get_session_user(request)
except:
pytitionuser = None
sign_form = SignatureForm(petition=petition)
ctx = {"user": pytitionuser, 'petition': petition, 'form': sign_form,
'meta': petition_detail_meta(request, petition_id)}
return render(request, 'petition/petition_detail.html', ctx)
# /<int:petition_id>/confirm/<confirmation_hash>
# Confirm signature to a petition
def confirm(request, petition_id, confirmation_hash):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
try:
successmsg = petition.confirm_signature(confirmation_hash)
if successmsg is None:
messages.error(request, _("Error: This confirmation code is invalid. Maybe you\'ve already confirmed?"))
else:
messages.success(request, successmsg)
except ValidationError as e:
messages.error(request, _(e.message))
except Signature.DoesNotExist:
messages.error(request, _("Error: This confirmation code is invalid."))
return redirect(petition.url)
# <int:petition_id>/get_csv_signature
# <int:petition_id>/get_csv_confirmed_signature
# returns the CSV files of the list of signatures
@login_required
def get_csv_signature(request, petition_id, only_confirmed):
user = get_session_user(request)
try:
petition = Petition.objects.get(pk=petition_id)
except Petition.DoesNotExist:
return JsonResponse({}, status=404)
if petition.owner_type == "org":
if not petition.org.is_allowed_to(user, "can_view_signatures"):
return JsonResponse({}, status=403)
filename = '{}.csv'.format(petition)
signatures = Signature.objects.filter(petition = petition)
if only_confirmed:
signatures = signatures.filter(confirmed = True)
else:
signatures = signatures.all()
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment;filename={}'.format(filename).replace('\r\n', '').replace(' ', '%20')
writer = csv.writer(response)
attrs = ['first_name', 'last_name', 'phone', 'email', 'subscribed_to_mailinglist', 'confirmed']
writer.writerow(attrs)
for signature in signatures:
values = [getattr(signature, field) for field in attrs]
writer.writerow(values)
return response
# resend/<int:signature_id>
# resend the signature confirmation email
@login_required
def go_send_confirmation_email(request, signature_id):
app_label = Signature._meta.app_label
signature = Signature.objects.filter(pk=signature_id).get()
send_confirmation_email(request, signature)
return redirect('admin:{}_signature_change'.format(app_label), signature_id)
# <int:petition_id>/sign
# Sign a petition
def create_signature(request, petition_id):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
if request.method == "POST":
form = SignatureForm(petition=petition, data=request.POST)
if not form.is_valid():
return render(request, 'petition/petition_detail.html', {'petition': petition, 'form': form, 'meta': petition_detail_meta(request, petition_id)})
ipaddr = make_password(
get_client_ip(request),
salt=petition.salt.encode('utf-8'))
since = now() - timedelta(seconds=settings.SIGNATURE_THROTTLE_TIMING)
signatures = Signature.objects.filter(
petition=petition,
ipaddress=ipaddr,
date__gt=since)
if signatures.count() > settings.SIGNATURE_THROTTLE:
messages.error(request, _("Too many signatures from your IP address, please try again later."))
return render(request, 'petition/petition_detail.html', {'petition': petition, 'form': form, 'meta': petition_detail_meta(request, petition_id)})
else:
signature = form.save()
signature.ipaddress = ipaddr
signature.save()
send_confirmation_email(request, signature)
messages.success(request,
format_html(_("Thank you for signing this petition, an email has just been sent to you at your address \'{}\'" \
" in order to confirm your signature.<br>" \
"You will need to click on the confirmation link in the email.<br>" \
"If you cannot find the email in your Inbox, please have a look in your Spam box.")\
, signature.email))
if petition.has_newsletter and signature.subscribed_to_mailinglist:
subscribe_to_newsletter(petition, signature.email)
return redirect(petition.url)
# /org/<slug:orgslugname>/dashboard
# Show the dashboard of an organization
@login_required
def org_dashboard(request, orgslugname):
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
messages.error(request, _("This organization does not exist: '{}'".format(orgslugname)))
return redirect("user_dashboard")
pytitionuser = get_session_user(request)
if pytitionuser not in org.members.all():
messages.error(request, _("You are not part of this organization: '{}'".format(org.name)))
return redirect("user_dashboard")
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
messages.error(request,
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)))
return redirect("user_dashboard")
can_create_petition = org.is_allowed_to(pytitionuser, "can_create_petitions")
petitions = org.petition_set.all()
other_orgs = pytitionuser.organization_set.filter(~Q(name=org.name)).all()
return render(request, 'petition/org_dashboard.html',
{'org': org, 'user': pytitionuser, "other_orgs": other_orgs,
'petitions': petitions, 'user_permissions': permissions,
'can_create_petition': can_create_petition})
# /user/dashboard
# Dashboard of the logged in user
@login_required
def user_dashboard(request):
user = get_session_user(request)
petitions = user.petition_set.all()
return render(
request,
'petition/user_dashboard.html',
{'user': user, 'petitions': petitions, 'can_create_petition': True}
)
# /user/<user_name>
# Show the user profile
def user_profile(request, user_name):
try:
user = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
raise Http404(_("not found"))
ctx = {'user': user,
'petitions': user.petition_set.filter(published=True)}
return render(request, 'petition/user_profile.html', ctx)
# /org/<slug:orgslugname>/leave_org
# User is leaving the organisation
@login_required
def leave_org(request, orgslugname):
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
pytitionuser = get_session_user(request)
if pytitionuser not in org.members.all():
raise Http404(_("not found"))
with transaction.atomic():
if org.is_last_admin(pytitionuser):
messages.error(request, _('Impossible to leave this organisation, you are the last administrator'))
return redirect(reverse('account_settings') + '#a_org_form')
elif org.members.count() == 1:
messages.error(request, _('Impossible to leave this organisation, you are the last member'))
return redirect(reverse('account_settings') + '#a_org_form')
else:
org.members.remove(pytitionuser)
return redirect('account_settings')
# /org/<slug:orgslugname>
# Show the profile of an organization
def org_profile(request, orgslugname):
try:
user = get_session_user(request)
except:
user = None
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
ctx = {'org': org,
'petitions': org.petition_set.filter(published=True)}
# if a user is logged-in, put it in the context, it will feed the navbar dropdown
if user is not None:
ctx['user'] = user
return render(request, "petition/org_profile.html", ctx)
# /get_user_list
# get the list of users
@login_required
def get_user_list(request):
q = request.GET.get('q', '')
if q != "":
users = PytitionUser.objects.filter(Q(user__username__contains=q) | Q(user__first_name__icontains=q) |
Q(user__last_name__icontains=q)).all()
else:
users = []
userdict = {
"values": [user.user.username for user in users],
}
return JsonResponse(userdict)
# PATH : org/<slug:orgslugname>/add_user
# Add an user to an organization
@login_required
def org_add_user(request, orgslugname):
adduser = request.GET.get('user', '')
try:
adduser = PytitionUser.objects.get(user__username=adduser)
except PytitionUser.DoesNotExist:
message = _("This user does not exist (anylonger?)")
return JsonResponse({"message": message}, status=404)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
message = _("This organization does not exist (anylonger?)")
return JsonResponse({"message": message}, status=404)
pytitionuser = get_session_user(request)
if org not in pytitionuser.organization_set.all():
message = _("You are not part of this organization.")
return JsonResponse({"message": message}, status=403)
if org in adduser.organization_set.all():
message = _("User is already member of {orgname} organization".format(orgname=org.name))
return JsonResponse({"message": message}, status=500)
if not org.is_allowed_to(pytitionuser, "can_add_members"):
message = _("You are not allowed to invite new members into this organization.")
return JsonResponse({"message": message}, status=403)
try:
adduser.invitations.add(org)
adduser.save()
except:
message = _("An error occured")
return JsonResponse({"message": message}, status=500)
message = _("You invited {username} to join {orgname}".format(username=adduser.name, orgname=org.name))
return JsonResponse({"message": message})
# /org/<slug:orgslugname>/invite_accept
# Accept an invitation to an organisation
# Called from /user/dashboard
@login_required
def invite_accept(request, orgslugname):
if orgslugname == "":
return HttpResponse(status=500)
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
if org in pytitionuser.invitations.all():
try:
with transaction.atomic():
pytitionuser.invitations.remove(org)
org.members.add(pytitionuser)
except:
return HttpResponse(status=500)
else:
raise Http404(_("not found"))
return redirect('user_dashboard')
# /org/<slug:orgslugname>/invite_dismiss
# Dismiss the invitation to an organisation
@login_required
def invite_dismiss(request, orgslugname):
if orgslugname == "":
return JsonResponse({}, status=500)
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
if org in pytitionuser.invitations.all():
try:
pytitionuser.invitations.remove(org)
except:
return JsonResponse({}, status=500)
else:
raise Http404(_("not found"))
return redirect('user_dashboard')
# /org/<slug:orgslugname>/new_template
# /user/new_template
# Create a new template
@login_required
def new_template(request, orgslugname=None):
pytitionuser = get_session_user(request)
ctx = {'user': pytitionuser}
if orgslugname:
redirection = "org_new_template"
try:
org = Organization.objects.get(slugname=orgslugname)
ctx['org'] = org
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if org not in pytitionuser.organization_set.all():
return HttpResponseForbidden(_("You are not allowed to view this organization dashboard"))
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
ctx['user_permissions'] = permissions
except Permission.DoesNotExist:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)), status=500)
if not permissions.can_create_templates:
return HttpResponseForbidden(_("You don't have the permission to create a Template in this organization"))
ctx['base_template'] = 'petition/org_base.html'
else:
redirection = "user_new_template"
ctx['base_template'] = 'petition/user_base.html'
if request.method == "POST":
template_name = request.POST.get('template_name', '')
if template_name != '':
if orgslugname:
template = PetitionTemplate(name=template_name, org=org)
else:
template = PetitionTemplate(name=template_name, user=pytitionuser)
template.save()
return redirect("edit_template", template.id)
else:
messages.error(request, _("You need to provide a template name."))
return redirect(redirection)
else:
return render(request, "petition/new_template.html", ctx)
# /templates/<int:template_id>/edit
# Edit a petition template
@login_required
def edit_template(request, template_id):
id = template_id
if id == '':
return HttpResponseForbidden(_("You need to provide the template id to modify"))
try:
template = PetitionTemplate.objects.get(pk=id)
except PetitionTemplate.DoesNotExist:
raise Http404(_("This template does not exist"))
pytitionuser = get_session_user(request)
context = {'user': pytitionuser}
if template.owner_type == "org":
owner = template.org
else:
owner = template.user
if template.owner_type == "org":
try:
permissions = Permission.objects.get(organization=owner, user=pytitionuser)
except:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=owner.name)), status=500)
context['user_permissions'] = permissions
if owner not in pytitionuser.organization_set.all() or not permissions.can_modify_templates:
return HttpResponseForbidden(_("You are not allowed to edit this organization's templates"))
context['org'] = owner
base_template = "petition/org_base.html"
else:
if owner != pytitionuser:
return HttpResponseForbidden(_("You are not allowed to edit this user's templates"))
base_template = "petition/user_base.html"
submitted_ctx = {
'content_form_submitted': False,
'email_form_submitted': False,
'social_network_form_submitted': False,
'newsletter_form_submitted': False,
'style_form_submitted': False,
}
if request.method == "POST":
if 'content_form_submitted' in request.POST:
content_form = ContentFormTemplate(request.POST)
submitted_ctx['content_form_submitted'] = True
if content_form.is_valid():
template.name = content_form.cleaned_data['name']
template.text = content_form.cleaned_data['text']
template.side_text = content_form.cleaned_data['side_text']
template.footer_text = content_form.cleaned_data['footer_text']
template.footer_links = content_form.cleaned_data['footer_links']
template.sign_form_footer = content_form.cleaned_data['sign_form_footer']
template.save()
else:
content_form = ContentFormTemplate({f: getattr(template, f) for f in ContentFormTemplate.base_fields})
if 'email_form_submitted' in request.POST:
email_form = EmailForm(request.POST)
submitted_ctx['email_form_submitted'] = True
if email_form.is_valid():
template.confirmation_email_reply = email_form.cleaned_data['confirmation_email_reply']
template.save()
else:
email_form = EmailForm({f: getattr(template, f) for f in EmailForm.base_fields})
if 'social_network_form_submitted' in request.POST:
social_network_form = SocialNetworkForm(request.POST)
submitted_ctx['social_network_form_submitted'] = True
if social_network_form.is_valid():
template.twitter_description = social_network_form.cleaned_data['twitter_description']
template.twitter_image = social_network_form.cleaned_data['twitter_image']
template.org_twitter_handle = social_network_form.cleaned_data['org_twitter_handle']
template.save()
else:
social_network_form = SocialNetworkForm({f: getattr(template, f) for f in SocialNetworkForm.base_fields})
if 'newsletter_form_submitted' in request.POST:
newsletter_form = NewsletterForm(request.POST)
submitted_ctx['newsletter_form_submitted'] = True
if newsletter_form.is_valid():
template.has_newsletter = newsletter_form.cleaned_data['has_newsletter']
template.newsletter_text = newsletter_form.cleaned_data['newsletter_text']
template.newsletter_subscribe_http_data = newsletter_form.cleaned_data['newsletter_subscribe_http_data']
template.newsletter_subscribe_http_mailfield = newsletter_form.cleaned_data['newsletter_subscribe_http_mailfield']
template.newsletter_subscribe_http_url = newsletter_form.cleaned_data['newsletter_subscribe_http_url']
template.newsletter_subscribe_mail_subject = newsletter_form.cleaned_data['newsletter_subscribe_mail_subject']
template.newsletter_subscribe_mail_from = newsletter_form.cleaned_data['newsletter_subscribe_mail_from']
template.newsletter_subscribe_mail_to = newsletter_form.cleaned_data['newsletter_subscribe_mail_to']
template.newsletter_subscribe_method = newsletter_form.cleaned_data['newsletter_subscribe_method']
template.newsletter_subscribe_mail_smtp_host = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_host']
template.newsletter_subscribe_mail_smtp_port = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_port']
template.newsletter_subscribe_mail_smtp_user = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_user']
template.newsletter_subscribe_mail_smtp_password = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_password']
template.newsletter_subscribe_mail_smtp_tls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_tls']
template.newsletter_subscribe_mail_smtp_starttls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_starttls']
template.save()
else:
newsletter_form = NewsletterForm({f: getattr(template, f) for f in NewsletterForm.base_fields})
if 'style_form_submitted' in request.POST:
submitted_ctx['style_form_submitted'] = True
style_form = StyleForm(request.POST)
if style_form.is_valid():
template.bgcolor = style_form.cleaned_data['bgcolor']
template.linear_gradient_direction = style_form.cleaned_data['linear_gradient_direction']
template.gradient_from = style_form.cleaned_data['gradient_from']
template.gradient_to = style_form.cleaned_data['gradient_to']
template.save()
else:
style_form = StyleForm({f: getattr(template, f) for f in StyleForm.base_fields})
else:
content_form = ContentFormTemplate({f: getattr(template, f) for f in ContentFormTemplate.base_fields})
email_form = EmailForm({f: getattr(template, f) for f in EmailForm.base_fields})
social_network_form = SocialNetworkForm({f: getattr(template, f) for f in SocialNetworkForm.base_fields})
newsletter_form = NewsletterForm({f: getattr(template, f) for f in NewsletterForm.base_fields})
style_form = StyleForm({f: getattr(template, f) for f in StyleForm.base_fields})
ctx = {'content_form': content_form,
'email_form': email_form,
'social_network_form': social_network_form,
'newsletter_form': newsletter_form,
'style_form': style_form,
'petition': template}
context['base_template'] = base_template
context.update(ctx)
context.update(submitted_ctx)
return render(request, "petition/edit_template.html", context)
# /templates/<int:template_id>/delete
# Delete a template
@login_required
def template_delete(request, template_id):
pytitionuser = get_session_user(request)
if template_id == '':
return JsonResponse({}, status=500)
try:
template = PetitionTemplate.objects.get(pk=template_id)
except:
return JsonResponse({}, status=404)
if template.owner_type == "org":
if not pytitionuser in template.org.members.all():
return JsonResponse({}, status=403) # User not in organization
try:
permissions = Permission.objects.get(
organization=template.org,
user=pytitionuser)
except Permission.DoesNotExist:
return JsonResponse({}, status=500) # No permission? fatal error!
if not permissions.can_delete_templates:
return JsonResponse({}, status=403) # User does not have the permission!
else:
if pytitionuser != template.user:
return JsonResponse({}, status=403) # User cannot delete a template if it's not his
template.delete()
return JsonResponse({})
# /templates/<int:template_id>/fav
# Set a template as favourite
@login_required
def template_fav_toggle(request, template_id):
pytitionuser = get_session_user(request)
if template_id == '':
return JsonResponse({}, status=500)
try:
template = PetitionTemplate.objects.get(pk=template_id)
except PetitionTemplate.DoesNotExist:
return JsonResponse({}, status=404)
if template.owner_type == "org":
owner = template.org
else:
owner = template.user
if template.owner_type == "org":
if owner not in pytitionuser.organization_set.all():
return JsonResponse({}, status=403) # Forbidden
else:
if owner != pytitionuser:
return JsonResponse({'msg': _("You are not allowed to change this user's default template")}, status=403)
if owner.default_template == template:
owner.default_template = None
else:
owner.default_template = template
owner.save()
return JsonResponse({})
# /org/<slug:orgslugname>/delete_member
# Remove a member from an organization
@login_required
def org_delete_member(request, orgslugname):
member_name = request.GET.get('member', '')
try:
member = PytitionUser.objects.get(user__username=member_name)
except PytitionUser.DoesNotExist:
raise Http404(_("User does not exist"))
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if pytitionuser not in org.members.all():
return JsonResponse({}, status=403) # Forbidden
try:
permissions = Permission.objects.get(user=pytitionuser, organization=org)
except Permission.DoesNotExist:
return JsonResponse({}, status=500)
if permissions.can_remove_members or pytitionuser == member:
if org in member.organization_set.all():
if org.is_last_admin(member):
return JsonResponse({}, status=403) # Forbidden
member.organization_set.remove(org)
else:
return JsonResponse({}, status=404)
else:
return JsonResponse({}, status=403) # Forbidden
return JsonResponse({}, status=200)
# PATH : org/<slug:orgslugname>/edit_user_permissions/<slug:user_name>
# Show a webpage to edit permissions
@login_required
def org_edit_user_perms(request, orgslugname, user_name):
"""Shows the page which lists the user permissions."""
pytitionuser = get_session_user(request)
try:
member = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
messages.error(request, _("User '{name}' does not exist".format(name=user_name)))
return redirect("org_dashboard", orgslugname)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization '{name}' does not exist".format(name=orgslugname)))
if org not in member.organization_set.all():
messages.error(request, _("The user '{username}' is not member of this organization ({orgname}).".
format(username=user_name, orgname=org.name)))
return redirect("org_dashboard", org.slugname)
try:
permissions = Permission.objects.get(organization=org, user=member)
except Permission.DoesNotExist:
messages.error(request,
_("Internal error, this member does not have permissions attached to this organization."))
return redirect("org_dashboard", org.slugname)
try:
user_permissions = Permission.objects.get(organization=org, user=pytitionuser)
except:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)), status=500)
return render(request, "petition/org_edit_user_perms.html",
{'org': org, 'member': member, 'user': pytitionuser,
'permissions': permissions,
'user_permissions': user_permissions})
# PATH /org/<slug:orgslugname>/set_user_permissions/<slug:user_name>
# Set a permission for an user
@login_required
def org_set_user_perms(request, orgslugname, user_name):
"""Actually do the modification of user permissions.
Data comes from the "org_edit_user_perms" view's form.
"""
pytitionuser = get_session_user(request)
try:
member = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
messages.error(request, _("User does not exist"))
return redirect("org_dashboard", orgslugname)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if org not in member.organization_set.all():
messages.error(request, _("This user is not part of organization \'{orgname}\'".format(orgname=org.name)))
return redirect("org_dashboard", org.slugname)
try:
permissions = Permission.objects.get(user=member, organization=org)
except Permission.DoesNotExist:
messages.error(request, _("Fatal error, this user does not have permissions attached for this organization"))
return redirect("org_dashboard", org.slugname)
try:
userperms = Permission.objects.get(user=pytitionuser, organization=org)
except:
messages.error(request, _("Fatal error, you don't have permissions attached to you for this organization"))
return redirect("org_dashboard", org.slugname)
if pytitionuser not in org.members.all():
messages.error(request, _("You are not part of this organization"))
return redirect("user_dashboard")
if not userperms.can_modify_permissions:
messages.error(request, _("You are not allowed to modify this organization members' permissions"))
return redirect("org_edit_user_perms", orgslugname, user_name)
if request.method == "POST":
error = False
post = request.POST
permissions.can_remove_members = post.get('can_remove_members', '') == 'on'
permissions.can_add_members = post.get('can_add_members', '') == 'on'
permissions.can_create_petitions = post.get('can_create_petitions', '') == 'on'
permissions.can_modify_petitions = post.get('can_modify_petitions', '') == 'on'
permissions.can_delete_petitions = post.get('can_delete_petitions', '') == 'on'
permissions.can_create_templates = post.get('can_create_templates', '') == 'on'
permissions.can_modify_templates = post.get('can_modify_templates', '') == 'on'
permissions.can_delete_templates = post.get('can_delete_templates', '') == 'on'
permissions.can_view_signatures = post.get('can_view_signatures', '') == 'on'
permissions.can_modify_signatures = post.get('can_modify_signatures', '') == 'on'
permissions.can_delete_signatures = post.get('can_delete_signatures', '') == 'on'
can_modify_perms = post.get('can_modify_permissions', '') == 'on'
with transaction.atomic():
# if user is dropping his own permissions
if not can_modify_perms and permissions.can_modify_permissions and pytitionuser == member:
# get list of people with can_modify_permissions permission on this org
owners = org.owners
if owners.count() > 1:
permissions.can_modify_permissions = can_modify_perms
else:
if org.members.count() > 1:
error = True
messages.error(request, _("You cannot remove your ability to change permissions on this "
"Organization because you are the only one left who can do this. "
"Give the permission to someone else before removing yours."))
else:
error = True
messages.error(request, _("You cannot remove your ability to change permissions on this "
"Organization because you are the only member left."))
if not error:
permissions.can_modify_permissions = can_modify_perms
messages.success(request, _("Permissions successfully changed!"))
permissions.save()
return redirect("org_edit_user_perms", orgslugname, user_name)
WizardTemplates = {"step1": "petition/new_petition_step1.html",
"step2": "petition/new_petition_step2.html",
"step3": "petition/new_petition_step3.html"}
WizardForms = [("step1", PetitionCreationStep1),
("step2", PetitionCreationStep2),
("step3", PetitionCreationStep3)]
# Class Based Controller
# PATH : subroutes of /wizard
@method_decorator(login_required, name='dispatch')
class PetitionCreationWizard(SessionWizardView):
def get_template_names(self):
return [WizardTemplates[self.steps.current]]
def get_form_initial(self, step):
if step == "step2":
use_template = False
org_petition = "orgslugname" in self.kwargs
if org_petition:
orgslugname = self.kwargs['orgslugname']
org = Organization.objects.get(slugname=orgslugname)
else:
pytitionuser = get_session_user(self.request)
# Use a specific template if its id is given
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if org_petition:
if template in org.petitiontemplate_set.all():
return {'message': template.text}
else:
if template in pytitionuser.petitiontemplate_set.all():
return {'message': template.text}
# if no template id is given, check for default templates
if org_petition:
if org.default_template is not None:
template = org.default_template
use_template = True
elif pytitionuser.default_template is not None:
template = pytitionuser.default_template
use_template = True
if use_template:
return {'message': template.text}
return self.initial_dict.get(step, {})
def get_form_kwargs(self, step=None):
if step == "step1":
org_petition = "orgslugname" in self.kwargs
if org_petition:
orgslugname = self.kwargs['orgslugname']
kwargs = {"orgslugname": orgslugname}
else:
pytitionuser = get_session_user(self.request)
kwargs = {"user_name": pytitionuser.user.username}
return kwargs
else:
return {}
def done(self, form_list, **kwargs):
org_petition = "orgslugname" in self.kwargs
title = self.get_cleaned_data_for_step("step1")["title"]
message = self.get_cleaned_data_for_step("step2")["message"]
publish = self.get_cleaned_data_for_step("step3")["publish"]
pytitionuser = get_session_user(self.request)
_redirect = self.request.POST.get('redirect', '')
if org_petition:
orgslugname = self.kwargs['orgslugname']
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
messages.error(self.request, _("Cannot find this organization"))
return redirect("user_dashboard")
#raise Http404(_("Organization does not exist"))
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
return redirect("org_dashboard", orgslugname)
if pytitionuser in org.members.all() and permissions.can_create_petitions:
#FIXME I think new here is better than create
petition = Petition.objects.create(title=title, text=message, org=org)
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if template in org.petitiontemplate_set.all():
petition.prepopulate_from_template(template)
petition.save()
else:
messages.error(self.request, _("This template does not belong to your organization"))
return redirect("org_dashboard", orgslugname)
if publish:
petition.publish()
if _redirect and _redirect == '1':
return redirect("edit_petition", petition.id)
else:
return redirect("org_dashboard", orgslugname)
else:
messages.error(self.request, _("You don't have the permission to create a new petition in this Organization"))
return redirect("org_dashboard", orgslugname)
else:
petition = Petition.objects.create(title=title, text=message, user=pytitionuser)
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if template in pytitionuser.petitiontemplate_set.all():
petition.prepopulate_from_template(template)
petition.save()
else:
messages.error(self.request, _("This template does not belong to you"))
return redirect("user_dashboard")
if publish:
petition.publish()
if _redirect and _redirect == '1':
return redirect("edit_petition", petition.id)
else:
return redirect("user_dashboard")
def get_context_data(self, form, **kwargs):
org_petition = "orgslugname" in self.kwargs
context = super(PetitionCreationWizard, self).get_context_data(form=form, **kwargs)
if org_petition:
base_template = 'petition/org_base.html'
try:
org = Organization.objects.get(slugname=self.kwargs['orgslugname'])
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
else:
base_template = 'petition/user_base.html'
pytitionuser = get_session_user(self.request)
context.update({'user': pytitionuser,
'base_template': base_template})
if org_petition:
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)), status=500)
context.update({'org': org,
'user_permissions': permissions})
if self.steps.current == "step3":
context.update(self.get_cleaned_data_for_step("step1"))
context.update(self.get_cleaned_data_for_step("step2"))
return context
# /<int:petition_id>/delete
# Delete a petition
@login_required
def petition_delete(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.delete()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
else: # an organization owns the petition
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_delete_petitions:
petition.delete()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
# /<int:petition_id>/publish
# Publish a petition
@login_required
def petition_publish(request, petition_id):
pytitionuser = get_session_user(request)
petition = petition_from_id(petition_id)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.publish()
return JsonResponse({})
else:
# Petition owned by someone else
return JsonResponse({}, status=403)
else:
# Check if the user has permission over this org
try:
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_modify_petitions:
petition.publish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
except Permission.DoesNotExist:
return JsonResponse({}, status=403)
# /<int:petition_id>/unpublish
# Unpublish a petition
@login_required
def petition_unpublish(request, petition_id):
pytitionuser = get_session_user(request)
petition = petition_from_id(petition_id)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.unpublish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
else:
# Check if the user has permission over this org
try:
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_modify_petitions:
petition.unpublish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
except Permission.DoesNotExist:
return JsonResponse({}, status=403)
# /<int:petition_id>/edit
# Edit a petition
@login_required
def edit_petition(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
if not petition.is_allowed_to_edit(pytitionuser):
messages.error(request, _("You are not allowed to edit this petition"))
return redirect("user_dashboard")
submitted_ctx = {
'content_form_submitted': False,
'email_form_submitted': False,
'social_network_form_submitted': False,
'newsletter_form_submitted': False,
}
if request.method == "POST":
if 'content_form_submitted' in request.POST:
submitted_ctx['content_form_submitted'] = True
content_form = ContentFormPetition(request.POST)
if content_form.is_valid():
petition.title = content_form.cleaned_data['title']
petition.target = content_form.cleaned_data['target']
petition.text = content_form.cleaned_data['text']
petition.side_text = content_form.cleaned_data['side_text']
petition.footer_text = content_form.cleaned_data['footer_text']
petition.footer_links = content_form.cleaned_data['footer_links']
petition.sign_form_footer = content_form.cleaned_data['sign_form_footer']
petition.save()
else:
content_form = ContentFormPetition({f: getattr(petition, f) for f in ContentFormPetition.base_fields})
if 'email_form_submitted' in request.POST:
submitted_ctx['email_form_submitted'] = True
email_form = EmailForm(request.POST)
if email_form.is_valid():
petition.confirmation_email_reply = email_form.cleaned_data['confirmation_email_reply']
petition.save()
else:
email_form = EmailForm({f: getattr(petition, f) for f in EmailForm.base_fields})
if 'social_network_form_submitted' in request.POST:
submitted_ctx['social_network_form_submitted'] = True
social_network_form = SocialNetworkForm(request.POST)
if social_network_form.is_valid():
petition.twitter_description = social_network_form.cleaned_data['twitter_description']
petition.twitter_image = social_network_form.cleaned_data['twitter_image']
petition.org_twitter_handle = social_network_form.cleaned_data['org_twitter_handle']
petition.save()
else:
social_network_form = SocialNetworkForm({f: getattr(petition, f) for f in SocialNetworkForm.base_fields})
if 'newsletter_form_submitted' in request.POST:
submitted_ctx['newsletter_form_submitted'] = True
newsletter_form = NewsletterForm(request.POST)
if newsletter_form.is_valid():
petition.has_newsletter = newsletter_form.cleaned_data['has_newsletter']
petition.newsletter_text = newsletter_form.cleaned_data['newsletter_text']
petition.newsletter_subscribe_http_data = newsletter_form.cleaned_data['newsletter_subscribe_http_data']
petition.newsletter_subscribe_http_mailfield = newsletter_form.cleaned_data['newsletter_subscribe_http_mailfield']
petition.newsletter_subscribe_http_url = newsletter_form.cleaned_data['newsletter_subscribe_http_url']
petition.newsletter_subscribe_mail_subject = newsletter_form.cleaned_data['newsletter_subscribe_mail_subject']
petition.newsletter_subscribe_mail_from = newsletter_form.cleaned_data['newsletter_subscribe_mail_from']
petition.newsletter_subscribe_mail_to = newsletter_form.cleaned_data['newsletter_subscribe_mail_to']
petition.newsletter_subscribe_method = newsletter_form.cleaned_data['newsletter_subscribe_method']
petition.newsletter_subscribe_mail_smtp_host = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_host']
petition.newsletter_subscribe_mail_smtp_port = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_port']
petition.newsletter_subscribe_mail_smtp_user = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_user']
petition.newsletter_subscribe_mail_smtp_password = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_password']
petition.newsletter_subscribe_mail_smtp_tls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_tls']
petition.newsletter_subscribe_mail_smtp_starttls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_starttls']
petition.save()
else:
newsletter_form = NewsletterForm({f: getattr(petition, f) for f in NewsletterForm.base_fields})
if 'style_form_submitted' in request.POST:
submitted_ctx['style_form_submitted'] = True
style_form = StyleForm(request.POST)
if style_form.is_valid():
petition.bgcolor = style_form.cleaned_data['bgcolor']
petition.linear_gradient_direction = style_form.cleaned_data['linear_gradient_direction']
petition.gradient_from = style_form.cleaned_data['gradient_from']
petition.gradient_to = style_form.cleaned_data['gradient_to']
petition.save()
else:
style_form = StyleForm({f: getattr(petition, f) for f in StyleForm.base_fields})
else:
content_form = ContentFormPetition({f: getattr(petition, f) for f in ContentFormPetition.base_fields})
style_form = StyleForm({f: getattr(petition, f) for f in StyleForm.base_fields})
email_form = EmailForm({f: getattr(petition, f) for f in EmailForm.base_fields})
social_network_form = SocialNetworkForm({f: getattr(petition, f) for f in SocialNetworkForm.base_fields})
newsletter_form = NewsletterForm({f: getattr(petition, f) for f in NewsletterForm.base_fields})
ctx = {'user': pytitionuser,
'content_form': content_form,
'style_form': style_form,
'email_form': email_form,
'social_network_form': social_network_form,
'newsletter_form': newsletter_form,
'petition': petition}
url_prefix = request.scheme + "://" + request.get_host()
if petition.owner_type == "org":
permissions = Permission.objects.get(organization=petition.org, user=pytitionuser)
example_url = url_prefix + reverse("slug_show_petition",
kwargs={'orgslugname': petition.org.slugname,
'petitionname': _("save-the-kittens-from-bad-wolf")})
slug_prefix = (url_prefix + reverse("slug_show_petition",
kwargs={'orgslugname': petition.org.slugname,
'petitionname': 'toto'})).rsplit('/', 1)[0]
ctx.update({'org': petition.org,
'user_permissions': permissions,
'base_template': 'petition/org_base.html',
'example_url': example_url,
'slug_prefix': slug_prefix})
else:
example_url = url_prefix + reverse("slug_show_petition",
kwargs={'username': pytitionuser.user.username,
'petitionname': _("save-the-kittens-from-bad-wolf")})
slug_prefix = (url_prefix + reverse("slug_show_petition",
kwargs={'username': pytitionuser.user.username,
'petitionname': 'toto'})).rsplit('/', 1)[0]
ctx.update({'base_template': 'petition/user_base.html',
'example_url': example_url,
'slug_prefix': slug_prefix})
ctx.update(submitted_ctx)
return render(request, "petition/edit_petition.html", ctx)
# /<int:petition_id>/show_signatures
# Show the signatures of a petition
@login_required
def show_signatures(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
ctx = {}
if petition.owner_type == "user":
base_template = 'petition/user_base.html'
else:
org = petition.org
base_template = 'petition/org_base.html'
other_orgs = pytitionuser.organization_set.filter(~Q(name=org.name)).all()
if pytitionuser not in org.members.all():
messages.error(request, _("You are not member of the following organization: \'{}\'".format(org.name)))
return redirect("user_dashboard")
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
messages.error(request, _("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')".format(orgname=org.name)))
return redirect("user_dashboard")
if not permissions.can_view_signatures:
messages.error(request, _("You are not allowed to view signatures in this organization"))
return redirect("org_dashboard", org.slugname)
ctx.update({'org': org, 'other_orgs': other_orgs,
'user_permissions': permissions})
if request.method == "POST":
action = request.POST.get('action', '')
selected_signature_ids = request.POST.getlist('signature_id', '')
failed = False
if selected_signature_ids and action:
selected_signatures = Signature.objects.filter(pk__in=selected_signature_ids)
if action == "delete":
for s in selected_signatures:
pet = s.petition
if pet.org: # Petition is owned by an org, we check for rights
if pet.org.is_allowed_to(pytitionuser, 'can_delete_signatures'):
s.delete()
else:
failed = True
else: # Petition is owned by a user, we check it's the one asking for deletion
if pet.user == pytitionuser:
s.delete()
else:
failed = True
if failed:
messages.error(request, _("You don't have permission to delete some or all of selected signatures"))
else:
messages.success(request, _("You successfully deleted all selected signatures"))
if action == "re-send":
for s in selected_signatures:
try:
send_confirmation_email(request, s)
except:
failed = True
if failed:
messages.error(request, _("An error happened while trying to re-send confirmation emails"))
else:
messages.success(request, _("You successfully deleted all selected signatures"))
if action == "re-send-all":
selected_signatures = Signature.objects.filter(petition=petition)
for s in selected_signatures:
try:
send_confirmation_email(request, s)
except:
failed = True
if failed:
messages.error(request, _("An error happened while trying to re-send confirmation emails"))
else:
messages.success(request, _("You successfully deleted all selected signatures"))
return redirect("show_signatures", petition_id)
signatures = petition.signature_set.all()
ctx.update({'petition': petition, 'user': pytitionuser,
'base_template': base_template,
'signatures': signatures})
return render(request, "petition/signature_data.html", ctx)
# /account_settings
# Show settings for the user accounts
@login_required
def account_settings(request):
pytitionuser = get_session_user(request)
submitted_ctx = {
'update_info_form_submitted': False,
'delete_account_form_submitted': False,
'password_change_form_submitted': False
}
if request.method == "POST":
if 'update_info_form_submitted' in request.POST:
update_info_form = UpdateInfoForm(pytitionuser.user, request.POST)
submitted_ctx['update_info_form_submitted'] = True
if update_info_form.is_valid():
update_info_form.save()
else:
update_info_form = get_update_form(pytitionuser.user)
if 'delete_account_form_submitted' in request.POST:
delete_account_form = DeleteAccountForm(request.POST)
submitted_ctx['delete_account_form_submitted'] = True
if delete_account_form.is_valid():
pytitionuser.drop()
return redirect("index")
else:
delete_account_form = DeleteAccountForm()
if 'password_change_form_submitted' in request.POST:
password_change_form = PasswordChangeForm(pytitionuser.user, request.POST)
submitted_ctx['password_change_form_submitted'] = True
if password_change_form.is_valid():
password_change_form.save()
messages.success(request, _("You successfully changed your password!"))
else:
password_change_form = PasswordChangeForm(pytitionuser.user)
else:
update_info_form = get_update_form(pytitionuser.user)
delete_account_form = DeleteAccountForm()
password_change_form = PasswordChangeForm(pytitionuser.user)
orgs = pytitionuser.organization_set.all()
# Checking if the user is allowed to leave the organisation
for org in orgs:
if org.members.count() < 2:
org.leave = False
else:
# More than one user, we need to check owners
owners = org.owners.all()
if owners.count() == 1 and pytitionuser in owners:
org.leave = False
else:
org.leave = True
ctx = {'user': pytitionuser,
'update_info_form': update_info_form,
'delete_account_form': delete_account_form,
'password_change_form': password_change_form,
'base_template': 'petition/user_base.html',
'orgs': orgs}
ctx.update(submitted_ctx)
return render(request, "petition/account_settings.html", ctx)
# GET/POST /org/create
# Create a new organization
@login_required
def org_create(request):
user = get_session_user(request)
ctx = {'user': user}
if request.method == "POST":
form = OrgCreationForm(request.POST)
if form.is_valid():
org = form.save()
org.members.add(user)
perm = Permission.objects.get(organization=org)
perm.set_all(True)
messages.success(request, _("You successfully created organization '{}'".format(org.name)))
return redirect('user_dashboard')
else:
ctx.update({'form': form})
return render(request, "petition/org_create.html", ctx)
form = OrgCreationForm()
ctx.update({'form': form})
return render(request, "petition/org_create.html", ctx)
# GET /org/<slug:orgslugname>/<slug:petitionname>
# Show a petition
def slug_show_petition(request, orgslugname=None, username=None, petitionname=None):
try:
pytitionuser = get_session_user(request)
except:
pytitionuser = None
if orgslugname:
try:
org = Organization.objects.get(slugname=orgslugname)
slug = SlugModel.objects.get(slug=petitionname, petition__org=org)
except (Organization.DoesNotExist, SlugModel.DoesNotExist):
raise Http404(_("Sorry, we are not able to find this petition"))
petition = slug.petition
else:
try:
user = PytitionUser.objects.get(user__username=username)
slug = SlugModel.objects.get(slug=petitionname, petition__user=user)
except PytitionUser.DoesNotExist:
raise Http404(_("Sorry, we are not able to find this petition"))
except SlugModel.DoesNotExist:
raise Http404(_("Sorry, we are not able to find this petition"))
petition = slug.petition
sign_form = SignatureForm(petition=petition)
ctx = {"user": pytitionuser, "petition": petition, "form": sign_form,
'meta': petition_detail_meta(request, petition.id)}
return render(request, "petition/petition_detail.html", ctx)
# /<int:petition_id>/add_new_slug
# Add a new slug for a petition
@login_required
def add_new_slug(request, petition_id):
pytitionuser = get_session_user(request)
try:
petition = petition_from_id(petition_id)
except:
messages.error(request, _("This petition does not exist (anymore?)."))
return redirect("user_dashboard")
if request.method == "POST":
slugtexts = request.POST.getlist('slugtext', '')
if slugtexts == '' or slugtexts == []:
messages.error(request, _("You entered an empty slug text"))
else:
if petition.is_allowed_to_edit(pytitionuser):
for slugtext in slugtexts:
try:
petition.add_slug(slugtext)
petition.save()
messages.success(request, _("Successful addition of the slug '{}'!".format(slugtext)))
except IntegrityError:
messages.error(request, _("The slug '{}' already exists!".format(slugtext)))
except ValidationError as v:
for message in v.messages:
messages.error(request, message)
else:
messages.error(request, _("You don't have the permission to modify petitions"))
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
else:
return redirect("user_dashboard")
# /<int:petition_id>/del_slug
# Remove a slug from a petition
@login_required
def del_slug(request, petition_id):
pytitionuser = get_session_user(request)
try:
petition = petition_from_id(petition_id)
except:
messages.error(request, _("This petition does not exist (anymore?)."))
return redirect("user_dashboard")
if petition.is_allowed_to_edit(pytitionuser):
slug_id = request.GET.get('slugid', None)
if not slug_id:
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
slug = SlugModel.objects.get(pk=slug_id)
petition.del_slug(slug)
petition.save()
messages.success(request, _("Successful deletion of a slug"))
else:
messages.error(request, _("You don't have the permission to modify petitions"))
if petition.owner_type == "org":
return redirect("org_dashboard", petition.owner.slugname)
else:
return redirect("user_dashboard")
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
| fallen/Pytition | pytition/petition/views.py | Python | bsd-3-clause | 64,345 |
real = complex(1, 1).real
imag = complex(1, 1).imag
print(real, imag)
| balarsen/Scrabble | scrabble/test.py | Python | bsd-3-clause | 72 |
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Unit tests for help command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.command import Command
import gslib.tests.testcase as testcase
class HelpUnitTests(testcase.GsUtilUnitTestCase):
"""Help command unit test suite."""
def test_help_noargs(self):
stdout = self.RunCommand('help', return_stdout=True)
self.assertIn(b'Available commands', stdout)
def test_help_subcommand_arg(self):
stdout = self.RunCommand('help', ['web', 'set'], return_stdout=True)
self.assertIn(b'gsutil web set', stdout)
self.assertNotIn(b'gsutil web get', stdout)
def test_help_invalid_subcommand_arg(self):
stdout = self.RunCommand('help', ['web', 'asdf'], return_stdout=True)
self.assertIn(b'help about one of the subcommands', stdout)
def test_help_with_subcommand_for_command_without_subcommands(self):
stdout = self.RunCommand('help', ['ls', 'asdf'], return_stdout=True)
self.assertIn(b'has no subcommands', stdout)
def test_help_command_arg(self):
stdout = self.RunCommand('help', ['ls'], return_stdout=True)
self.assertIn(b'ls - List providers, buckets', stdout)
def test_command_help_arg(self):
stdout = self.RunCommand('ls', ['--help'], return_stdout=True)
self.assertIn(b'ls - List providers, buckets', stdout)
def test_subcommand_help_arg(self):
stdout = self.RunCommand('web', ['set', '--help'], return_stdout=True)
self.assertIn(b'gsutil web set', stdout)
self.assertNotIn(b'gsutil web get', stdout)
def test_command_args_with_help(self):
stdout = self.RunCommand('cp', ['foo', 'bar', '--help'], return_stdout=True)
self.assertIn(b'cp - Copy files and objects', stdout)
class HelpIntegrationTests(testcase.GsUtilIntegrationTestCase):
"""Help command integration test suite."""
def test_help_wrong_num_args(self):
stderr = self.RunGsUtil(['cp'], return_stderr=True, expected_status=1)
self.assertIn('Usage:', stderr)
def test_help_runs_for_all_commands(self):
# This test is particularly helpful because the `help` command can fail
# under unusual circumstances (e.g. someone adds a new command and they make
# the "one-line" summary longer than the defined character limit).
for command in Command.__subclasses__():
# Raises exception if the exit code is non-zero.
self.RunGsUtil(['help', command.command_spec.command_name])
| endlessm/chromium-browser | third_party/catapult/third_party/gsutil/gslib/tests/test_help.py | Python | bsd-3-clause | 3,599 |
try: import cPickle as pickle
except: import pickle
from gem.evaluation import metrics
from gem.utils import evaluation_util, graph_util
import networkx as nx
import numpy as np
def evaluateStaticGraphReconstruction(digraph, graph_embedding,
X_stat, node_l=None, file_suffix=None,
sample_ratio_e=None, is_undirected=True,
is_weighted=False):
node_num = len(digraph.nodes)
# evaluation
if sample_ratio_e:
eval_edge_pairs = evaluation_util.getRandomEdgePairs(
node_num,
sample_ratio_e,
is_undirected
)
else:
eval_edge_pairs = None
if file_suffix is None:
estimated_adj = graph_embedding.get_reconstructed_adj(X_stat, node_l)
else:
estimated_adj = graph_embedding.get_reconstructed_adj(
X_stat,
file_suffix,
node_l
)
predicted_edge_list = evaluation_util.getEdgeListFromAdjMtx(
estimated_adj,
is_undirected=is_undirected,
edge_pairs=eval_edge_pairs
)
MAP = metrics.computeMAP(predicted_edge_list, digraph, is_undirected=is_undirected)
prec_curv, _ = metrics.computePrecisionCurve(predicted_edge_list, digraph)
# If weighted, compute the error in reconstructed weights of observed edges
if is_weighted:
digraph_adj = nx.to_numpy_matrix(digraph)
estimated_adj[digraph_adj == 0] = 0
err = np.linalg.norm(digraph_adj - estimated_adj)
err_baseline = np.linalg.norm(digraph_adj)
else:
err = None
err_baseline = None
return (MAP, prec_curv, err, err_baseline)
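# Illustrative call sketch (added for clarity; names are hypothetical): given a
# graph, a trained embedding object and its node vectors X, the routine above
# reconstructs the adjacency matrix, ranks the predicted edges and scores them
# against the observed graph.
#
#   MAP, prec_curv, err, err_b = evaluateStaticGraphReconstruction(
#       digraph, embedding_model, X_stat=X, is_undirected=True)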
def expGR(digraph, graph_embedding,
X, n_sampled_nodes, rounds,
res_pre, m_summ,
is_undirected=True):
print('\tGraph Reconstruction')
summ_file = open('%s_%s.grsumm' % (res_pre, m_summ), 'w')
summ_file.write('Method\t%s\n' % metrics.getMetricsHeader())
if len(digraph.nodes) <= n_sampled_nodes:
rounds = 1
MAP = [None] * rounds
prec_curv = [None] * rounds
err = [None] * rounds
err_b = [None] * rounds
n_nodes = [None] * rounds
n_edges = [None] * rounds
for round_id in range(rounds):
sampled_digraph, node_l = graph_util.sample_graph(
digraph,
n_sampled_nodes=n_sampled_nodes
)
n_nodes[round_id] = len(sampled_digraph.nodes)
n_edges[round_id] = len(sampled_digraph.edges)
print('\t\tRound: %d, n_nodes: %d, n_edges:%d\n' % (round_id,
n_nodes[round_id],
n_edges[round_id]))
sampled_X = X[node_l]
MAP[round_id], prec_curv[round_id], err[round_id], err_b[round_id] = \
evaluateStaticGraphReconstruction(sampled_digraph, graph_embedding,
sampled_X, node_l,
is_undirected=is_undirected)
try:
summ_file.write('Err: %f/%f\n' % (np.mean(err), np.std(err)))
summ_file.write('Err_b: %f/%f\n' % (np.mean(err_b), np.std(err_b)))
except TypeError:
pass
summ_file.write('%f/%f\t%s\n' % (np.mean(MAP), np.std(MAP),
metrics.getPrecisionReport(prec_curv[0],
n_edges[0])))
pickle.dump([n_nodes,
n_edges,
MAP,
prec_curv,
err,
err_b],
open('%s_%s.gr' % (res_pre, m_summ), 'wb'))
| palash1992/GEM | gem/evaluation/evaluate_graph_reconstruction.py | Python | bsd-3-clause | 3,704 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-11 22:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.DateTimeField()),
('site_ended', models.CharField(choices=[('T', 'Terrorists'), ('CT', 'Counter-Terrorists')], max_length=255)),
('rounds_for', models.IntegerField()),
('rounds_against', models.IntegerField()),
],
),
migrations.CreateModel(
name='GamePlayer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kills', models.IntegerField()),
('assists', models.IntegerField()),
('deaths', models.IntegerField()),
('mvps', models.IntegerField()),
('points', models.IntegerField()),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stats.Game')),
],
),
migrations.CreateModel(
name='Map',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('map_name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=255)),
('rank', models.CharField(blank=True, choices=[(1, 'Silver I'), (2, 'Silver II'), (3, 'Silver III'), (4, 'Silver IV'), (5, 'Silver Elite'), (6, 'Silver Elite Master'), (7, 'Gold Nova I'), (8, 'Gold Nova II'), (9, 'Gold Nova III'), (10, 'Gold Nova Master'), (11, 'Master Guardian I'), (12, 'Master Guardian II'), (13, 'Master Guardian Elite'), (14, 'Distinguished Master Guardian'), (15, 'Legendary Eagle'), (16, 'Legendary Eagle Master'), (17, 'Supreme Master First Class'), (18, 'The Global Elite')], max_length=255, null=True)),
],
),
migrations.AddField(
model_name='gameplayer',
name='player',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stats.Player'),
),
migrations.AddField(
model_name='game',
name='game_map',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stats.Map'),
),
migrations.AddField(
model_name='game',
name='players',
field=models.ManyToManyField(through='stats.GamePlayer', to='stats.Player'),
),
]
| andrijan/csgostats | cs-stats/stats/migrations/0001_initial.py | Python | bsd-3-clause | 3,104 |
import numpy as np
import theano
import theano.tensor as T
class GradientOptimizer:
def __init__(self, lr):
self.lr = lr
def __call__(self, cost, params):
pass
@property
def learningRate(self):
return self.lr
@learningRate.setter
def learningRate(self, i):
self.lr = i
class RMSprop(GradientOptimizer):
def __init__(self, lr=0.01, rho=0.9, epsilon=1e-6):
super(RMSprop, self).__init__(lr)
self.rho = rho
self.epsilon = epsilon
def __call__(self, cost, params):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = self.rho * acc + (1 - self.rho) * g ** 2
gradient_scaling = T.sqrt(acc_new + self.epsilon)
g = g / gradient_scaling
updates.append((acc, acc_new))
updates.append((p, p - self.lr * g))
return updates
class Adam(GradientOptimizer):
def __init__(self, lr=0.01, beta1=0.9, beta2=0.999, epsilon=1e-7):
super(Adam, self).__init__(lr)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
def __call__(self, cost, params):
grads = T.grad(cost=cost ,wrt=params)
updates = []
exp = theano.shared(np.float32(1.0),name='exp',borrow=True)
updates.append((exp, exp+1))
for p, g in zip(params, grads):
m = theano.shared(p.get_value() * 0.)
v = theano.shared(p.get_value() * 0.)
m_new = self.beta1 * m + (1 - self.beta1) * g
v_new = self.beta2 * v + (1 - self.beta2) * g**2
mt = m_new / (1 - self.beta1**exp)
vt = v_new / (1 - self.beta2**exp)
updates.append((m, m_new))
updates.append((v, v_new))
updates.append((p, p - self.lr * mt / (T.sqrt(vt) + self.epsilon)))
return updates
class Momentum(GradientOptimizer):
def __init__(self, lr=0.01, mu=0.5):
super(Momentum, self).__init__(lr)
self.mu = mu
def __call__(self, cost, params):
grads = T.grad(cost=cost ,wrt=params)
updates = []
for p, g in zip(params, grads):
v = theano.shared(p.get_value() * 0.)
new_v = self.mu * v + self.lr * g
updates.append((v, new_v))
updates.append((p, p - new_v))
return updates
class Nesterov(GradientOptimizer):
def __init__(self, lr=0.01, mu=0.5):
super(Nesterov, self).__init__(lr)
self.mu = mu
def __call__(self, cost, params):
grads = T.grad(cost=cost ,wrt=params)
updates = []
for p, g in zip(params, grads):
v = theano.shared(p.get_value() * 0.)
new_v = self.mu * v + self.lr * theano.clone(g, replace = {p: p - self.mu * v})
updates.append((v, new_v))
updates.append((p, p - new_v))
return updates
class Adagrad(GradientOptimizer):
def __init__(self, lr=0.01, epsilon=1e-7):
super(Adagrad, self).__init__(lr)
self.epsilon = epsilon
def __call__(self, cost, params):
grads = T.grad(cost=cost ,wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = acc + g**2
updates.append((acc, acc_new))
updates.append((p, p - self.lr * g / T.sqrt(acc_new + self.epsilon)))
return updates
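# A minimal, self-contained usage sketch (added for illustration; the toy data
# and names below are hypothetical, not part of the library): every optimizer
# above follows the same contract -- construct it with hyper-parameters, then
# call it with a scalar cost and a list of shared parameters to obtain a
# Theano update list for theano.function.
if __name__ == '__main__':
    x = T.matrix('x')
    y = T.vector('y')
    w = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='w')
    # Least-squares cost of a linear model y ~ x.dot(w)
    cost = T.mean((T.dot(x, w) - y) ** 2)
    optimizer = Adam(lr=0.01)
    train = theano.function([x, y], cost, updates=optimizer(cost, [w]))
    data_x = np.random.rand(16, 3).astype(theano.config.floatX)
    data_y = np.random.rand(16).astype(theano.config.floatX)
    for _ in range(100):
        loss = train(data_x, data_y)
    print(loss)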
| aissehust/sesame-paste-noodle | mlbase/gradient_optimizer.py | Python | bsd-3-clause | 3,630 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
"""
import numpy as np
from numpy import ma
def bin_spike(x, l):
"""
    l is the number of points used for comparison, thus l=2 means that each
    point will be compared only against the previous and following
    measurements. l=2 is probably not a good choice, too small.
Maybe use pstsd instead?
Dummy way to avoid warnings when x[ini:fin] are all masked.
Improve this in the future.
"""
assert x.ndim == 1, "I'm not ready to deal with multidimensional x"
assert l%2 == 0, "l must be an even integer"
N = len(x)
bin = ma.masked_all(N)
# bin_std = ma.masked_all(N)
half_window = int(l/2)
idx = (i for i in range(half_window, N - half_window) if np.isfinite(x[i]))
for i in idx:
ini = max(0, i - half_window)
fin = min(N, i + half_window)
# At least 3 valid points
if ma.compressed(x[ini:fin]).size >= 3:
bin[i] = x[i] - ma.median(x[ini:fin])
# bin_std[i] = (np.append(x[ini:i], x[i+1:fin+1])).std()
bin[i] /= (np.append(x[ini:i], x[i+1:fin+1])).std()
return bin
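# Illustrative example (added for clarity; the numbers are approximate and the
# call below is hypothetical): with l=4 every point is compared against the two
# previous and the two following measurements,
#
#   x = ma.masked_invalid([1.0, 1.1, 1.2, 5.0, 1.3, 1.4, 1.5])
#   spikes = bin_spike(x, 4)
#
# spikes[3] comes out large (5.0 sits far from the local median, in units of the
# neighbourhood standard deviation), while the first and last two points stay
# masked because a full comparison window is not available there.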
class Bin_Spike(object):
def __init__(self, data, varname, cfg, autoflag=True):
self.data = data
self.varname = varname
self.cfg = cfg
self.set_features()
if autoflag:
self.test()
def keys(self):
        return list(self.features.keys()) + \
            ["flag_%s" % f for f in self.flags.keys()]
def set_features(self):
self.features = {'bin_spike': bin_spike(self.data[self.varname],
self.cfg['l'])}
def test(self):
self.flags = {}
try:
threshold = self.cfg['threshold']
except:
print("Deprecated cfg format. It should contain a threshold item.")
threshold = self.cfg
try:
flag_good = self.cfg['flag_good']
flag_bad = self.cfg['flag_bad']
except:
print("Deprecated cfg format. It should contain flag_good & flag_bad.")
flag_good = 1
flag_bad = 3
assert (np.size(threshold) == 1) and \
(threshold is not None) and \
(np.isfinite(threshold))
flag = np.zeros(self.data[self.varname].shape, dtype='i1')
flag[np.nonzero(self.features['bin_spike'] > threshold)] = flag_bad
flag[np.nonzero(self.features['bin_spike'] <= threshold)] = flag_good
flag[ma.getmaskarray(self.data[self.varname])] = 9
self.flags['bin_spike'] = flag
| castelao/CoTeDe | cotede/qctests/bin_spike.py | Python | bsd-3-clause | 2,677 |
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import bytes, dict, object, range, map, input, str
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
from io import open
import rfpipe, rfpipe.candidates
import pytest
from astropy import time
from numpy import degrees, nan, argmax, abs
tparams = [(0, 0, 0, 5e-3, 0.3, 0.0001, 0.0),]
# simulate no flag, transient/no flag, transient/flag
inprefs = [({'flaglist': [], 'chans': list(range(32)), 'sigma_image1': None,
'spw': [0], 'savecandcollection': True, 'savenoise': True,
'savecanddata': True, 'returncanddata': True, 'saveplots': True,
'fftmode': 'fftw', 'searchtype': 'imagek'}, 1),
({'simulated_transient': tparams, 'dmarr': [0, 1, 2], 'dtarr': [1, 2],
'savecanddata': True, 'savenoise': True, 'saveplots': True,
'returncanddata': True, 'savecandcollection': True,
'timesub': 'mean', 'fftmode': 'fftw', 'searchtype': 'imagek',
'sigma_image1': 10, 'sigma_kalman': 1,
'clustercands': True, 'flaglist': []}, 2),
({'simulated_transient': tparams, 'dmarr': [0, 1, 2], 'dtarr': [1, 2],
'savecanddata': True, 'savenoise': True, 'saveplots': True,
'returncanddata': True, 'savecandcollection': True,
'timesub': 'cs', 'fftmode': 'fftw', 'searchtype': 'imagek',
'sigma_image1': 10, 'sigma_kalman': 1,
'clustercands': True, 'flaglist': []}, 2),]
# ({'simulated_transient': tparams, 'dmarr': [0], 'dtarr': [1],
# 'savecands': True, 'savenoise': True,
# 'sigma_image1': 10, 'sigma_kalman': 1, 'sigma_arm': 2,
# 'sigma_arms': 4, 'timesub': None, 'fftmode': 'fftw',
# 'searchtype': 'armkimage', 'flaglist': []}, 2) # sigma_arms forced very low
#TODO: support arbitrary channel selection and
# {'read_tdownsample': 2, 'read_fdownsample': 2, 'npix_max': 512},
@pytest.fixture(scope="module", params=inprefs)
def mockstate(request):
inprefs, scan = request.param
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.1/(24*3600), 20, 4, 32*4, 2,
5e3, scan=scan, datasource='sim',
antconfig='D')
return rfpipe.state.State(inmeta=meta, inprefs=inprefs)
# simulate two DMs
@pytest.fixture(scope="module")
def mockdata(mockstate):
segment = 0
data = rfpipe.source.read_segment(mockstate, segment)
data[0, 0, 0, 0] = nan
return rfpipe.source.data_prep(mockstate, segment, data)
@pytest.fixture(scope="module")
def mockcc(mockstate):
cc = rfpipe.pipeline.pipeline_scan(mockstate)
return cc
def test_dataprep(mockstate, mockdata):
assert mockdata.shape == mockstate.datashape
def test_noise(mockstate, mockdata):
for noises in rfpipe.candidates.iter_noise(mockstate.noisefile):
assert len(noises)
def test_pipelinescan(mockcc):
if mockcc.prefs.simulated_transient is not None:
rfpipe.candidates.makesummaryplot(mockcc)
assert mockcc is not None
def test_voevent(mockcc):
if mockcc.prefs.simulated_transient is not None:
name = rfpipe.candidates.make_voevent(mockcc)
assert name is not None
def test_candids(mockcc):
if mockcc.prefs.simulated_transient is not None:
assert len(mockcc.candids)
def test_cc(mockcc):
if mockcc.prefs.returncanddata:
assert isinstance(mockcc.canddata, list)
assert len(mockcc.canddata) == len(mockcc)
if mockcc.prefs.savecandcollection:
ccs = rfpipe.candidates.iter_cands(mockcc.state.candsfile)
cc = sum(ccs)
assert len(cc) == len(mockcc)
if cc.prefs.returncanddata:
assert isinstance(cc.canddata, list)
assert len(cc.canddata) == len(cc)
assert len(cc.canddata) == len(mockcc.canddata)
def test_phasecenter_detection():
inprefs = {'simulated_transient': [(0, 1, 0, 5e-3, 0.3, -0.001, 0.),
(0, 9, 0, 5e-3, 0.3, 0., 0.),
(0, 19, 0, 5e-3, 0.3, 0.001, 0.)],
'dmarr': [0], 'dtarr': [1], 'timesub': None, 'fftmode': 'fftw', 'searchtype': 'image',
'sigma_image1': 10, 'flaglist': [], 'uvres': 60, 'npix_max': 128, 'max_candfrac': 0}
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.1/(24*3600), 20, 4, 32*4, 2,
5e3, scan=1, datasource='sim',
antconfig='D')
st = rfpipe.state.State(inmeta=meta, inprefs=inprefs)
cc = rfpipe.pipeline.pipeline_scan(st)
assert cc.array['l1'][0] <= 0.
assert cc.array['l1'][1] == 0.
assert cc.array['l1'][2] >= 0.
assert all(abs(cc.array['m1']) <= 0.0003)
def test_phasecenter_detection_shift():
inprefs = {'simulated_transient': [(0, 1, 0, 5e-3, 0.3, -0.001, 0.),
(0, 9, 0, 5e-3, 0.3, 0., 0.),
(0, 19, 0, 5e-3, 0.3, 0.001, 0.)],
'dmarr': [0], 'dtarr': [1], 'timesub': None, 'fftmode': 'fftw', 'searchtype': 'image',
'sigma_image1': 10, 'flaglist': [], 'uvres': 60, 'npix_max': 128, 'max_candfrac': 0}
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.1/(24*3600), 20, 4, 32*4, 2,
5e3, scan=1, datasource='sim',
antconfig='D')
meta['phasecenters'] = [(t0, t0+0.01/(24*3600), degrees(0.001), 0.),
(t0+0.01/(24*3600), t0+0.05/(24*3600), 0., 0.),
(t0+0.05/(24*3600), t0+0.1/(24*3600), degrees(-0.001), 0.)]
st = rfpipe.state.State(inmeta=meta, inprefs=inprefs)
cc = rfpipe.pipeline.pipeline_scan(st)
assert all(cc.array['l1'] == 0.)
assert all(cc.array['m1'] == 0.)
def test_wide_transient():
print("Try injecting a transient of width 40ms at integration 8")
inprefs = {'simulated_transient': [(0, 8, 0, 40e-3, 0.3, 0., 0.)],
'dmarr': [0], 'dtarr': [1,2,4,8], 'timesub': None, 'fftmode': 'fftw', 'searchtype': 'image',
'sigma_image1': 10, 'flaglist': [], 'uvres': 60, 'npix_max': 128, 'max_candfrac': 0}
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.1/(24*3600), 20, 4, 32*4, 2,
5e3, scan=1, datasource='sim',
antconfig='D')
st = rfpipe.state.State(inmeta=meta, inprefs=inprefs)
cc = rfpipe.pipeline.pipeline_scan(st)
ind = argmax(cc.array['snr1'])
assert cc.array['dtind'][ind] == 3
assert cc.array['integration'][ind]*2**cc.array['dtind'][ind] == 8
print("Try injecting a transient of width 20ms at integration 8")
inprefs['simulated_transient'] = [(0, 8, 0, 20e-3, 0.3, 0., 0.)]
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.1/(24*3600), 20, 4, 32*4, 2,
5e3, scan=1, datasource='sim',
antconfig='D')
st = rfpipe.state.State(inmeta=meta, inprefs=inprefs)
cc = rfpipe.pipeline.pipeline_scan(st)
ind = argmax(cc.array['snr1'])
assert cc.array['dtind'][ind] == 2
assert cc.array['integration'][ind]*2**cc.array['dtind'][ind] == 8
| realfastvla/rfpipe | tests/test_sim.py | Python | bsd-3-clause | 7,548 |
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from memoized import memoized
from corehq import privileges
from corehq.apps.accounting.models import BillingAccount
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.dispatcher import UserManagementReportDispatcher
from corehq.apps.reports.filters.users import (
ChangeActionFilter,
ChangedByUserFilter,
EnterpriseUserFilter,
)
from corehq.apps.reports.filters.users import \
ExpandedMobileWorkerFilter as EMWF
from corehq.apps.reports.generic import GenericTabularReport, GetParamsMixin, PaginatedReportMixin
from corehq.apps.reports.standard import DatespanMixin, ProjectReport
from corehq.apps.users.audit.change_messages import (
ASSIGNED_LOCATIONS_FIELD,
CHANGE_MESSAGES_FIELDS,
DOMAIN_FIELD,
LOCATION_FIELD,
PHONE_NUMBERS_FIELD,
ROLE_FIELD,
TWO_FACTOR_FIELD,
get_messages,
)
from corehq.apps.users.models import UserHistory
from corehq.const import USER_DATETIME_FORMAT
from corehq.util.timezones.conversions import ServerTime
class UserHistoryReport(GetParamsMixin, DatespanMixin, GenericTabularReport, ProjectReport, PaginatedReportMixin):
slug = 'user_history'
name = ugettext_lazy("User History")
section_name = ugettext_lazy("User Management")
dispatcher = UserManagementReportDispatcher
fields = [
'corehq.apps.reports.filters.users.AffectedUserFilter',
'corehq.apps.reports.filters.users.ChangedByUserFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
'corehq.apps.reports.filters.users.ChangeActionFilter',
'corehq.apps.reports.filters.users.UserPropertyFilter',
'corehq.apps.reports.filters.users.UserUploadRecordFilter',
]
description = ugettext_lazy("History of user updates")
ajax_pagination = True
default_sort = {'changed_at': 'desc'}
@classmethod
def get_primary_properties(cls, domain):
"""
Get slugs and human-friendly names for the properties that are available
for filtering and/or displayed by default in the report, without
needing to click "See More".
"""
if domain_has_privilege(domain, privileges.APP_USER_PROFILES):
user_data_label = _("profile or user data")
else:
user_data_label = _("user data")
return {
"username": _("username"),
ROLE_FIELD: _("role"),
"email": _("email"),
DOMAIN_FIELD: _("project"),
"is_active": _("is active"),
"language": _("language"),
PHONE_NUMBERS_FIELD: _("phone numbers"),
LOCATION_FIELD: _("primary location"),
"user_data": user_data_label,
TWO_FACTOR_FIELD: _("two factor authentication disabled"),
ASSIGNED_LOCATIONS_FIELD: _("assigned locations"),
}
@property
def headers(self):
h = [
DataTablesColumn(_("Affected User"), sortable=False),
DataTablesColumn(_("Modified by User"), sortable=False),
DataTablesColumn(_("Action"), prop_name='action'),
DataTablesColumn(_("Via"), prop_name='changed_via'),
DataTablesColumn(_("Changes"), sortable=False),
DataTablesColumn(_("Change Message"), sortable=False),
DataTablesColumn(_("Timestamp"), prop_name='changed_at'),
]
return DataTablesHeader(*h)
@property
def total_records(self):
return self._get_queryset().count()
@memoized
def _get_queryset(self):
user_slugs = self.request.GET.getlist(EMWF.slug)
user_ids = self._get_user_ids(user_slugs)
# return empty queryset if no matching users were found
if user_slugs and not user_ids:
return UserHistory.objects.none()
changed_by_user_slugs = self.request.GET.getlist(ChangedByUserFilter.slug)
changed_by_user_ids = self._get_user_ids(changed_by_user_slugs)
# return empty queryset if no matching users were found
if changed_by_user_slugs and not changed_by_user_ids:
return UserHistory.objects.none()
user_property = self.request.GET.get('user_property')
actions = self.request.GET.getlist('action')
user_upload_record_id = self.request.GET.get('user_upload_record')
query = self._build_query(user_ids, changed_by_user_ids, user_property, actions, user_upload_record_id)
return query
def _get_user_ids(self, slugs):
es_query = self._get_users_es_query(slugs)
return es_query.values_list('_id', flat=True)
def _get_users_es_query(self, slugs):
return EnterpriseUserFilter.user_es_query(
self.domain,
slugs,
self.request.couch_user,
)
def _build_query(self, user_ids, changed_by_user_ids, user_property, actions, user_upload_record_id):
filters = Q(for_domain__in=self._for_domains())
if user_ids:
filters = filters & Q(user_id__in=user_ids)
if changed_by_user_ids:
filters = filters & Q(changed_by__in=changed_by_user_ids)
if user_property:
filters = filters & self._get_property_filters(user_property)
if actions and ChangeActionFilter.ALL not in actions:
filters = filters & Q(action__in=actions)
if user_upload_record_id:
filters = filters & Q(user_upload_record_id=user_upload_record_id)
if self.datespan:
filters = filters & Q(changed_at__lt=self.datespan.enddate_adjusted,
changed_at__gte=self.datespan.startdate)
return UserHistory.objects.filter(filters)
def _for_domains(self):
return BillingAccount.get_account_by_domain(self.domain).get_domains()
@staticmethod
def _get_property_filters(user_property):
if user_property in CHANGE_MESSAGES_FIELDS:
query_filters = Q(change_messages__has_key=user_property)
# to include CommCareUser creation from UI where a location can be assigned as a part of user creation
# which is tracked only under "changes" and not "change messages"
if user_property == LOCATION_FIELD:
query_filters = query_filters | Q(changes__has_key='location_id')
else:
query_filters = Q(changes__has_key=user_property)
return query_filters
@property
def rows(self):
records = self._get_queryset().order_by(self.ordering)[
self.pagination.start:self.pagination.start + self.pagination.count
]
for record in records:
yield self._user_history_row(record, self.domain, self.timezone)
@property
def ordering(self):
by, direction = list(self.get_sorting_block()[0].items())[0]
return '-' + by if direction == 'desc' else by
@memoized
def _get_location_name(self, location_id):
from corehq.apps.locations.models import SQLLocation
if not location_id:
return None
try:
location_object = SQLLocation.objects.get(location_id=location_id)
except ObjectDoesNotExist:
return None
return location_object.display_name
def _user_history_row(self, record, domain, timezone):
return [
record.user_repr,
record.changed_by_repr,
_get_action_display(record.action),
record.changed_via,
self._user_history_details_cell(record.changes, domain),
self._html_list(list(get_messages(record.change_messages))),
ServerTime(record.changed_at).user_time(timezone).ui_string(USER_DATETIME_FORMAT),
]
def _html_list(self, changes):
items = []
if isinstance(changes, dict):
for key, value in changes.items():
if isinstance(value, dict):
value = self._html_list(value)
elif isinstance(value, list):
value = format_html(", ".join(value))
else:
value = format_html(str(value))
items.append("<li>{}: {}</li>".format(key, value))
elif isinstance(changes, list):
items = ["<li>{}</li>".format(format_html(change)) for change in changes]
return mark_safe(f"<ul class='list-unstyled'>{''.join(items)}</ul>")
def _user_history_details_cell(self, changes, domain):
properties = UserHistoryReport.get_primary_properties(domain)
properties.pop("user_data", None)
primary_changes = {}
all_changes = {}
for key, value in changes.items():
if key == 'location_id':
value = self._get_location_name(value)
primary_changes[properties[LOCATION_FIELD]] = value
all_changes[properties[LOCATION_FIELD]] = value
elif key == 'user_data':
for user_data_key, user_data_value in changes['user_data'].items():
all_changes[f"user data: {user_data_key}"] = user_data_value
elif key in properties:
primary_changes[properties[key]] = value
all_changes[properties[key]] = value
more_count = len(all_changes) - len(primary_changes)
return render_to_string("reports/standard/partials/user_history_changes.html", {
"primary_changes": self._html_list(primary_changes),
"all_changes": self._html_list(all_changes),
"more_count": more_count,
})
def _get_action_display(logged_action):
action = ugettext_lazy("Updated")
if logged_action == UserHistory.CREATE:
action = ugettext_lazy("Added")
elif logged_action == UserHistory.DELETE:
action = ugettext_lazy("Deleted")
return action
| dimagi/commcare-hq | corehq/apps/reports/standard/users/reports.py | Python | bsd-3-clause | 10,221 |
import threading
import numpy as np
def ros_ensure_valid_name(name):
return name.replace('-','_')
def lineseg_box(xmin, ymin, xmax, ymax):
return [ [xmin,ymin,xmin,ymax],
[xmin,ymax,xmax,ymax],
[xmax,ymax,xmax,ymin],
[xmax,ymin,xmin,ymin],
]
def lineseg_circle(x,y,radius,N=64):
draw_linesegs = []
theta = np.arange(N)*2*np.pi/N
xdraw = x+np.cos(theta)*radius
ydraw = y+np.sin(theta)*radius
for i in range(N-1):
draw_linesegs.append(
(xdraw[i],ydraw[i],xdraw[i+1],ydraw[i+1]))
draw_linesegs.append(
(xdraw[-1],ydraw[-1],xdraw[0],ydraw[0]))
return draw_linesegs
class SharedValue:
def __init__(self):
self.evt = threading.Event()
self._val = None
def set(self,value):
# called from producer thread
self._val = value
self.evt.set()
def is_new_value_waiting(self):
return self.evt.isSet()
def get(self,*args,**kwargs):
# called from consumer thread
self.evt.wait(*args,**kwargs)
val = self._val
self.evt.clear()
return val
def get_nowait(self):
# XXX TODO this is not atomic and is thus dangerous.
# (The value could get read, then another thread could set it,
# and only then might it get flagged as clear by this thread,
# even though a new value is waiting.)
val = self._val
self.evt.clear()
return val
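# Minimal usage sketch (hypothetical, added for clarity): one thread publishes
# values with set(), another blocks in get() until a value is available.
#
#   sv = SharedValue()
#   threading.Thread(target=lambda: sv.set(42)).start()
#   value = sv.get()   # blocks until the producer calls set(), then returns 42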
class SharedValue1(object):
def __init__(self,initial_value):
self._val = initial_value
self.lock = threading.Lock()
def get(self):
self.lock.acquire()
try:
val = self._val
finally:
self.lock.release()
return val
def set(self,new_value):
self.lock.acquire()
try:
self._val = new_value
finally:
self.lock.release()
| motmot/fview | motmot/fview/utils.py | Python | bsd-3-clause | 1,929 |
# c: 14.04.2008, r: 14.04.2008
import numpy as nm
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/square_unit_tri.mesh'
def get_pars(ts, coors, mode=None, region=None, ig=None, extra_arg=None):
if mode == 'special':
if extra_arg == 'hello!':
ic = 0
else:
ic = 1
return {('x_%s' % ic) : coors[:,ic]}
def get_p_edge(ts, coors, bc=None):
if bc.name == 'p_left':
return nm.sin(nm.pi * coors[:,1])
else:
return nm.cos(nm.pi * coors[:,1])
def get_circle(coors, domain=None):
r = nm.sqrt(coors[:,0]**2.0 + coors[:,1]**2.0)
return nm.where(r < 0.2)[0]
functions = {
'get_pars1' : (lambda ts, coors, mode=None, region=None, ig=None:
get_pars(ts, coors, mode, region, ig, extra_arg='hello!'),),
'get_p_edge' : (get_p_edge,),
'get_circle' : (get_circle,),
}
# Just another way of adding a function, besides 'functions' keyword.
function_1 = {
'name' : 'get_pars2',
'function' : lambda ts, coors,mode=None, region=None, ig=None:
get_pars(ts, coors, mode, region, ig, extra_arg='hi!'),
}
materials = {
'mf1' : (None, 'get_pars1'),
'mf2' : 'get_pars2',
# Dot denotes a special value, that is not propagated to all QP.
'mf3' : ({'a' : 10.0, 'b' : 2.0, '.c' : 'ahoj'},),
}
fields = {
'pressure' : (nm.float64, 1, 'Omega', 2),
}
variables = {
'p' : ('unknown field', 'pressure', 0),
'q' : ('test field', 'pressure', 'p'),
}
wx = 0.499
regions = {
'Omega' : ('all', {}),
'Left' : ('nodes in (x < -%.3f)' % wx, {}),
'Right' : ('nodes in (x > %.3f)' % wx, {}),
'Circle' : ('nodes by get_circle', {}),
}
ebcs = {
'p_left' : ('Left', {'p.all' : 'get_p_edge'}),
'p_right' : ('Right', {'p.all' : 'get_p_edge'}),
}
equations = {
'e1' : """dw_laplace.2.Omega( mf3.a, q, p ) = 0""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
}
fe = {
'chunk_size' : 1000
}
from sfepy.base.testing import TestCommon, assert_
from sfepy.base.base import pause, debug
class Test( TestCommon ):
def from_conf( conf, options ):
from sfepy.fem import ProblemDefinition
problem = ProblemDefinition.from_conf(conf)
test = Test(problem = problem, conf = conf, options = options)
return test
from_conf = staticmethod( from_conf )
def test_material_functions(self):
problem = self.problem
ts = problem.get_default_ts(step=0)
problem.materials.time_update(ts,
problem.domain,
problem.equations)
coors = problem.domain.get_mesh_coors()
mat1 = problem.materials['mf1']
assert_(nm.all(coors[:,0] == mat1.get_data(None, None, 'x_0')))
mat2 = problem.materials['mf2']
assert_(nm.all(coors[:,1] == mat2.get_data(None, None, 'x_1')))
mat3 = problem.materials['mf3']
key = mat3.get_keys(region_name='Omega')[0]
assert_(nm.all(mat3.get_data(key, 0, 'a') == 10.0))
assert_(nm.all(mat3.get_data(key, 0, 'b') == 2.0))
assert_(mat3.get_data(None, None, 'c') == 'ahoj')
return True
# mat.time_update(ts, problem)
def test_ebc_functions(self):
import os.path as op
problem = self.problem
problem.set_equations(self.conf.equations)
problem.time_update()
vec = problem.solve()
name = op.join(self.options.out_dir,
op.splitext(op.basename(__file__))[0] + '_ebc.vtk')
problem.save_state(name, vec)
ok = True
domain = problem.domain
iv = domain.regions['Left'].get_vertices(0)
coors = domain.get_mesh_coors()[iv]
ok = ok and self.compare_vectors(vec[iv], nm.sin(nm.pi * coors[:,1]),
label1='state_left', label2='bc_left')
iv = domain.regions['Right'].get_vertices(0)
coors = domain.get_mesh_coors()[iv]
ok = ok and self.compare_vectors(vec[iv], nm.cos(nm.pi * coors[:,1]),
label1='state_right', label2='bc_right')
return ok
def test_region_functions(self):
import os.path as op
problem = self.problem
name = op.join(self.options.out_dir,
op.splitext(op.basename(__file__))[0])
problem.save_regions(name, ['Circle'])
return True
| olivierverdier/sfepy | tests/test_functions.py | Python | bsd-3-clause | 4,539 |
# -*- coding: utf-8 -*-
__version__ = '0.5.0'
request_post_identifier = 'current_aldryn_blog_entry'
| aldryn/aldryn-blog | aldryn_blog/__init__.py | Python | bsd-3-clause | 100 |
# -*- coding: utf-8 -*-
"""Helper utilities and decorators."""
from flask import flash, render_template, current_app
def flash_errors(form, category="warning"):
"""Flash all errors for a form."""
for field, errors in form.errors.items():
for error in errors:
flash("{0} - {1}"
.format(getattr(form, field).label.text, error), category)
def render_extensions(template_path, **kwargs):
"""
Wraps around the standard render template method and shoves in some other stuff out of the config.
:param template_path:
:param kwargs:
:return:
"""
return render_template(template_path,
_GOOGLE_ANALYTICS=current_app.config['GOOGLE_ANALYTICS'],
**kwargs)
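# Usage sketch (hypothetical view function, added for illustration): call it
# exactly like render_template; the Google Analytics id from the app config is
# injected automatically.
#
#   @blueprint.route('/')
#   def home():
#       return render_extensions('public/home.html', form=LoginForm())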
| wdm0006/myflaskapp | myflaskapp/utils.py | Python | bsd-3-clause | 777 |
# Copyright (c) 2010 ActiveState Software Inc. All rights reserved.
"""
pypm.common.util
~~~~~~~~~~~~~~~~
Assorted utility code
"""
import os
from os import path as P
import sys
import re
from contextlib import contextmanager
import logging
import time
import textwrap
from datetime import datetime
from pkg_resources import Requirement
from pkg_resources import resource_filename
import six
import pypm
from zclockfile import LockFile
LOG = logging.getLogger(__name__)
# Language/library utilities
#####################################################################
def wrapped(txt, prefix='', **options):
"""Return wrapped text suitable for printing to terminal"""
MAX_WIDTH=70 # textwrap.wrap's default
return '\n'.join([
'{0}{1}'.format(prefix, line)
for line in textwrap.wrap(txt, width=MAX_WIDTH-len(prefix), **options)])
def lazyproperty(func):
"""A property decorator for lazy evaluation"""
cache = {}
def _get(self):
"""Return the property value from cache once it is calculated"""
try:
return cache[self]
except KeyError:
cache[self] = value = func(self)
return value
return property(_get)
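# Example (hypothetical class, added for illustration): the decorated method
# runs at most once per instance; later attribute accesses return the value
# stored in ``cache``.
#
#   class Repository(object):
#       @lazyproperty
#       def index(self):
#           return parse_index_file()   # executed only on first access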
def memoize(fn):
"""Memoize functions that take simple arguments
The arugments of this function must be 'hashable'
Keywords are not supported
"""
memo = {}
def wrapper(*args):
key = tuple(args)
if key not in memo:
memo[key] = fn(*args)
return memo[key]
return wrapper
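# Example (hypothetical function, added for illustration): repeated calls with
# the same positional arguments are served from the memo dict.
#
#   @memoize
#   def slow_lookup(name, version):
#       return query_remote_index(name, version)
#
#   slow_lookup('pypm', '1.3')   # computed once
#   slow_lookup('pypm', '1.3')   # returned from the cache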
class ConfigParserNamedLists(object):
"""Parse a named mapping from the configuration file.
Example input (config file):
[packages]
free = http://pypm-free.as.com
be = http://pypm-be.as.com
staging = http://pypm-staging.as.com
default = be free
QA = staging default
What this class produces (self.mapping):
{
'free': [factory('free', 'http://pypm-free.as.com')],
'be': [factory('be', 'http://pypm-be.as.com')],
'staging': [factory('staging', 'http://pypm-staging.as.com')],
'default': [factory('be', 'http://pypm-be.as.com'),
factory('free', 'http://pypm-free.as.com')],
'QA': [factory('staging', 'http://pypm-staging.as.com'),
factory('be', 'http://pypm-be.as.com'),
factory('free', 'http://pypm-free.as.com')],
}
"""
    VALUE_SEP = re.compile(r'[\s,]+')
def __init__(self, option_items, factory, is_sentinel):
"""
- option_items: ConfigParser.items('yoursection')
- factory: a function that produces the value object
        - is_sentinel: a function that returns True for sentinels
"""
self.option_items = option_items
self.factory = factory
self.is_sentinel = is_sentinel
self.mapping = {}
self._init()
def _init(self):
for name, value in self.option_items:
if name in self.mapping:
raise ValueError('duplicate option key found: {0}'.format(name))
else:
self.mapping[name] = value
# substitute references
_processed = set()
for name in self.mapping:
self._expand_rvalue(name, _processed)
def _expand_rvalue(self, name, processed):
if name in processed:
return
value = self.mapping[name]
if isinstance(value, list):
processed.add(name)
return
if name not in self.mapping:
raise ValueError('unknown option reference: {0}'.format(name))
if self.is_sentinel(value):
self.mapping[name] = [self.factory(name, value)]
else:
self.mapping[name] = []
for part in self.VALUE_SEP.split(value):
self._expand_rvalue(part, processed)
self.mapping[name].extend(self.mapping[part])
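# Hypothetical usage sketch mirroring the docstring example above (the config
# file name and the factory/is_sentinel callables are assumptions, not PyPM API):
#
#     parser = ConfigParser.SafeConfigParser()
#     parser.read('pypm.conf')
#     named = ConfigParserNamedLists(parser.items('packages'),
#                                    factory=lambda name, url: (name, url),
#                                    is_sentinel=lambda value: '://' in value)
#     named.mapping['QA']   # -> flattened list of factory('staging', ...), etc.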
# System routines
######################################################################
@contextmanager
def locked(lockfile):
"""'with' context to lock a file"""
lock = LockFile(lockfile)
try:
yield
finally:
lock.close()
@contextmanager
def dlocked(directory):
"""Lock based on a directory
You need this function if you do not want more than on process to be
operating on a directory
"""
if not P.exists(directory):
os.makedirs(directory)
lockfile = P.join(directory, '.lock')
with locked(lockfile):
yield
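# Illustrative use (the path is an assumption): serialize concurrent access to
# one directory across processes.
#
#     with dlocked('/tmp/pypm-work'):
#         pass   # only one process at a time runs this block for that directory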
def get_user_agent(default):
"""Return an user agent string representing PyPM
Retain the default user-agent for backward-compat
"""
return '{0} (PyPM {1.__version__})'.format(default, pypm)
# Path routines
# ########################################################################
def existing(path):
"""Return path, but assert its presence first"""
assert isinstance(path, (six.string_types, six.text_type)), \
'not of string type: %s <%s>' % (path, type(path))
assert P.exists(path), 'file/directory not found: %s' % path
return path
def concise_path(pth):
"""Return a concise, but human-understandable, version of ``pth``
Compresses %HOME% and %APPDATA%
"""
aliases = [
('%APPDATA%', os.getenv('APPDATA', None)),
('~', P.expanduser('~')),
]
for alias, pthval in aliases:
if pthval and pth.startswith(pthval):
return P.join(alias, P.relpath(pth, pthval))
return pth
def abs2rel(absolute_path):
"""Convert an absolute path to relative path assuming the topmost directory
is the bast dir.
>>> strip_abs_root('/opt/ActivePython/')
'opt/ActivePython/'
>>> strip_abs_root('/opt/ActivePython')
'opt/ActivePython'
"""
assert os.path.isabs(absolute_path), \
'`%s` is not a absolute path' % absolute_path
if sys.platform.startswith('win'):
assert absolute_path[1:3] == ':\\'
return absolute_path[3:] # remove the DRIVE
else:
assert absolute_path[0] == '/'
return absolute_path[1:] # remove the '/'
def url_join(url, components):
"""Join URL components .. always with a forward slash"""
assert type(components) is list
assert '\\' not in url, \
'URL is not supposed to contain backslashes. Is this windows path? '+url
return url + '/' + '/'.join(components)
def path_to_url(path):
"""Convert local path to remote url
"""
if sys.platform.startswith('win'):
assert '/' not in path, \
'windows path cannot contain forward slash: '+path
drive, path = os.path.splitdrive(path)
return url_join('file:///' + drive,
path.split('\\'))
else:
return 'file://' + P.abspath(path)
def pypm_file(*paths):
"""Return absolute path to a file residing inside the pypm package using
pkg_resources API"""
return resource_filename(Requirement.parse('pypm'), P.join(*paths))
class BareDateTime(six.text_type):
"""Wrapper around the DateTime object with our own standard string
representation
"""
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
FORMAT = DATE_FORMAT + ' ' + TIME_FORMAT
@classmethod
def to_string(cls, dt):
"""Convert the datetime object `dt` to a string
with format as defind by this class
"""
return dt.strftime(cls.FORMAT)
@classmethod
def to_datetime(cls, dt_string):
"""Convert dt_string, formatted by `to_string()` method above"""
ts = time.mktime(time.strptime(dt_string, cls.FORMAT))
return datetime.fromtimestamp(ts)
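# Round-trip sketch (example values assumed):
#
#     s = BareDateTime.to_string(datetime(2010, 1, 2, 3, 4, 5))   # '2010-01-02 03:04:05'
#     BareDateTime.to_datetime(s)                                 # -> datetime(2010, 1, 2, 3, 4, 5)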
| igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/site-packages/pypm/common/util.py | Python | mit | 7,802 |
"""
Read in the output from the trace-inputlocator script and create a GraphViz file.
Pass as input the path to the yaml output of the trace-inputlocator script via config file.
The output is written to the trace-inputlocator location.
WHY? because the trace-inputlocator only has the GraphViz output of the last call to the script. This
version re-creates the trace-data from the (merged) yaml file (the yaml output is merged if pre-existing in the output
file).
"""
import yaml
import cea.config
from cea.tests.trace_inputlocator import create_graphviz_output
def main(config):
with open(config.trace_inputlocator.yaml_output_file, 'r') as f:
yaml_data = yaml.safe_load(f)
trace_data = []
for script in yaml_data.keys():
for direction in ('input', 'output'):
for locator, file in yaml_data[script][direction]:
trace_data.append((direction, script, locator, file))
create_graphviz_output(trace_data, config.trace_inputlocator.graphviz_output_file)
if __name__ == '__main__':
main(cea.config.Configuration()) | architecture-building-systems/CEAforArcGIS | bin/create_trace_graphviz.py | Python | mit | 1,081 |
from gmusicapi import Mobileclient
import getpass
class GpmSession(object):
# Private Variables
# Public Variables
api = None
logged_in = False
songs = None
playlists = None
# Constructor with optionally passed credentials
# Omit credentials if you want to handle login, include for prompts from this module
def __init__(self, email=None, pw=None):
self.api = Mobileclient()
if not email and not pw:
email = input("Please enter an email address tied to a GPM account: ")
pw = getpass.getpass("Please enter the password associated with %s: " % email)
self.logged_in = self.api.login(email, pw, Mobileclient.FROM_MAC_ADDRESS) # As per api protocol
if self.logged_in:
print("Google Play Music login successful")
else:
print("Google Play Music login failed")
def init(self, songs = True, playlists = True):
if songs:
self.songs = self.api.get_all_songs()
if playlists:
self.playlists = self.api.get_all_playlists()
def get_song_stream(self, title, artist=None):
print(not self.songs)
if not self.songs:
self.init(True, False)
song = next(iter((track for track in self.songs if self._filter_condition(track, title, artist)) or []), None)
if song:
return self.api.get_stream_url(song["id"])
else:
return None
def _filter_condition(self, song_obj, search_title, search_artist):
result = True
if search_title:
result = result & (song_obj["title"].lower().strip() == search_title.lower().strip())
if search_artist:
result = result & (song_obj["artist"].lower().strip() == search_artist.lower().strip())
return result
def main():
session = GpmSession()
while not session.logged_in:
session = GpmSession()
session.init()
print(session.get_song_stream("Dirty Laundry", "Bitter Sweet"))
print(session.get_song_stream("1940"))
if __name__ == "__main__":
main()
| sethraymond/JukeBot | src/libs/GooglePlayMusicController.py | Python | mit | 2,096 |
"""
Sql support for multilingual models
"""
| ziima/django-multilingual-ds9 | multilingual/models/sql/__init__.py | Python | mit | 44 |
# coding=utf8
import sublime
from .Base import Base
from ...utils import Debug
from ...utils.uiutils import get_prefix
class Outline(Base):
regions = {}
ts_view = None
def __init__(self, t3sviews):
super(Outline, self).__init__('Typescript : Outline View', t3sviews)
# SET TEXT
def set_text(self, edit_token, members, ts_view):
"""
This function takes the tss.js members structure instead of a string.
"""
# this will process the outline, even if the view is closed
self.ts_view = ts_view
if type(members) == list:
self._tssjs_2_outline_format(members)
elif type(members) == str:
self.text = members
super(Outline, self).set_text(edit_token, self.text)
def is_current_ts(self, ts_view):
if ts_view is None or self.ts_view is None:
return
return ts_view.id() == self.ts_view.id()
def _tssjs_2_outline_format(self, members):
text = []
line = 0
self.regions = {}
for member in members:
start_line = member['min']['line']
end_line = member['lim']['line']
left = member['min']['character']
right = member['lim']['character']
a = self.ts_view.text_point(start_line-1, left-1)
b = self.ts_view.text_point(end_line-1, right-1)
region = sublime.Region(a, b)
kind = get_prefix(member['loc']['kind'])
container_kind = get_prefix(member['loc']['containerKind'])
if member['loc']['kindModifiers'] != "":
member['loc']['kindModifiers'] = " " + member['loc']['kindModifiers']
if member['loc']['kind'] != 'class' and member['loc']['kind'] != 'interface':
t = "%s %s %s %s" % (kind, member['loc']['kindModifiers'], member['loc']['kind'], member['loc']['name'])
text.append('\n\t')
text.append(t.strip())
line += 1
self.regions[line] = region
else:
t = "%s %s %s %s {" % (container_kind, member['loc']['kindModifiers'], member['loc']['kind'], member['loc']['name'])
if len(text) == 0:
text.append('\n%s\n' % t.strip())
line += 2
self.regions[line - 1] = region
else:
text.append('\n\n}\n\n%s\n' % t.strip())
line += 5
self.regions[line - 1] = region
if len(members) == 0:
text.append("\n\nno members found\n")
self.text = ''.join(text)
is_focusing_ts_view = False
def on_click(self,line):
if self.is_focusing_ts_view:
Debug('focus', 'Outline.on_click: is just focusing other view > ignore')
return
if line in self.regions:
draw = sublime.DRAW_NO_FILL
self.ts_view.add_regions('typescript-definition', [self.regions[line]], 'comment', 'dot', draw)
self._focus_member_in_view(self.regions[line])
def _focus_member_in_view(self, region):
if self.ts_view.is_loading():
return
else:
Debug('focus', "_focus_member_in_view, Region @pos %i" % (region.begin()))
self.is_focusing_ts_view = True
self.ts_view.show(region)
self.ts_view.window().focus_view(self.ts_view)
self.is_focusing_ts_view = False
| Phaiax/ArcticTypescript | lib/display/views/Outline.py | Python | mit | 3,505 |
#-*- coding: utf-8 -*-
from PIL import Image, ImageChops, ImageDraw
from django.contrib.auth.models import User
from filer.models.foldermodels import Folder
from filer.models.clipboardmodels import Clipboard, ClipboardItem
def create_superuser():
superuser = User.objects.create_superuser('admin',
'[email protected]',
'secret')
return superuser
def create_folder_structure(depth=2, sibling=2, parent=None):
"""
This method creates a folder structure of the specified depth.
* depth: is an integer (default=2)
* sibling: is an integer (default=2)
* parent: is the folder instance of the parent.
"""
if depth > 0 and sibling > 0:
depth_range = range(1, depth+1)
depth_range.reverse()
for d in depth_range:
for s in range(1,sibling+1):
name = "folder: %s -- %s" %(str(d), str(s))
folder = Folder(name=name, parent=parent)
folder.save()
create_folder_structure(depth=d-1, sibling=sibling, parent=folder)
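# Illustrative call (numbers assumed): create_folder_structure(depth=2, sibling=2)
# saves Folder rows named "folder: <depth> -- <sibling>" at each level, recursing
# with depth-1 below every folder it creates.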
def create_clipboard_item(user, file):
clipboard, was_clipboard_created = Clipboard.objects.get_or_create(user=user)
clipboard_item = ClipboardItem(clipboard=clipboard, file=file)
return clipboard_item
def create_image(mode='RGB', size=(800, 600)):
image = Image.new(mode, size)
draw = ImageDraw.Draw(image)
x_bit, y_bit = size[0] // 10, size[1] // 10
draw.rectangle((x_bit, y_bit * 2, x_bit * 7, y_bit * 3), 'red')
draw.rectangle((x_bit * 2, y_bit, x_bit * 3, y_bit * 8), 'red')
return image
| croepha/django-filer | filer/tests/helpers.py | Python | mit | 1,697 |
# -*- coding:utf-8 -*-
#
# Import OBJ files
#
# External dependencies
import os
import numpy as np
import MeshToolkit as mtk
# Import a mesh from an OBJ / SMF file
def ReadObj( filename ) :
# Initialisation
vertices = []
faces = []
normals = []
colors = []
texcoords = []
material = ""
# Read each line in the file
for line in open( filename, "r" ) :
# Empty line / Comment
if line.isspace() or line.startswith( '#' ) : continue
# Split values in the line
values = line.split()
# Vertex
if values[0] == 'v' :
vertices.append( list( map( float, values[1:4] ) ) )
# Face (index starts at 1)
elif values[0] == 'f' :
faces.append( list( map( int, [ (v.split('/'))[0] for v in values[1:4] ] ) ) )
# Normal
elif values[0] == 'vn' :
normals.append( list( map( float, values[1:4] ) ) )
# Color
elif values[0] == 'c' :
colors.append( list( map( float, values[1:4] ) ) )
# Texture
elif values[0] == 'vt' :
texcoords.append( list( map( float, values[1:3] ) ) )
# Texture filename
elif values[0] == 'mtllib' :
material = values[1]
# Remap face indices
faces = np.array( faces ) - 1
# Return the final mesh
return mtk.Mesh( os.path.splitext(os.path.basename(filename))[0], vertices, faces, colors, material, texcoords, [], normals )
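# Hypothetical usage (the file name is assumed):
#
#     mesh = ReadObj('bunny.obj')
#     # -> mtk.Mesh named 'bunny', with face indices remapped to start at 0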
| microy/PyMeshToolkit | MeshToolkit/File/Obj.py | Python | mit | 1,285 |
#!/usr/bin/env python3
import sys
MOD = 123 # type: int
YES = "yes" # type: str
NO = "NO" # type: str
def solve(N: int, M: int, H: "List[List[str]]", A: "List[int]", B: "List[float]", Q: int, X: "List[int]"):
print(N, M)
assert len(H) == N - 1
for i in range(N - 1):
assert len(H[i]) == M - 2
print(*H[i])
assert len(A) == N - 1
assert len(B) == N - 1
for i in range(N - 1):
print(A[i], B[i])
print(Q)
assert len(X) == M + Q
for i in range(M + Q):
print(X[i])
print(YES)
print(NO)
print(MOD)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
M = int(next(tokens)) # type: int
H = [[next(tokens) for _ in range(M - 1 - 2 + 1)] for _ in range(N - 2 + 1)] # type: "List[List[str]]"
A = [int()] * (N - 2 + 1) # type: "List[int]"
B = [float()] * (N - 2 + 1) # type: "List[float]"
for i in range(N - 2 + 1):
A[i] = int(next(tokens))
B[i] = float(next(tokens))
Q = int(next(tokens)) # type: int
X = [int(next(tokens)) for _ in range(M + Q)] # type: "List[int]"
solve(N, M, H, A, B, Q, X)
if __name__ == '__main__':
main()
| kyuridenamida/atcoder-tools | tests/resources/test_codegen/test_default_code_generators_and_templates/python/expected_echo_generated_code.py | Python | mit | 1,316 |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - fullsearch action
This is the backend of the search form. Search pages and print results.
@copyright: 2001 by Juergen Hermann <[email protected]>
@license: GNU GPL, see COPYING for details.
"""
import re, time
from MoinMoin.Page import Page
from MoinMoin import wikiutil
from parsedatetime.parsedatetime import Calendar
from MoinMoin.web.utils import check_surge_protect
def checkTitleSearch(request):
""" Return 1 for title search, 0 for full text search, -1 for idiot spammer
who tries to press all buttons at once.
When used in FullSearch macro, we have 'titlesearch' parameter with
'0' or '1'. In standard search, we have either 'titlesearch' or
'fullsearch' with localized string. If both missing, default to
True (might happen with Safari) if this isn't an advanced search.
"""
form = request.values
if 'titlesearch' in form and 'fullsearch' in form:
ret = -1 # spammer / bot
else:
try:
ret = int(form['titlesearch'])
except ValueError:
ret = 1
except KeyError:
ret = ('fullsearch' not in form and not isAdvancedSearch(request)) and 1 or 0
return ret
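# Illustrative form inputs and return values (example values assumed):
#     {'titlesearch': '1'}                    -> 1 (title search)
#     {'fullsearch': u'Text'}                 -> 0 (full text search)
#     {'titlesearch': '1', 'fullsearch': '1'} -> -1 (both buttons at once: bot)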
def isAdvancedSearch(request):
""" Return True if advanced search is requested """
try:
return int(request.values['advancedsearch'])
except KeyError:
return False
def searchHints(f, hints):
""" Return a paragraph showing hints for a search
@param f: the formatter to use
@param hints: list of hints (as strings) to show
"""
return ''.join([
f.paragraph(1, attr={'class': 'searchhint'}),
# this is illegal formatter usage anyway, so we can directly use a literal
"<br>".join(hints),
f.paragraph(0),
])
def execute(pagename, request, fieldname='value', titlesearch=0, statistic=0):
_ = request.getText
titlesearch = checkTitleSearch(request)
if titlesearch < 0:
check_surge_protect(request, kick=True) # get rid of spammer
return
advancedsearch = isAdvancedSearch(request)
form = request.values
# context is relevant only for full search
if titlesearch:
context = 0
elif advancedsearch:
context = 180 # XXX: hardcoded context count for advancedsearch
else:
context = int(form.get('context', 0))
# Get other form parameters
needle = form.get(fieldname, '')
case = int(form.get('case', 0))
regex = int(form.get('regex', 0)) # no interface currently
hitsFrom = int(form.get('from', 0))
highlight_titles = int(form.get('highlight_titles', 1))
highlight_pages = int(form.get('highlight_pages', 1))
mtime = None
msg = ''
historysearch = 0
# if advanced search is enabled we construct our own search query
if advancedsearch:
and_terms = form.get('and_terms', '').strip()
or_terms = form.get('or_terms', '').strip()
not_terms = form.get('not_terms', '').strip()
#xor_terms = form.get('xor_terms', '').strip()
categories = form.getlist('categories') or ['']
timeframe = form.get('time', '').strip()
language = form.getlist('language') or ['']
mimetype = form.getlist('mimetype') or [0]
excludeunderlay = form.get('excludeunderlay', 0)
nosystemitems = form.get('nosystemitems', 0)
historysearch = form.get('historysearch', 0)
mtime = form.get('mtime', '')
if mtime:
mtime_parsed = None
# get mtime from known date/time formats
for fmt in (request.user.datetime_fmt,
request.cfg.datetime_fmt, request.user.date_fmt,
request.cfg.date_fmt):
try:
mtime_parsed = time.strptime(mtime, fmt)
except ValueError:
continue
else:
break
if mtime_parsed:
mtime = time.mktime(mtime_parsed)
else:
# didn't work, let's try parsedatetime
cal = Calendar()
mtime_parsed, parsed_what = cal.parse(mtime)
# XXX it is unclear if usage of localtime here and in parsedatetime module is correct.
# time.localtime is the SERVER's local time and of no relevance to the user (being
# somewhere in the world)
# mktime is reverse function for localtime, so this maybe fixes it again!?
if parsed_what > 0 and mtime_parsed <= time.localtime():
mtime = time.mktime(mtime_parsed)
else:
mtime_parsed = None # we don't use invalid stuff
# show info
if mtime_parsed:
# XXX mtime_msg is not shown in some cases
mtime_msg = _("(!) Only pages changed since '''%s''' are being displayed!",
wiki=True) % request.user.getFormattedDateTime(mtime)
else:
mtime_msg = _('/!\\ The modification date you entered was not '
'recognized and is therefore not considered for the '
'search results!', wiki=True)
else:
mtime_msg = None
word_re = re.compile(r'(\"[\w\s]+"|\w+)', re.UNICODE)
needle = ''
if categories[0]:
needle += 'category:%s ' % ','.join(categories)
if language[0]:
needle += 'language:%s ' % ','.join(language)
if mimetype[0]:
needle += 'mimetype:%s ' % ','.join(mimetype)
if excludeunderlay:
needle += '-domain:underlay '
if nosystemitems:
needle += '-domain:system '
if and_terms:
needle += '(%s) ' % and_terms
if not_terms:
needle += '(%s) ' % ' '.join(['-%s' % t for t in word_re.findall(not_terms)])
if or_terms:
needle += '(%s) ' % ' or '.join(word_re.findall(or_terms))
# check for sensible search term
stripped = needle.strip()
if len(stripped) == 0:
request.theme.add_msg(_('Please use a more selective search term instead '
'of {{{"%s"}}}', wiki=True) % wikiutil.escape(needle), "error")
Page(request, pagename).send_page()
return
needle = stripped
# Setup for type of search
if titlesearch:
title = _('Title Search: "%s"')
sort = 'page_name'
else:
if advancedsearch:
title = _('Advanced Search: "%s"')
else:
title = _('Full Text Search: "%s"')
sort = 'weight'
# search the pages
from MoinMoin.search import searchPages, QueryParser, QueryError
try:
query = QueryParser(case=case, regex=regex,
titlesearch=titlesearch).parse_query(needle)
except QueryError: # catch errors in the search query
request.theme.add_msg(_('Your search query {{{"%s"}}} is invalid. Please refer to '
'HelpOnSearching for more information.', wiki=True, percent=True) % wikiutil.escape(needle), "error")
Page(request, pagename).send_page()
return
results = searchPages(request, query, sort, mtime, historysearch)
# directly show a single hit for title searches
# this is the "quick jump" functionality if you don't remember
# the pagename exactly, but just some parts of it
if titlesearch and len(results.hits) == 1:
page = results.hits[0]
if not page.attachment: # we did not find an attachment
page = Page(request, page.page_name)
querydict = {}
if highlight_pages:
highlight = query.highlight_re()
if highlight:
querydict.update({'highlight': highlight})
url = page.url(request, querystr=querydict)
request.http_redirect(url)
return
if not results.hits: # no hits?
f = request.formatter
querydict = dict(wikiutil.parseQueryString(request.query_string))
querydict.update({'titlesearch': 0})
request.theme.add_msg(_('Your search query {{{"%s"}}} didn\'t return any results. '
'Please change some terms and refer to HelpOnSearching for '
'more information.%s', wiki=True, percent=True) % (wikiutil.escape(needle),
titlesearch and ''.join([
'<br>',
_('(!) Consider performing a', wiki=True), ' ',
f.url(1, href=request.page.url(request, querydict, escape=0)),
_('full-text search with your search terms'),
f.url(0), '.',
]) or ''), "error")
Page(request, pagename).send_page()
return
# This action generates data using the user language
request.setContentLanguage(request.lang)
request.theme.send_title(title % needle, pagename=pagename)
# Start content (important for RTL support)
request.write(request.formatter.startContent("content"))
# Hints
f = request.formatter
hints = []
if titlesearch:
querydict = dict(wikiutil.parseQueryString(request.query_string))
querydict.update({'titlesearch': 0})
hints.append(''.join([
_("(!) You're performing a title search that might not include"
' all related results of your search query in this wiki. <<BR>>', wiki=True),
' ',
f.url(1, href=request.page.url(request, querydict, escape=0)),
f.text(_('Click here to perform a full-text search with your '
'search terms!')),
f.url(0),
]))
if advancedsearch and mtime_msg:
hints.append(mtime_msg)
if hints:
request.write(searchHints(f, hints))
# Search stats
request.write(results.stats(request, request.formatter, hitsFrom))
# Then search results
info = not titlesearch
if context:
output = results.pageListWithContext(request, request.formatter,
info=info, context=context, hitsFrom=hitsFrom, hitsInfo=1,
highlight_titles=highlight_titles,
highlight_pages=highlight_pages)
else:
output = results.pageList(request, request.formatter, info=info,
hitsFrom=hitsFrom, hitsInfo=1,
highlight_titles=highlight_titles,
highlight_pages=highlight_pages)
request.write(output)
request.write(request.formatter.endContent())
request.theme.send_footer(pagename)
request.theme.send_closing_html()
| Glottotopia/aagd | moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/action/fullsearch.py | Python | mit | 10,979 |
import unittest
from tests.test_basic import BaseTestCase
from datetime import timedelta, datetime, tzinfo
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
class UtilTestCase(BaseTestCase):
"""
Tests utils
"""
def test_parse_iso_8601_time_str(self):
"""
At times, Amazon hands us a timestamp with no microseconds.
"""
import datetime
from route53.util import parse_iso_8601_time_str
self.assertEqual(parse_iso_8601_time_str('2013-07-28T01:00:01Z'),
datetime.datetime(2013, 7, 28, 1, 0, 1, 0, \
tzinfo=UTC()))
self.assertEqual(parse_iso_8601_time_str('2013-07-28T01:00:01.001Z'),
datetime.datetime(2013, 7, 28, 1, 0, 1, 1000, \
tzinfo=UTC()))
| EricSchles/python-route53 | tests/test_util.py | Python | mit | 916 |
from .tornadoconnection import TornadoLDAPConnection
| Noirello/bonsai | src/bonsai/tornado/__init__.py | Python | mit | 53 |
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals, division, absolute_import
from enocean.protocol.eep import EEP
eep = EEP()
# profiles = eep.
def test_first_range():
offset = -40
values = range(0x01, 0x0C)
for i in range(len(values)):
minimum = float(i * 10 + offset)
maximum = minimum + 40
profile = eep.find_profile([], 0xA5, 0x02, values[i])
assert minimum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert maximum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
def test_second_range():
offset = -60
values = range(0x10, 0x1C)
for i in range(len(values)):
minimum = float(i * 10 + offset)
maximum = minimum + 80
profile = eep.find_profile([], 0xA5, 0x02, values[i])
assert minimum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert maximum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
def test_rest():
profile = eep.find_profile([], 0xA5, 0x02, 0x20)
assert -10 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert +41.2 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
profile = eep.find_profile([], 0xA5, 0x02, 0x30)
assert -40 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
assert +62.3 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
| kipe/enocean | enocean/protocol/tests/test_temperature_sensors.py | Python | mit | 1,616 |
def powers_of_two(limit):
value = 1
while value < limit:
yield value
value += value
# Use the generator
for i in powers_of_two(70):
print(i)
# Explore the mechanism
g = powers_of_two(100)
assert str(type(powers_of_two)) == "<class 'function'>"
assert str(type(g)) == "<class 'generator'>"
assert g.__next__() == 1
assert g.__next__() == 2
assert next(g) == 4
assert next(g) == 8
| rtoal/ple | python/powers_of_two.py | Python | mit | 409 |
from core.himesis import Himesis, HimesisPostConditionPattern
import cPickle as pickle
from uuid import UUID
class HReconnectMatchElementsRHS(HimesisPostConditionPattern):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HReconnectMatchElementsRHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HReconnectMatchElementsRHS, self).__init__(name='HReconnectMatchElementsRHS', num_nodes=3, edges=[])
# Add the edges
self.add_edges([(2, 0), (0, 1)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_post__GM2AUTOSAR_MM'
p2
aS'MoTifRule'
p3
a.""")
self["MT_action__"] = """#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
# To access attribute x of node n, use: PostNode('n')['x'].
#===============================================================================
pass
"""
self["name"] = """"""
self["GUID__"] = UUID('ce9c5429-6e4c-4782-a83a-17e240381cb6')
# Set the node attributes
self.vs[0]["mm__"] = """MT_post__match_contains"""
self.vs[0]["MT_label__"] = """3"""
self.vs[0]["GUID__"] = UUID('789662d8-ab7d-4640-a710-abbc847de320')
self.vs[1]["mm__"] = """MT_post__MetaModelElement_S"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[1]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[1]["GUID__"] = UUID('7e5e306f-cb65-40df-9e60-63b9fe83b79b')
self.vs[2]["mm__"] = """MT_post__MatchModel"""
self.vs[2]["MT_label__"] = """1"""
self.vs[2]["GUID__"] = UUID('3c85bf70-be4a-40d8-9bcb-c138195ad20e')
from HReconnectMatchElementsLHS import HReconnectMatchElementsLHS
self.pre = HReconnectMatchElementsLHS()
def action(self, PostNode, graph):
"""
Executable constraint code.
@param PostNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
# To access attribute x of node n, use: PostNode('n')['x'].
#===============================================================================
pass
def execute(self, packet, match):
"""
Transforms the current match of the packet according to the rule %s.
Pivots are also assigned, if any.
@param packet: The input packet.
@param match: The match to rewrite.
"""
graph = packet.graph
# Build a dictionary {label: node index} mapping each label of the pattern to a node in the graph to rewrite.
# Because of the uniqueness property of labels in a rule, we can store all LHS labels
# and subsequently add the labels corresponding to the nodes to be created.
labels = match.copy()
#===============================================================================
# Update attribute values
#===============================================================================
#===============================================================================
# Create new nodes
#===============================================================================
# match_contains3
new_node = graph.add_node()
labels['3'] = new_node
graph.vs[new_node][Himesis.Constants.META_MODEL] = 'match_contains'
#===============================================================================
# Create new edges
#===============================================================================
# MatchModel1 -> match_contains3
graph.add_edges([(labels['1'], labels['3'])])
# match_contains3 -> MetaModelElement_S2
graph.add_edges([(labels['3'], labels['2'])])
#===============================================================================
# Set the output pivots
#===============================================================================
#===============================================================================
# Perform the post-action
#===============================================================================
try:
self.action(lambda i: graph.vs[labels[i]], graph)
except Exception, e:
raise Exception('An error has occurred while applying the post-action', e)
#===============================================================================
# Finally, delete nodes (this will automatically delete the adjacent edges)
#===============================================================================
| levilucio/SyVOLT | GM2AUTOSAR_MM/merge_inter_layer_rules/Himesis/HReconnectMatchElementsRHS.py | Python | mit | 6,605 |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
from azure.mgmt.containerregistry.v2017_03_01.models import (
RegistryCreateParameters,
RegistryUpdateParameters,
StorageAccountParameters,
Sku,
SkuTier,
ProvisioningState,
PasswordName
)
import azure.mgmt.storage
from devtools_testutils import (
AzureMgmtTestCase, FakeStorageAccount,
ResourceGroupPreparer, StorageAccountPreparer
)
FAKE_STORAGE = FakeStorageAccount(
name='pyacr',
id=''
)
DEFAULT_LOCATION = 'westcentralus'
DEFAULT_SKU_NAME = 'Basic'
DEFAULT_KEY_VALUE_PAIR = {
'key': 'value'
}
class MgmtACRTest20170301(AzureMgmtTestCase):
def setUp(self):
super(MgmtACRTest20170301, self).setUp()
self.client = self.create_mgmt_client(
azure.mgmt.containerregistry.ContainerRegistryManagementClient,
api_version='2017-03-01'
)
@ResourceGroupPreparer(location=DEFAULT_LOCATION)
@StorageAccountPreparer(name_prefix='pyacr', location=DEFAULT_LOCATION, playback_fake_resource=FAKE_STORAGE)
def test_basic_registry(self, resource_group, location, storage_account, storage_account_key):
registry_name = self.get_resource_name('pyacr')
name_status = self.client.registries.check_name_availability(registry_name)
self.assertTrue(name_status.name_available)
# Create a Basic registry
registry = self.client.registries.create(
resource_group_name=resource_group.name,
registry_name=registry_name,
registry_create_parameters=RegistryCreateParameters(
location=location,
sku=Sku(
name=DEFAULT_SKU_NAME
),
storage_account=StorageAccountParameters(
name=storage_account.name,
access_key=storage_account_key
)
)
).result()
self.assertEqual(registry.name, registry_name)
self.assertEqual(registry.location, location)
self.assertEqual(registry.sku.name, DEFAULT_SKU_NAME)
self.assertEqual(registry.sku.tier, SkuTier.basic.value)
self.assertEqual(registry.provisioning_state.value, ProvisioningState.succeeded.value)
self.assertEqual(registry.admin_user_enabled, False)
registries = list(self.client.registries.list_by_resource_group(resource_group.name))
self.assertEqual(len(registries), 1)
# Update the registry with new tags and enable admin user
registry = self.client.registries.update(
resource_group_name=resource_group.name,
registry_name=registry_name,
registry_update_parameters=RegistryUpdateParameters(
tags=DEFAULT_KEY_VALUE_PAIR,
admin_user_enabled=True
)
)
self.assertEqual(registry.name, registry_name)
self.assertEqual(registry.tags, DEFAULT_KEY_VALUE_PAIR)
self.assertEqual(registry.admin_user_enabled, True)
registry = self.client.registries.get(resource_group.name, registry_name)
self.assertEqual(registry.name, registry_name)
self.assertEqual(registry.tags, DEFAULT_KEY_VALUE_PAIR)
self.assertEqual(registry.admin_user_enabled, True)
credentials = self.client.registries.list_credentials(resource_group.name, registry_name)
self.assertEqual(len(credentials.passwords), 2)
credentials = self.client.registries.regenerate_credential(
resource_group.name, registry_name, PasswordName.password)
self.assertEqual(len(credentials.passwords), 2)
self.client.registries.delete(resource_group.name, registry_name)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| lmazuel/azure-sdk-for-python | azure-mgmt-containerregistry/tests/test_mgmt_containerregistry_2017_03_01.py | Python | mit | 4,129 |
"""
Absorption chillers
"""
import cea.config
import cea.inputlocator
import pandas as pd
import numpy as np
from math import log, ceil
import sympy
from cea.constants import HEAT_CAPACITY_OF_WATER_JPERKGK
from cea.analysis.costs.equations import calc_capex_annualized, calc_opex_annualized
__author__ = "Shanshan Hsieh"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Shanshan Hsieh"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
# technical model
def calc_chiller_main(mdot_chw_kgpers, T_chw_sup_K, T_chw_re_K, T_hw_in_C, T_ground_K, absorption_chiller):
"""
This model calculates the operation conditions of the absorption chiller given the chilled water loads in
evaporators and the hot water inlet temperature in the generator (desorber).
This is an empirical model using characteristic equation method developed by _[Kuhn A. & Ziegler F., 2005].
The parameters of each absorption chiller can be derived from experiments or performance curves from manufacturer's
catalog, more details are described in _[Puig-Arnavat M. et al, 2010].
Assumptions: constant external flow rates (chilled water at the evaporator, cooling water at the condenser and
absorber, hot water at the generator).
:param mdot_chw_kgpers: required chilled water flow rate
:type mdot_chw_kgpers: float
:param T_chw_sup_K: required chilled water supply temperature (outlet from the evaporator)
:type T_chw_sup_K: float
:param T_chw_re_K: required chilled water return temperature (inlet to the evaporator)
:type T_chw_re_K: float
:param T_hw_in_C: hot water inlet temperature to the generator
:type T_hw_in_C: float
:param T_ground_K: ground temperature
:type T_ground_K: float
:param locator: locator class
:return:
..[Kuhn A. & Ziegler F., 2005] Operational results of a 10kW absorption chiller and adaptation of the characteristic
    equation. In: Proceedings of the international conference solar air conditioning. Bad Staffelstein, Germany: 2005.
..[Puig-Arnavat M. et al, 2010] Analysis and parameter identification for characteristic equations of single- and
double-effect absorption chillers by means of multivariable regression. Int J Refrig: 2010.
"""
chiller_prop = absorption_chiller.chiller_prop # get data from the class
# create a dict of input operating conditions
input_conditions = {'T_chw_sup_K': T_chw_sup_K,
'T_chw_re_K': T_chw_re_K,
'T_hw_in_C': T_hw_in_C,
'T_ground_K': T_ground_K}
mcp_chw_WperK = mdot_chw_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK
q_chw_total_W = mcp_chw_WperK * (T_chw_re_K - T_chw_sup_K)
if np.isclose(q_chw_total_W, 0.0):
wdot_W = 0.0
q_cw_W = 0.0
q_hw_W = 0.0
T_hw_out_C = np.nan
EER = 0.0
input_conditions['q_chw_W'] = 0.0
else:
min_chiller_size_W = min(chiller_prop['cap_min'].values)
max_chiller_size_W = max(chiller_prop['cap_max'].values)
# get chiller properties and input conditions according to load
if q_chw_total_W < min_chiller_size_W:
# get chiller property according to load
chiller_prop = chiller_prop[chiller_prop['cap_min'] == min_chiller_size_W]
# operate at minimum load
number_of_chillers_activated = 1.0 # only activate one chiller
input_conditions['q_chw_W'] = chiller_prop['cap_min'].values # minimum load
elif q_chw_total_W <= max_chiller_size_W:
# get chiller property according to load
chiller_prop = chiller_prop[(chiller_prop['cap_min'] <= q_chw_total_W) &
(chiller_prop['cap_max'] >= q_chw_total_W)]
# operate one chiller at the cooling load
number_of_chillers_activated = 1.0 # only activate one chiller
input_conditions['q_chw_W'] = q_chw_total_W # operate at the chilled water load
else:
# get chiller property according to load
chiller_prop = chiller_prop[chiller_prop['cap_max'] == max_chiller_size_W]
# distribute loads to multiple chillers
number_of_chillers_activated = q_chw_total_W / max_chiller_size_W
# operate at maximum load
input_conditions['q_chw_W'] = max(chiller_prop['cap_max'].values)
absorption_chiller.update_data(chiller_prop)
operating_conditions = calc_operating_conditions(absorption_chiller, input_conditions)
# calculate chiller outputs
wdot_W = calc_power_demand(input_conditions['q_chw_W'], chiller_prop) * number_of_chillers_activated
q_cw_W = operating_conditions['q_cw_W'] * number_of_chillers_activated
q_hw_W = operating_conditions['q_hw_W'] * number_of_chillers_activated
T_hw_out_C = operating_conditions['T_hw_out_C']
EER = q_chw_total_W / (q_hw_W + wdot_W)
if T_hw_out_C < 0.0 :
print ('T_hw_out_C = ', T_hw_out_C, ' incorrect condition, check absorption chiller script.')
chiller_operation = {'wdot_W': wdot_W, 'q_cw_W': q_cw_W, 'q_hw_W': q_hw_W, 'T_hw_out_C': T_hw_out_C,
'q_chw_W': q_chw_total_W, 'EER': EER}
return chiller_operation
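# Characteristic equation recap (see the commented system of equations in
# calc_operating_conditions below): each heat flow is linear in a characteristic
# temperature difference built from the mean external temperatures, e.g.
#     q_chw_kW = s_e * ddt_e + r_e
#     ddt_e    = T_hw_mean + a_e * T_cw_mean + e_e * T_chw_mean
# and the generator load q_hw_kW follows the analogous equation with s_g, r_g.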
def calc_operating_conditions(absorption_chiller, input_conditions):
"""
Calculates chiller operating conditions at given input conditions by solving the characteristic equations and the
energy balance equations. This method is adapted from _[Kuhn A. & Ziegler F., 2005].
The heat rejection to cooling tower is approximated with the energy balance:
Q(condenser) + Q(absorber) = Q(generator) + Q(evaporator)
:param AbsorptionChiller chiller_prop: parameters in the characteristic equations and the external flow rates.
:param input_conditions:
:type input_conditions: dict
:return: a dict with operating conditions of the chilled water, cooling water and hot water loops in a absorption
chiller.
To improve speed, the system of equations was solved using sympy for the output variable ``q_hw_kW`` which is
then used to compute the remaining output variables. The following code was used to create the expression to
calculate ``q_hw_kW`` with::
# use symbolic computation to derive a formula for q_hw_kW:
# first, make sure all the variables are sympy symbols:
T_chw_in_C, T_chw_out_C, T_cw_in_C, T_hw_in_C, mcp_cw_kWperK, mcp_hw_kWperK, q_chw_kW = sympy.symbols(
"T_chw_in_C, T_chw_out_C, T_cw_in_C, T_hw_in_C, mcp_cw_kWperK, mcp_hw_kWperK, q_chw_kW")
T_hw_out_C, T_cw_out_C, q_hw_kW = sympy.symbols('T_hw_out_C, T_cw_out_C, q_hw_kW')
a_e, a_g, e_e, e_g, r_e, r_g, s_e, s_g = sympy.symbols("a_e, a_g, e_e, e_g, r_e, r_g, s_e, s_g")
ddt_e, ddt_g = sympy.symbols("ddt_e, ddt_g")
# the system of equations:
eq_e = s_e * ddt_e + r_e - q_chw_kW
eq_ddt_e = ((T_hw_in_C + T_hw_out_C) / 2.0
+ a_e * (T_cw_in_C + T_cw_out_C) / 2.0
+ e_e * (T_chw_in_C + T_chw_out_C) / 2.0
- ddt_e)
eq_g = s_g * ddt_g + r_g - q_hw_kW
eq_ddt_g = ((T_hw_in_C + T_hw_out_C) / 2.0
+ a_g * (T_cw_in_C
+ T_cw_out_C) / 2.0
+ e_g * (T_chw_in_C + T_chw_out_C) / 2.0
- ddt_g)
eq_bal_g = (T_hw_in_C - T_hw_out_C) - q_hw_kW / mcp_hw_kWperK
# solve the system of equations with sympy
eq_sys = [eq_e, eq_g, eq_bal_g, eq_ddt_e, eq_ddt_g]
unknown_variables = (T_hw_out_C, T_cw_out_C, q_hw_kW, ddt_e, ddt_g)
a, b = sympy.linear_eq_to_matrix(eq_sys, unknown_variables)
T_hw_out_C, T_cw_out_C, q_hw_kW, ddt_e, ddt_g = tuple(*sympy.linsolve(eq_sys, unknown_variables))
q_hw_kW.simplify()
..[Kuhn A. & Ziegler F., 2005] Operational results of a 10kW absorption chiller and adaptation of the characteristic
    equation. In: Proceedings of the international conference solar air conditioning. Bad Staffelstein, Germany: 2005.
"""
# external water circuits (e: chilled water, ac: cooling water, d: hot water)
T_hw_in_C = input_conditions['T_hw_in_C']
T_cw_in_C = input_conditions['T_ground_K'] - 273.0 # condenser water inlet temperature
T_chw_in_C = input_conditions['T_chw_re_K'] - 273.0 # inlet to the evaporator
T_chw_out_C = input_conditions['T_chw_sup_K'] - 273.0 # outlet from the evaporator
q_chw_kW = input_conditions['q_chw_W'] / 1000 # cooling load ata the evaporator
m_cw_kgpers = absorption_chiller.m_cw_kgpers # external flow rate of cooling water at the condenser and absorber
m_hw_kgpers = absorption_chiller.m_hw_kgpers # external flow rate of hot water at the generator
mcp_cw_kWperK = m_cw_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK / 1000
mcp_hw_kWperK = m_hw_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK / 1000
# chiller_props (these are constants from the Absorption_chiller sheet in systems.xls)
s_e = absorption_chiller.s_e
r_e = absorption_chiller.r_e
s_g = absorption_chiller.s_g
r_g = absorption_chiller.r_g
a_e = absorption_chiller.a_e
e_e = absorption_chiller.e_e
a_g = absorption_chiller.a_g
e_g = absorption_chiller.e_g
# variables to solve
# T_hw_out_C, T_cw_out_C, q_hw_kW, ddt_e, ddt_g = sympy.symbols('T_hw_out_C T_cw_out_C q_hw_kW , ddt_e, ddt_g')
#
# # systems of equations to solve
# eq_e = s_e * ddt_e + r_e - q_chw_kW
# eq_ddt_e = ((T_hw_in_C + T_hw_out_C) / 2.0 + a_e * (T_cw_in_C + T_cw_out_C) / 2.0 + e_e * (T_chw_in_C + T_chw_out_C) / 2.0 - ddt_e)
# eq_g = s_g * ddt_g + r_g - q_hw_kW
# eq_ddt_g = ((T_hw_in_C + T_hw_out_C) / 2.0 + a_g * (T_cw_in_C + T_cw_out_C) / 2.0 + e_g * (T_chw_in_C + T_chw_out_C) / 2.0- ddt_g)
# eq_bal_g = (T_hw_in_C - T_hw_out_C) - q_hw_kW / mcp_hw_kWperK
#
# # solve the system of equations with sympy
# eq_sys = [eq_e, eq_g, eq_bal_g, eq_ddt_e, eq_ddt_g]
# unknown_variables = (T_hw_out_C, T_cw_out_C, q_hw_kW, ddt_e, ddt_g)
# (T_hw_out_C, T_cw_out_C, q_hw_kW, ddt_e, ddt_g) = tuple(*sympy.linsolve(eq_sys, unknown_variables))
# a = np.array([
# [0, 0, 0, s_e, 0],
# [0, 0, -1, 0, s_g],
# [-1, 0, -1 / mcp_hw_kWperK, 0, 0],
# [0.5, 0, 0, -1, 0],
# [0.5, 0, 0, 0, -1]])
# b = np.array([
# [q_chw_kW - r_e],
# [-r_g],
# [-T_hw_in_C],
# [-0.5 * T_hw_in_C - 0.5 * e_e * (T_chw_in_C + T_chw_out_C)],
# [-0.5 * T_hw_in_C - 0.5 * e_g * (T_chw_in_C + T_chw_out_C)]])
# the below equation for q_hw_kW was created with sympy.linsolve using symbols for all the variables.
q_hw_kW = ((r_g * s_e * (0.5 * a_e * mcp_hw_kWperK + 0.25 * s_g * (a_e - a_g))
+ s_g * (0.5 * a_g * mcp_hw_kWperK * (q_chw_kW - r_e)
+ s_e * (0.5 * mcp_hw_kWperK
* (a_e * (0.5 * T_chw_in_C * e_g
+ 0.5 * T_chw_out_C * e_g
+ 0.5 * T_cw_in_C * a_g
+ 1.0 * T_hw_in_C)
- a_g * (0.5 * T_chw_in_C * e_e
+ 0.5 * T_chw_out_C * e_e
+ 0.5 * T_cw_in_C * a_e
+ 1.0 * T_hw_in_C))
- 0.25 * r_g * (a_e - a_g))))
/ (s_e * (0.5 * a_e * mcp_hw_kWperK + 0.25 * s_g * (a_e - a_g))))
# calculate results
q_cw_kW = q_hw_kW + q_chw_kW # Q(condenser) + Q(absorber)
T_hw_out_C = T_hw_in_C - q_hw_kW / mcp_hw_kWperK
T_cw_out_C = T_cw_in_C + q_cw_kW / mcp_cw_kWperK # TODO: set upper bound of the chiller operation
return {'T_hw_out_C': T_hw_out_C,
'T_cw_out_C': T_cw_out_C,
'q_chw_W': q_chw_kW * 1000,
'q_hw_W': q_hw_kW * 1000,
'q_cw_W': q_cw_kW * 1000}
def calc_power_demand(q_chw_W, chiller_prop):
"""
Calculates the power demand of the solution and refrigeration pumps in absorption chillers.
Linear equations derived from manufacturer's catalog _[Broad Air Conditioning, 2018].
:param q_chw_W:
:param ACH_type:
:return:
..[Broad Air Conditioning, 2018] BROAD XII NON-ELECTRIC CHILLER. (2018).
    Retrieved from https://www.broadusa.net/en/wp-content/uploads/2018/12/BROAD-XII-US-Catalog2018-12.pdf
"""
ach_type = chiller_prop['type'].values[0]
if ach_type == 'single':
        w_dot_W = 0.0028 * q_chw_W + 2941
else:
w_dot_W = 0.0021 * q_chw_W + 2757 # assuming the same for double and triple effect chillers
return w_dot_W
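# Order-of-magnitude check (assumed example): a 1 MW double-effect chiller draws
# roughly 0.0021 * 1e6 + 2757, i.e. about 4.9 kW, for the solution and
# refrigerant pumps -- a small parasitic load compared to the cooling delivered.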
# Investment costs
def calc_Cinv_ACH(Q_nom_W, Absorption_chiller_cost_data, ACH_type):
"""
Annualized investment costs for the vapor compressor chiller
:type Q_nom_W : float
:param Q_nom_W: peak cooling demand in [W]
:returns InvCa: annualized chiller investment cost in CHF/a
:rtype InvCa: float
"""
Capex_a_ACH_USD = 0
Opex_fixed_ACH_USD = 0
Capex_ACH_USD = 0
if Q_nom_W > 0:
Absorption_chiller_cost_data = Absorption_chiller_cost_data[Absorption_chiller_cost_data['type'] == ACH_type]
max_chiller_size = max(Absorption_chiller_cost_data['cap_max'].values)
Q_nom_W = Absorption_chiller_cost_data['cap_min'].values.min() if Q_nom_W < Absorption_chiller_cost_data[
'cap_min'].values.min() else Q_nom_W # minimum technology size
if Q_nom_W <= max_chiller_size:
Absorption_chiller_cost_data = Absorption_chiller_cost_data[
(Absorption_chiller_cost_data['cap_min'] <= Q_nom_W) & (
Absorption_chiller_cost_data[
'cap_max'] > Q_nom_W)] # keep properties of the associated capacity
Inv_a = Absorption_chiller_cost_data.iloc[0]['a']
Inv_b = Absorption_chiller_cost_data.iloc[0]['b']
Inv_c = Absorption_chiller_cost_data.iloc[0]['c']
Inv_d = Absorption_chiller_cost_data.iloc[0]['d']
Inv_e = Absorption_chiller_cost_data.iloc[0]['e']
Inv_IR = Absorption_chiller_cost_data.iloc[0]['IR_%']
Inv_LT = Absorption_chiller_cost_data.iloc[0]['LT_yr']
Inv_OM = Absorption_chiller_cost_data.iloc[0]['O&M_%'] / 100
InvC = Inv_a + Inv_b * (Q_nom_W) ** Inv_c + (Inv_d + Inv_e * Q_nom_W) * log(Q_nom_W)
Capex_a_ACH_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)
Opex_fixed_ACH_USD = InvC * Inv_OM
Capex_ACH_USD = InvC
else:
number_of_chillers = int(ceil(Q_nom_W / max_chiller_size))
Q_nom_each_chiller = Q_nom_W / number_of_chillers
for i in range(number_of_chillers):
Absorption_chiller_cost_data = Absorption_chiller_cost_data[
(Absorption_chiller_cost_data['cap_min'] <= Q_nom_each_chiller) & (
Absorption_chiller_cost_data[
'cap_max'] > Q_nom_each_chiller)] # keep properties of the associated capacity
Inv_a = Absorption_chiller_cost_data.iloc[0]['a']
Inv_b = Absorption_chiller_cost_data.iloc[0]['b']
Inv_c = Absorption_chiller_cost_data.iloc[0]['c']
Inv_d = Absorption_chiller_cost_data.iloc[0]['d']
Inv_e = Absorption_chiller_cost_data.iloc[0]['e']
Inv_IR = Absorption_chiller_cost_data.iloc[0]['IR_%']
Inv_LT = Absorption_chiller_cost_data.iloc[0]['LT_yr']
Inv_OM = Absorption_chiller_cost_data.iloc[0]['O&M_%'] / 100
InvC = Inv_a + Inv_b * (Q_nom_each_chiller) ** Inv_c + (Inv_d + Inv_e * Q_nom_each_chiller) * log(Q_nom_each_chiller)
Capex_a1 = calc_capex_annualized(InvC, Inv_IR, Inv_LT)
Capex_a_ACH_USD = Capex_a_ACH_USD + Capex_a1
Opex_fixed_ACH_USD = Opex_fixed_ACH_USD + InvC * Inv_OM
Capex_ACH_USD = Capex_ACH_USD + InvC
return Capex_a_ACH_USD, Opex_fixed_ACH_USD, Capex_ACH_USD
class AbsorptionChiller(object):
__slots__ = ["code", "chiller_prop", "m_cw_kgpers", "m_hw_kgpers",
"s_e", "r_e", "s_g", "r_g", "a_e", "e_e", "a_g", "e_g"]
def __init__(self, chiller_prop, ACH_type):
self.chiller_prop = chiller_prop[chiller_prop['type'] == ACH_type]
# copy first row to self for faster lookup (avoid pandas __getitem__ in tight loops)
self.code = chiller_prop['code'].values[0]
# external flow rate of cooling water at the condenser and absorber
self.m_cw_kgpers = chiller_prop['m_cw'].values[0]
# external flow rate of hot water at the generator
self.m_hw_kgpers = chiller_prop['m_hw'].values[0]
self.s_e = chiller_prop['s_e'].values[0]
self.r_e = chiller_prop['r_e'].values[0]
self.s_g = chiller_prop['s_g'].values[0]
self.r_g = chiller_prop['r_g'].values[0]
self.a_e = chiller_prop['a_e'].values[0]
self.e_e = chiller_prop['e_e'].values[0]
self.a_g = chiller_prop['a_g'].values[0]
self.e_g = chiller_prop['e_g'].values[0]
def update_data(self, chiller_prop):
"""Due to how AbsorptionChiller is currently used (FIXME: can we fix this?), we somedimes need to update
the instance variables from the databaframe chiller_prop.
"""
if self.code != chiller_prop['code'].values[0]:
# only update if new code...
# print("Updating chiller_prop data! old code: {0}, new code: {1}".format(self.code, chiller_prop['code'].values[0]))
self.code = chiller_prop['code'].values[0]
self.m_cw_kgpers = chiller_prop['m_cw'].values[0]
self.m_hw_kgpers = chiller_prop['m_hw'].values[0]
self.s_e = chiller_prop['s_e'].values[0]
self.r_e = chiller_prop['r_e'].values[0]
self.s_g = chiller_prop['s_g'].values[0]
self.r_g = chiller_prop['r_g'].values[0]
self.a_e = chiller_prop['a_e'].values[0]
self.e_e = chiller_prop['e_e'].values[0]
self.a_g = chiller_prop['a_g'].values[0]
self.e_g = chiller_prop['e_g'].values[0]
def main(config):
"""
run the whole preprocessing routine
test case 1) q_hw_W = 24213, q_chw_W = 20088, EER = 0.829, T_hw_out_C = 67.22 _[Kuhn, 2011]
test case 2) q_hw_W = 824105, q_chw_W = 1163011, EER = 1.41, T_hw_out_C = 165.93 _[Shirazi, 2016]
test case 3) q_hw_W = 623379, q_chw_W = 1163430, EER = 1.87, T_hw_out_C = 195.10 _[Shirazi, 2016]
..[Kuhn A., Ozgur-Popanda C., & Ziegler F., 2011] A 10kW Indirectly Fired Absorption Heat Pump: Concepts for a
reversible operation. 10th International Heat Pump Conference, 2011.
..[Shirazi A., Taylor R.A., White S.D., Morrison G.L.] A systematic parametric study and feasibility assessment
of solar-assisted single-effect, double-effect, and triple-effect absorption chillers for heating and cooling
applications. Energy Conversion and Management, 2016
"""
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
# Input parameters for test cases
case_1_dict = {'mdot_chw_kgpers':0.8, 'T_chw_sup_K': 280.0, 'T_chw_re_K': 286.0, 'T_hw_in_C': 84.6, 'ACH_type': 'single'}
case_2_dict = {'mdot_chw_kgpers': 39.7, 'T_chw_sup_K': 280.0, 'T_chw_re_K': 287.0, 'T_hw_in_C': 180,
'ACH_type': 'double'}
case_3_dict = {'mdot_chw_kgpers': 55.6, 'T_chw_sup_K': 280.0, 'T_chw_re_K': 285.0, 'T_hw_in_C': 210,
'ACH_type': 'triple'}
# Unpack parameters
case_dict = case_1_dict
mdot_chw_kgpers = case_dict['mdot_chw_kgpers']
T_chw_sup_K = case_dict['T_chw_sup_K']
T_chw_re_K = case_dict['T_chw_re_K']
T_hw_in_C = case_dict['T_hw_in_C']
T_ground_K = 300
ach_type = case_dict['ACH_type']
chiller_prop = AbsorptionChiller(pd.read_excel(locator.get_database_conversion_systems(), sheet_name="Absorption_chiller"), ach_type)
chiller_operation = calc_chiller_main(mdot_chw_kgpers, T_chw_sup_K, T_chw_re_K, T_hw_in_C, T_ground_K, chiller_prop)
print(chiller_operation)
    print('test_decentralized_buildings_cooling() succeeded. Please double check results in the description.')
if __name__ == '__main__':
main(cea.config.Configuration())
| architecture-building-systems/CEAforArcGIS | cea/technologies/chiller_absorption.py | Python | mit | 20,692 |
"""
Copyright (c) 2013 Timon Wong
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import print_function
import locale
import os
import subprocess
import sys
import tempfile
PY3K = sys.version_info >= (3, 0, 0)
class MarkupRenderer(object):
def __init__(self):
self.renderer_options = {}
def load_settings(self, global_setting, renderer_options):
self.renderer_options = renderer_options
@classmethod
def is_enabled(cls, filename, syntax):
return False
def render(self, text, **kwargs):
raise NotImplementedError()
class InputMethod(object):
STDIN = 1
TEMPFILE = 2
FILE = 3
class CommandlineRenderer(MarkupRenderer):
def __init__(self, input_method=InputMethod.STDIN, executable=None, args=[]):
super(CommandlineRenderer, self).__init__()
self.input_method = input_method
self.executable = executable
self.args = args
def pre_process_encoding(self, text, **kwargs):
return text.encode('utf-8')
def pre_process(self, text, **kwargs):
return text
def post_process(self, rendered_text, **kwargs):
return rendered_text
def post_process_encoding(self, rendered_text, **kwargs):
return rendered_text.decode('utf-8')
def render(self, text, **kwargs):
text = self.pre_process_encoding(text, **kwargs)
text = self.pre_process(text, **kwargs)
text = self.executable_check(text, kwargs['filename'])
text = self.post_process_encoding(text, **kwargs)
return self.post_process(text, **kwargs)
def executable_check(self, text, filename):
tempfile_ = None
result = ''
try:
args = [self.get_executable()]
if self.input_method == InputMethod.STDIN:
args.extend(self.get_args())
elif self.input_method == InputMethod.TEMPFILE:
_, ext = os.path.splitext(filename)
tempfile_ = tempfile.NamedTemporaryFile(suffix=ext)
tempfile_.write(text)
tempfile_.flush()
                args.extend(self.get_args(filename=tempfile_.name))
text = None
elif self.input_method == InputMethod.FILE:
args.extend(self.get_args(filename=filename))
text = None
else:
return u''
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=self.get_startupinfo())
result, errdata = proc.communicate(text)
if len(errdata) > 0:
print(errdata)
finally:
if tempfile_ is not None:
tempfile_.close() # Also delete file
return result.strip()
def get_executable(self):
if not PY3K and os.name == 'nt':
# [PY2K] On Windows, popen won't support unicode args
if isinstance(self.executable, unicode):
encoding = locale.getpreferredencoding()
return self.executable.encode(encoding)
return self.executable
def get_args(self, filename=None):
if not PY3K and os.name == 'nt':
# [PY2K] On Windows, popen won't support unicode args
encoding = locale.getpreferredencoding()
args = [arg if isinstance(arg, str) else arg.encode(encoding) for arg in self.args]
else:
args = self.args
return [arg.format(filename=filename) for arg in args]
def get_startupinfo(self):
if os.name != 'nt':
return None
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
return info
def renderer(renderer_type):
renderer_type.IS_VALID_RENDERER__ = True
return renderer_type
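# Illustrative sketch (not part of this module): a concrete renderer would
# typically subclass CommandlineRenderer and opt in via the @renderer
# decorator, roughly as in the commented example below. The executable name,
# arguments and file extension are assumptions chosen for illustration only.
#
# @renderer
# class ExampleMarkdownRenderer(CommandlineRenderer):
#     def __init__(self):
#         super(ExampleMarkdownRenderer, self).__init__(
#             input_method=InputMethod.STDIN,
#             executable='markdown', args=['--html4tags'])
#
#     @classmethod
#     def is_enabled(cls, filename, syntax):
#         return filename.endswith('.md')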
| Lyleo/OmniMarkupPreviewer | OmniMarkupLib/Renderers/base_renderer.py | Python | mit | 4,974 |
"""
Test that no StopIteration is raised inside a generator
"""
# pylint: disable=missing-docstring,invalid-name,import-error, try-except-raise, wrong-import-position,not-callable,raise-missing-from
import asyncio
class RebornStopIteration(StopIteration):
"""
A class inheriting from StopIteration exception
"""
# This one is ok
def gen_ok():
yield 1
yield 2
yield 3
# pylint should warn about this one
# because of a direct raising of StopIteration inside generator
def gen_stopiter():
yield 1
yield 2
yield 3
raise StopIteration # [stop-iteration-return]
# pylint should warn about this one
# because of a direct raising of an exception inheriting from StopIteration inside generator
def gen_stopiterchild():
yield 1
yield 2
yield 3
raise RebornStopIteration # [stop-iteration-return]
# pylint should warn here
# because of the possibility that next raises a StopIteration exception
def gen_next_raises_stopiter():
g = gen_ok()
while True:
yield next(g) # [stop-iteration-return]
# This one is the same as gen_next_raises_stopiter
# but is ok because the next function is inside
# a try/except block handling StopIteration
def gen_next_inside_try_except():
g = gen_ok()
while True:
try:
yield next(g)
except StopIteration:
return
# This one is the same as gen_next_inside_try_except
# but is not ok because the next function is inside
# a try/except block that don't handle StopIteration
def gen_next_inside_wrong_try_except():
g = gen_ok()
while True:
try:
yield next(g) # [stop-iteration-return]
except ValueError:
return
# This one is the same as gen_next_inside_try_except
# but is not ok because the next function is inside
# a try/except block that handle StopIteration but reraise it
def gen_next_inside_wrong_try_except2():
g = gen_ok()
while True:
try:
yield next(g)
except StopIteration:
raise StopIteration # [stop-iteration-return]
# Those two last are ok
def gen_in_for():
for el in gen_ok():
yield el
def gen_yield_from():
yield from gen_ok()
def gen_dont_crash_on_no_exception():
g = gen_ok()
while True:
try:
yield next(g) # [stop-iteration-return]
except ValueError:
raise
def gen_dont_crash_on_uninferable():
# https://github.com/PyCQA/pylint/issues/1779
yield from iter()
raise asyncio.TimeoutError()
# https://github.com/PyCQA/pylint/issues/1830
def gen_next_with_sentinel():
yield next([], 42) # No bad return
from itertools import count
# https://github.com/PyCQA/pylint/issues/2158
def generator_using_next():
counter = count()
number = next(counter)
yield number * 2
# pylint: disable=no-self-use,too-few-public-methods
class SomeClassWithNext:
def next(self):
return iter([1, 2, 3])
def some_gen(self):
for value in self.next():
yield value
SomeClassWithNext().some_gen()
def something_invalid():
raise Exception('cannot iterate this')
def invalid_object_passed_to_next():
yield next(something_invalid()) # [stop-iteration-return]
| ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/stop_iteration_inside_generator.py | Python | mit | 3,242 |
import _plotly_utils.basevalidators
class RangebreaksValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(self, plotly_name="rangebreaks", parent_name="layout.xaxis", **kwargs):
super(RangebreaksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Rangebreak"),
data_docs=kwargs.pop(
"data_docs",
"""
bounds
Sets the lower and upper bounds of this axis
rangebreak. Can be used with `pattern`.
dvalue
Sets the size of each `values` item. The
default is one day in milliseconds.
enabled
Determines whether this axis rangebreak is
enabled or disabled. Please note that
`rangebreaks` only work for "date" axis type.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
pattern
Determines a pattern on the time line that
generates breaks. If *day of week* - days of
the week in English e.g. 'Sunday' or `sun`
(matching is case-insensitive and considers
only the first three characters), as well as
Sunday-based integers between 0 and 6. If
"hour" - hour (24-hour clock) as decimal
numbers between 0 and 24. for more info.
Examples: - { pattern: 'day of week', bounds:
[6, 1] } or simply { bounds: ['sat', 'mon'] }
breaks from Saturday to Monday (i.e. skips the
weekends). - { pattern: 'hour', bounds: [17, 8]
} breaks from 5pm to 8am (i.e. skips non-work
hours).
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
values
Sets the coordinate values corresponding to the
rangebreaks. An alternative to `bounds`. Use
`dvalue` to set the size of the values along
the axis.
""",
),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/layout/xaxis/_rangebreaks.py | Python | mit | 3,243 |
import unittest
import numpy as np
from bsym import ColourOperation, Configuration
from unittest.mock import patch
class ColourOperationTestCase( unittest.TestCase ):
"""Tests for colour operation methods"""
def test_symmetry_operation_is_initialised_from_a_matrix( self ):
matrix = np.array( [ [ 1, 0 ], [ 0, 1 ] ] )
mapping = [ { 1: 0, 0: 1 }, { 1: 1, 0: 0 } ]
co = ColourOperation( matrix, colour_mapping=mapping )
np.testing.assert_array_equal( co.matrix, matrix )
self.assertEqual( co.colour_mapping, mapping )
def test_from_vector( self ):
vector = [ 2, 3, 1 ]
mapping = [ { 1: 0, 0: 1 }, { 1: 1, 0: 0 }, { 1: 1, 0: 0 } ]
co = ColourOperation.from_vector( vector, mapping )
np.testing.assert_array_equal( co.matrix, np.array( [ [ 0, 0, 1 ], [ 1, 0, 0 ], [ 0, 1, 0 ] ] ) )
self.assertEqual( co.colour_mapping, mapping )
def test_from_vector_with_label( self ):
vector = [ 2, 3, 1 ]
mapping = [ { 1: 0, 0: 1 }, { 1: 1, 0: 0 } ]
label = 'A'
co = ColourOperation.from_vector( vector, mapping, label=label )
np.testing.assert_array_equal( co.matrix, np.array( [ [ 0, 0, 1 ], [ 1, 0, 0 ], [ 0, 1, 0 ] ] ) )
self.assertEqual( co.label, label )
self.assertEqual( co.colour_mapping, mapping )
def test_symmetry_operation_is_initialised_with_label( self ):
matrix = np.array( [ [ 1, 0 ], [ 0, 1 ] ] )
label = 'E'
mapping = [ { 1: 0, 0: 1 }, { 1: 1, 0: 0 } ]
co = ColourOperation( matrix, mapping, label=label )
self.assertEqual( co.label, label )
self.assertEqual( co.colour_mapping, mapping )
def test_from_vector_counting_from_zero( self ):
vector = [ 1, 2, 0 ]
mapping = [ { 1: 0, 0: 1 }, { 1: 1, 0: 0 } ]
co = ColourOperation.from_vector( vector, mapping, count_from_zero=True )
np.testing.assert_array_equal( co.matrix, np.array( [ [ 0, 0, 1 ], [ 1, 0, 0 ], [ 0, 1, 0 ] ] ) )
self.assertEqual( co.colour_mapping, mapping )
def test_operate_on( self ):
matrix = np.array( [ [ 0, 1, 0 ], [ 0, 0, 1 ], [ 1, 0, 0 ] ] )
colour_mapping = [ { 1:1, 2:2, 3:3 },
{ 1:2, 2:3, 3:1 },
{ 1:3, 2:2, 3:1 } ]
co = ColourOperation( matrix, colour_mapping )
configuration = Configuration( [ 1, 2, 3 ] )
co.operate_on( configuration )
np.testing.assert_array_equal( co.operate_on( configuration ).vector, np.array( [ 2, 1, 3 ] ) )
def test_mul( self ):
matrix_a = np.array( [ [ 1, 0 ], [ 0, 1 ] ] )
colour_mapping_a = [ { 0:1, 1:0 }, { 0:1, 1:0 } ]
matrix_b = np.array( [ [ 0, 1 ], [ 1, 0 ] ] )
colour_mapping_b = [ { 0:1, 1:0 }, { 0:1, 1:0 } ]
co_a = ColourOperation( matrix_a, colour_mapping_a )
co_b = ColourOperation( matrix_b, colour_mapping_b )
co_c = co_a * co_b
np.testing.assert_array_equal( co_c.matrix , np.array( [ [ 0, 1 ], [ 1, 0 ] ] ) )
self.assertEqual( co_c.colour_mapping, [ { 0:0, 1:1 }, { 0:0, 1:1 } ] )
if __name__ == '__main__':
unittest.main()
| bjmorgan/bsym | tests/unit_tests/test_colour_operation.py | Python | mit | 3,191 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/crafted/weapon/shared_wpn_heavy_blaster.iff"
result.attribute_template_id = 8
result.stfName("space_crafting_n","wpn_heavy_blaster")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/tangible/ship/crafted/weapon/shared_wpn_heavy_blaster.py | Python | mit | 474 |
from .humannum import (
K,
M,
G,
T,
P,
E,
Z,
Y,
humannum,
parsenum,
parseint,
value_to_unit,
unit_to_value,
)
__all__ = [
'K',
'M',
'G',
'T',
'P',
'E',
'Z',
'Y',
'humannum',
'parsenum',
'parseint',
'value_to_unit',
'unit_to_value',
]
| sejust/pykit | humannum/__init__.py | Python | mit | 341 |
import datetime
from django.contrib import admin
from django.db.models import Q
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from models import Invoice, InvoiceItem
class InvoiceItemInline(admin.TabularInline):
fieldsets = (
(
None,
{
'fields': ('title', 'quantity', 'unit', 'unit_price', 'tax_rate', 'weight')
}
),
)
model = InvoiceItem
extra = 0
class OverdueFilter(admin.SimpleListFilter):
title = _('overdue')
parameter_name = 'overdue'
def lookups(self, request, model_admin):
return (
('no', _('no')),
('yes', _('yes')),
)
def queryset(self, request, queryset):
if self.value() == 'no':
return queryset.filter(Q(date_due__gt=datetime.datetime.combine(now().date(), datetime.time.max))|Q(status=Invoice.STATUS.PAID))
if self.value() == 'yes':
return queryset.filter(date_due__lt=datetime.datetime.combine(now().date(), datetime.time.max)).exclude(status=Invoice.STATUS.PAID)
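# Summary of the filter above: an invoice is listed as overdue ("yes") when its
# due date lies before the end of the current day and its status is not PAID;
# "no" returns the complement (not yet due, or already paid).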
class InvoiceAdmin(admin.ModelAdmin):
date_hierarchy = 'date_issue'
list_display = ['pk', 'type', 'full_number', 'status', 'customer_name', 'customer_country',
'subtotal', 'vat', 'total', 'currency', 'date_issue', 'payment_term', 'is_overdue_boolean', 'is_paid']
list_editable = ['status']
list_filter = ['type', 'status', 'payment_method', OverdueFilter,
#'language', 'currency'
]
search_fields = ['number', 'subtitle', 'note', 'supplier_name', 'customer_name', 'shipping_name']
inlines = (InvoiceItemInline, )
fieldsets = (
(_(u'General information'), {
'fields': (
'type', 'number', 'full_number', 'status', 'subtitle', 'language', 'note',
'date_issue', 'date_tax_point', 'date_due', 'date_sent'
)
}),
(_(u'Contact details'), {
'fields': (
'issuer_name', 'issuer_email', 'issuer_phone'
)
}),
(_(u'Payment details'), {
'fields': (
'currency', 'discount', 'credit',
#'already_paid',
'payment_method', 'constant_symbol', 'variable_symbol', 'specific_symbol', 'reference',
'bank_name', 'bank_country', 'bank_city', 'bank_street', 'bank_zip', 'bank_iban', 'bank_swift_bic'
)
}),
(_(u'Supplier details'), {
'fields': (
'supplier_name', 'supplier_street', 'supplier_zip', 'supplier_city', 'supplier_country',
'supplier_registration_id', 'supplier_tax_id', 'supplier_vat_id', 'supplier_additional_info'
)
}),
(_(u'Customer details'), {
'fields': (
'customer_name', 'customer_street', 'customer_zip', 'customer_city', 'customer_country',
'customer_registration_id', 'customer_tax_id', 'customer_vat_id', 'customer_additional_info',
)
}),
(_(u'Shipping details'), {
'fields': (
'shipping_name', 'shipping_street', 'shipping_zip', 'shipping_city', 'shipping_country'
)
})
)
def is_overdue_boolean(self, invoice):
return invoice.is_overdue
is_overdue_boolean.boolean = True
is_overdue_boolean.short_description = _(u'Is overdue')
def is_paid(self, invoice):
return invoice.status == Invoice.STATUS.PAID
is_paid.boolean = True
is_paid.short_description = _(u'Is paid')
admin.site.register(Invoice, InvoiceAdmin)
| Mercy-Nekesa/sokoapp | sokoapp/invoicing/admin.py | Python | mit | 3,672 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-11-14 21:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('recipe', '0010_auto_20171114_1443'),
]
operations = [
migrations.RemoveField(
model_name='direction',
name='recipe',
),
migrations.DeleteModel(
name='Direction',
),
]
| RyanNoelk/OpenEats | api/v1/recipe/migrations/0011_auto_20171114_1543.py | Python | mit | 466 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
__author__ = 'Anubhav Jain'
__copyright__ = 'Copyright 2014, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Anubhav Jain'
__email__ = '[email protected]'
__date__ = 'Oct 03, 2014'
| fraricci/pymatgen | pymatgen/analysis/cost/__init__.py | Python | mit | 300 |
# -*- coding: utf-8 -*-
#
# agile-analytics documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 17 13:58:53 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'agile-analytics'
copyright = u'2016, Chris Heisel'
author = u'Chris Heisel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
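# For the default 'alabaster' theme this could look like the following
# (illustrative values only):
#
# html_theme_options = {
#     'description': 'Analytics for agile software teams',
#     'github_user': 'cmheisel',
#     'github_repo': 'agile-analytics',
# }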
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'agile-analytics v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'agile-analyticsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'agile-analytics.tex', u'agile-analytics Documentation',
u'Chris Heisel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'agile-analytics', u'agile-analytics Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'agile-analytics', u'agile-analytics Documentation',
author, 'agile-analytics', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| cmheisel/agile-analytics | docs/conf.py | Python | mit | 9,780 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/event_perk/shared_fed_dub_2x10_honorguard_deed.iff"
result.attribute_template_id = 2
result.stfName("event_perk","fed_dub_2x10_honorguard_deed_name")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/tangible/deed/event_perk/shared_fed_dub_2x10_honorguard_deed.py | Python | mit | 491 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/container/drum/shared_pob_ship_loot_box.iff"
result.attribute_template_id = -1
result.stfName("space/space_interaction","pob_loot")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/tangible/container/drum/shared_pob_ship_loot_box.py | Python | mit | 468 |
#!/usr/bin/env python
import TransferErrors as TE
import cPickle as pickle
with open('stuck.pkl','rb') as pklfile:
stuck = pickle.load(pklfile)
TE.makeBasicTable(stuck,TE.workdir+'html/table.html',TE.webdir+'table.html')
TE.makeCSV(stuck,TE.webdir+'data.csv')
for basis in [-6,-5,-4,-3,-1,1,2]:
TE.makeJson(stuck,TE.webdir+('stuck_%i'%basis).replace('-','m')+'.json',basis)
| sidnarayanan/TransferErrors | bin/write.py | Python | mit | 382 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/naboo/shared_waterfall_naboo_falls_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/static/naboo/shared_waterfall_naboo_falls_01.py | Python | mit | 450 |
# created by Chirath R, [email protected]
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from workshop.views import WorkshopRegistrationListView, WorkshopDetailView, WorkshopRegistrationUpdateView, \
WorkshopRegisterFormView, WorkshopListView, WorkshopFeedbackCreateView, WorkshopGalleryCreateView, \
WorkshopGalleryListView, WorkshopGalleryDeleteView, WorkshopCreateView, WorkshopUpdateView, WorkshopDeleteView
urlpatterns = [
url(r'^$', WorkshopListView.as_view(), name='workshop_list'),
url(r'^create/$', login_required(WorkshopCreateView.as_view()), name='workshop_create'),
url(r'^(?P<workshop_id>[0-9]+)/$', WorkshopDetailView.as_view(), name='workshop_detail'),
# TODO(2) Fix update and uncomment
# url(r'^(?P<pk>[0-9]+)/update/$', login_required(WorkshopUpdateView.as_view()), name='workshopdetail_update'),
url(r'^(?P<pk>[0-9]+)/delete/$', login_required(WorkshopDeleteView.as_view()), name='workshop_delete'),
url(r'^(?P<workshop_id>[0-9]+)/register/$', WorkshopRegisterFormView.as_view(), name='workshop_register'),
url(r'^(?P<workshop_id>[0-9]+)/register/list/$',
login_required(WorkshopRegistrationListView.as_view()), name='workshop_registration_list'),
url(r'^(?P<workshop_id>[0-9]+)/register/update/$',
login_required(WorkshopRegistrationUpdateView.as_view()), name='workshop_update'),
url(r'^success/$',
TemplateView.as_view(template_name='workshop/success.html'), name='workshop_registration_success'),
url(r'^(?P<workshop_id>[0-9]+)/feedback/$', WorkshopFeedbackCreateView.as_view(), name='workshop_feedback'),
url(r'^feedback/success/$',
TemplateView.as_view(template_name='workshop/success_feedback.html'), name='feedback_success'),
url(r'^(?P<pk>[0-9]+)/add-image/$', login_required(WorkshopGalleryCreateView.as_view()), name='image_create'),
url(r'^(?P<pk>[0-9]+)/gallery/$', WorkshopGalleryListView.as_view(), name='image_list'),
url(r'^image/(?P<pk>[0-9]+)/delete/$', login_required(WorkshopGalleryDeleteView.as_view()), name='image_delete'),
]
| amfoss/fosswebsite | workshop/urls.py | Python | mit | 2,168 |
# Globals for the directions
# Change the values as you see fit
EAST = None
NORTH = None
WEST = None
SOUTH = None
class Robot:
def __init__(self, direction=NORTH, x_pos=0, y_pos=0):
pass
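# One possible sketch (illustrative only; the exercise leaves these choices
# open): the directions could be distinct integers and the constructor could
# simply store its arguments, e.g.
#
# EAST, NORTH, WEST, SOUTH = range(4)
#
# class Robot:
#     def __init__(self, direction=NORTH, x_pos=0, y_pos=0):
#         self.direction = direction
#         self.coordinates = (x_pos, y_pos)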
| jmluy/xpython | exercises/practice/robot-simulator/robot_simulator.py | Python | mit | 201 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-09 01:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("hordak", "0005_account_currencies")]
operations = [
migrations.RunSQL(
"""
CREATE OR REPLACE FUNCTION check_leg()
RETURNS trigger AS
$$
DECLARE
tx_id INT;
non_zero RECORD;
BEGIN
IF (TG_OP = 'DELETE') THEN
tx_id := OLD.transaction_id;
ELSE
tx_id := NEW.transaction_id;
END IF;
SELECT ABS(SUM(amount)) AS total, amount_currency AS currency
INTO non_zero
FROM hordak_leg
WHERE transaction_id = tx_id
GROUP BY amount_currency
HAVING ABS(SUM(amount)) > 0
LIMIT 1;
IF FOUND THEN
RAISE EXCEPTION 'Sum of transaction amounts in each currency must be 0. Currency % has non-zero total %',
non_zero.currency, non_zero.total;
END IF;
RETURN NEW;
END;
$$
LANGUAGE plpgsql;
"""
)
]
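# The RunSQL above (re)defines check_leg(), a trigger function enforcing
# double-entry consistency: within the affected transaction the legs must sum
# to zero separately for every currency, otherwise an exception aborts the
# write.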
| adamcharnock/django-hordak | hordak/migrations/0006_auto_20161209_0108.py | Python | mit | 1,488 |
# -*- coding: utf-8 -*-
from thriftpy.protocol import TJSONProtocol
from thriftpy.thrift import TPayload, TType
from thriftpy.transport import TMemoryBuffer
from thriftpy._compat import u
import thriftpy.protocol.json as proto
class TItem(TPayload):
thrift_spec = {
1: (TType.I32, "id"),
2: (TType.LIST, "phones", (TType.STRING)),
}
default_spec = [("id", None), ("phones", None)]
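# In thriftpy, thrift_spec maps field ids to (ttype, field name[, nested item
# spec]) tuples, and default_spec lists (field name, default) pairs used to
# build the generated constructor defaults for this TPayload subclass.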
def test_map_to_obj():
val = [{"key": "ratio", "value": "0.618"}]
spec = [TType.STRING, TType.DOUBLE]
obj = proto.map_to_obj(val, spec)
assert {"ratio": 0.618} == obj
def test_map_to_json():
obj = {"ratio": 0.618}
spec = [TType.STRING, TType.DOUBLE]
json = proto.map_to_json(obj, spec)
assert [{"key": "ratio", "value": 0.618}] == json
def test_list_to_obj():
val = [4, 8, 4, 12, 67]
spec = TType.I32
obj = proto.list_to_obj(val, spec)
assert [4, 8, 4, 12, 67] == obj
def test_list_to_json():
val = [4, 8, 4, 12, 67]
spec = TType.I32
json = proto.list_to_json(val, spec)
assert [4, 8, 4, 12, 67] == json
def test_struct_to_json():
obj = TItem(id=13, phones=["5234", "12346456"])
json = proto.struct_to_json(obj)
assert {"id": 13, "phones": ["5234", "12346456"]} == json
def test_struct_to_obj():
json = {"id": 13, "phones": ["5234", "12346456"]}
obj = TItem()
obj = proto.struct_to_obj(json, obj)
assert obj.id == 13 and obj.phones == ["5234", "12346456"]
def test_json_proto_api_write():
obj = TItem(id=13, phones=["5234", "12346456"])
trans = TMemoryBuffer()
p = TJSONProtocol(trans)
p.write_struct(obj)
data = trans.getvalue().decode("utf-8")
length = data[0:4]
import json
data = json.loads(data[4:])
assert length == "\x00\x00\x00S" and data == {
"metadata": {"version": 1},
"payload": {"phones": ["5234", "12346456"], "id": 13}}
def test_json_proto_api_read():
obj = TItem(id=13, phones=["5234", "12346456"])
trans = TMemoryBuffer()
p = TJSONProtocol(trans)
p.write_struct(obj)
obj2 = TItem()
obj2 = p.read_struct(obj2)
assert obj.id == 13 and obj.phones == ["5234", "12346456"]
def test_unicode_string():
class Foo(TPayload):
thrift_spec = {
1: (TType.STRING, "name")
}
default_spec = [("name", None)]
trans = TMemoryBuffer()
p = TJSONProtocol(trans)
foo = Foo(name=u('pão de açúcar'))
foo.write(p)
foo2 = Foo()
foo2.read(p)
assert foo == foo2
| maralla/thriftpy | tests/test_json_protocol.py | Python | mit | 2,543 |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Customizations for the cloudsearchdomain command.
This module customizes the cloudsearchdomain command:
* Add validation that --endpoint-url is required.
"""
def register_cloudsearchdomain(cli):
cli.register_last('calling-command.cloudsearchdomain',
validate_endpoint_url)
def validate_endpoint_url(parsed_globals, **kwargs):
if parsed_globals.endpoint_url is None:
return ValueError(
"--endpoint-url is required for cloudsearchdomain commands")
| LockScreen/Backend | venv/lib/python2.7/site-packages/awscli/customizations/cloudsearchdomain.py | Python | mit | 1,074 |
from http import HTTPStatus
from typing import Callable
from typing import Dict
from typing import List
from typing import Union
import demistomock as demisto
import requests
from CommonServerPython import *
from CommonServerUserPython import *
from intezer_sdk import consts
from intezer_sdk.analysis import Analysis
from intezer_sdk.analysis import get_analysis_by_id
from intezer_sdk.analysis import get_latest_analysis
from intezer_sdk.api import IntezerApi
from intezer_sdk.errors import AnalysisIsAlreadyRunning
from intezer_sdk.errors import AnalysisIsStillRunning
from intezer_sdk.errors import FamilyNotFoundError
from intezer_sdk.errors import HashDoesNotExistError
from intezer_sdk.errors import InvalidApiKey
from intezer_sdk.family import Family
from intezer_sdk.sub_analysis import SubAnalysis
from requests import HTTPError
''' CONSTS '''
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
IS_AVAILABLE_URL = 'is-available'
dbot_score_by_verdict = {
'malicious': 3,
'suspicious': 2,
'trusted': 1,
'neutral': 1,
'no_threats': 1
}
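# The numeric values above follow the Cortex XSOAR DBotScore scale:
# 0 = unknown, 1 = good, 2 = suspicious, 3 = bad/malicious.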
''' HELPER FUNCTIONS '''
def _get_missing_file_result(file_hash: str) -> CommandResults:
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': file_hash,
'Score': 0
}
return CommandResults(
readable_output=f'The Hash {file_hash} was not found on Intezer genome database',
outputs={
outputPaths['dbotscore']: dbot
}
)
def _get_missing_analysis_result(analysis_id: str, sub_analysis_id: str = None) -> CommandResults:
if not sub_analysis_id:
output = f'The Analysis {analysis_id} was not found on Intezer Analyze'
else:
output = f'Could not find the analysis \'{analysis_id}\' or the sub analysis \'{sub_analysis_id}\''
return CommandResults(
readable_output=output
)
def _get_missing_family_result(family_id: str) -> CommandResults:
return CommandResults(
readable_output=f'The Family {family_id} was not found on Intezer Analyze'
)
def _get_analysis_running_result(analysis_id: str = None, response: requests.Response = None) -> CommandResults:
if response:
analysis_id = response.json()['result_url'].split('/')[2]
context_json = {
'ID': analysis_id,
'Status': 'InProgress'
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
readable_output='Analysis is still in progress',
outputs=context_json
)
''' COMMANDS '''
def check_is_available(intezer_api: IntezerApi, args: dict) -> str:
try:
response = intezer_api.get_url_result(f'/{IS_AVAILABLE_URL}')
return 'ok' if response else 'Empty response from intezer service'
except InvalidApiKey as error:
return f'Invalid API key received.\n{error}'
except HTTPError as error:
return f'Error occurred when reaching Intezer Analyze. Please check Analyze Base URL. \n{error}'
except ConnectionError as error:
return f'Error connecting to Analyze Base url.\n{error}'
def analyze_by_hash_command(intezer_api: IntezerApi, args: Dict[str, str]) -> CommandResults:
file_hash = args.get('file_hash')
if not file_hash:
raise ValueError('Missing file hash')
analysis = Analysis(file_hash=file_hash, api=intezer_api)
try:
analysis.send()
analysis_id = analysis.analysis_id
context_json = {
'ID': analysis.analysis_id,
'Status': 'Created',
'type': 'File'
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
outputs=context_json,
readable_output='Analysis created successfully: {}'.format(analysis_id)
)
except HashDoesNotExistError:
return _get_missing_file_result(file_hash)
except AnalysisIsAlreadyRunning as error:
return _get_analysis_running_result(response=error.response)
def get_latest_result_command(intezer_api: IntezerApi, args: Dict[str, str]) -> CommandResults:
file_hash = args.get('file_hash')
if not file_hash:
raise ValueError('Missing file hash')
latest_analysis = get_latest_analysis(file_hash=file_hash, api=intezer_api)
if not latest_analysis:
return _get_missing_file_result(file_hash)
return enrich_dbot_and_display_file_analysis_results(latest_analysis.result())
def analyze_by_uploaded_file_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
file_id = args.get('file_entry_id')
file_data = demisto.getFilePath(file_id)
try:
analysis = Analysis(file_path=file_data['path'], api=intezer_api)
analysis.send()
context_json = {
'ID': analysis.analysis_id,
'Status': 'Created',
'type': 'File'
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
outputs=context_json,
readable_output='Analysis created successfully: {}'.format(analysis.analysis_id)
)
except AnalysisIsAlreadyRunning as error:
return _get_analysis_running_result(response=error.response)
def check_analysis_status_and_get_results_command(intezer_api: IntezerApi, args: dict) -> List[CommandResults]:
analysis_type = args.get('analysis_type', 'File')
analysis_ids = argToList(args.get('analysis_id'))
indicator_name = args.get('indicator_name')
command_results = []
for analysis_id in analysis_ids:
try:
if analysis_type == 'Endpoint':
response = intezer_api.get_url_result(f'/endpoint-analyses/{analysis_id}')
analysis_result = response.json()['result']
else:
analysis = get_analysis_by_id(analysis_id, api=intezer_api)
analysis_result = analysis.result()
if analysis_result and analysis_type == 'Endpoint':
command_results.append(
enrich_dbot_and_display_endpoint_analysis_results(analysis_result, indicator_name))
else:
command_results.append(enrich_dbot_and_display_file_analysis_results(analysis_result))
except HTTPError as http_error:
if http_error.response.status_code == HTTPStatus.CONFLICT:
command_results.append(_get_analysis_running_result(analysis_id=analysis_id))
elif http_error.response.status_code == HTTPStatus.NOT_FOUND:
command_results.append(_get_missing_analysis_result(analysis_id))
else:
raise http_error
except AnalysisIsStillRunning:
command_results.append(_get_analysis_running_result(analysis_id=analysis_id))
return command_results
def get_analysis_sub_analyses_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
analysis_id = args.get('analysis_id')
try:
analysis = get_analysis_by_id(analysis_id, api=intezer_api)
except HTTPError as error:
if error.response.status_code == HTTPStatus.NOT_FOUND:
return _get_missing_analysis_result(analysis_id=str(analysis_id))
except AnalysisIsStillRunning:
return _get_analysis_running_result(analysis_id=str(analysis_id))
sub_analyses: List[SubAnalysis] = analysis.get_sub_analyses()
all_sub_analyses_ids = [sub.analysis_id for sub in sub_analyses]
sub_analyses_table = tableToMarkdown('Sub Analyses', all_sub_analyses_ids, headers=['Analysis IDs'])
context_json = {
'ID': analysis.analysis_id,
'SubAnalysesIDs': all_sub_analyses_ids
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
readable_output=sub_analyses_table,
outputs=context_json,
raw_response=all_sub_analyses_ids
)
def get_analysis_code_reuse_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
analysis_id = args.get('analysis_id')
sub_analysis_id = args.get('sub_analysis_id', 'root')
try:
sub_analysis: SubAnalysis = SubAnalysis(analysis_id=sub_analysis_id,
composed_analysis_id=analysis_id,
sha256='',
source='',
api=intezer_api)
sub_analysis_code_reuse = sub_analysis.code_reuse
except HTTPError as error:
if error.response.status_code == HTTPStatus.NOT_FOUND:
return _get_missing_analysis_result(analysis_id=str(analysis_id))
elif error.response.status_code == HTTPStatus.CONFLICT:
return _get_analysis_running_result(analysis_id=str(analysis_id))
if not sub_analysis_code_reuse:
return CommandResults(
readable_output='No code reuse for this analysis'
)
families = sub_analysis_code_reuse.pop('families') if 'families' in sub_analysis_code_reuse else None
readable_output = tableToMarkdown('Code Reuse', sub_analysis_code_reuse)
if families:
readable_output += '\nFamilies:\n'
readable_output += '\n'.join(tableToMarkdown(family['family_name'], family) for family in families)
is_root = sub_analysis_id == 'root'
if is_root:
context_json = {
'Intezer.Analysis(obj.ID == val.ID)': {
'ID': analysis_id,
'CodeReuse': sub_analysis_code_reuse,
'CodeReuseFamilies': families
}
}
else:
context_json = {
'Intezer.Analysis(obj.RootAnalysis == val.ID).SubAnalyses(obj.ID == val.ID)': {
'ID': sub_analysis_id,
'RootAnalysis': analysis_id,
'CodeReuse': sub_analysis_code_reuse,
'CodeReuseFamilies': families
}
}
return CommandResults(
readable_output=readable_output,
outputs=context_json,
raw_response=sub_analysis.code_reuse
)
def get_analysis_metadata_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
analysis_id = args.get('analysis_id')
sub_analysis_id = args.get('sub_analysis_id', 'root')
try:
sub_analysis: SubAnalysis = SubAnalysis(analysis_id=sub_analysis_id,
composed_analysis_id=analysis_id,
sha256='',
source='',
api=intezer_api)
sub_analysis_metadata = sub_analysis.metadata
except HTTPError as error:
if error.response.status_code == HTTPStatus.NOT_FOUND:
return _get_missing_analysis_result(analysis_id=str(analysis_id))
elif error.response.status_code == HTTPStatus.CONFLICT:
return _get_analysis_running_result(analysis_id=str(analysis_id))
metadata_table = tableToMarkdown('Analysis Metadata', sub_analysis_metadata)
is_root = sub_analysis_id == 'root'
if is_root:
context_json = {
'Intezer.Analysis(obj.ID == val.ID)': {
'ID': analysis_id,
'Metadata': sub_analysis_metadata
}
}
else:
context_json = {
'Intezer.Analysis(obj.RootAnalysis == val.ID).SubAnalyses(obj.ID == val.ID)': {
'ID': sub_analysis_id,
'RootAnalysis': analysis_id,
'Metadata': sub_analysis_metadata
}
}
return CommandResults(
readable_output=metadata_table,
outputs=context_json,
raw_response=sub_analysis_metadata
)
def get_family_info_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
family_id = args.get('family_id')
family = Family(family_id, api=intezer_api)
try:
family.fetch_info()
except FamilyNotFoundError:
return _get_missing_family_result(str(family_id))
output = {
'ID': family_id,
'Name': family.name,
'Type': family.type
}
markdown = tableToMarkdown('Family Info', output)
return CommandResults(
readable_output=markdown,
outputs_prefix='Intezer.Family',
outputs=output
)
# region Enrich DBot
def enrich_dbot_and_display_file_analysis_results(intezer_result):
verdict = intezer_result.get('verdict')
sha256 = intezer_result.get('sha256')
analysis_id = intezer_result.get('analysis_id')
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': sha256,
'Score': dbot_score_by_verdict.get(verdict, 0)
}
file = {'SHA256': sha256, 'Metadata': intezer_result, 'ExistsInIntezer': True}
if verdict == 'malicious':
file['Malicious'] = {'Vendor': 'Intezer'}
md = tableToMarkdown('Analysis Report', intezer_result)
presentable_result = '## Intezer File analysis result\n'
presentable_result += f' SHA256: {sha256}\n'
presentable_result += f' Verdict: **{verdict}** ({intezer_result["sub_verdict"]})\n'
if 'family_name' in intezer_result:
presentable_result += f'Family: **{intezer_result["family_name"]}**\n'
presentable_result += f'[Analysis Link]({intezer_result["analysis_url"]})\n'
presentable_result += md
return CommandResults(
readable_output=presentable_result,
raw_response=intezer_result,
outputs={
outputPaths['dbotscore']: dbot,
outputPaths['file']: file,
'Intezer.Analysis(val.ID && val.ID == obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
}
)
def enrich_dbot_and_display_endpoint_analysis_results(intezer_result, indicator_name=None) -> CommandResults:
verdict = intezer_result['verdict']
computer_name = intezer_result['computer_name']
analysis_id = intezer_result['analysis_id']
dbot = {
'Vendor': 'Intezer',
'Type': 'hostname',
'Indicator': indicator_name if indicator_name else computer_name,
'Score': dbot_score_by_verdict.get(verdict, 0)
}
endpoint = {'Metadata': intezer_result}
presentable_result = '## Intezer Endpoint analysis result\n'
presentable_result += f'Host Name: {computer_name}\n'
presentable_result += f' Verdict: **{verdict}**\n'
if intezer_result.get('families') is not None:
presentable_result += f'Families: **{intezer_result["families"]}**\n'
presentable_result += f' Scan Time: {intezer_result["scan_start_time"]}\n'
presentable_result += f'[Analysis Link]({intezer_result["analysis_url"]})\n'
return CommandResults(
readable_output=presentable_result,
raw_response=intezer_result,
outputs={
outputPaths['dbotscore']: dbot,
'Endpoint': endpoint,
'Intezer.Analysis(val.ID && val.ID == obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
}
)
# endregion
''' EXECUTION CODE '''
def main():
command = None
try:
handle_proxy()
intezer_api_key = demisto.getParam('APIKey')
intezer_base_url_param = demisto.getParam('AnalyzeBaseURL')
use_ssl = not demisto.params().get('insecure', False)
analyze_base_url = intezer_base_url_param or consts.BASE_URL
intezer_api = IntezerApi(consts.API_VERSION, intezer_api_key, analyze_base_url, use_ssl)
command_handlers: Dict[str, Callable[[IntezerApi, dict], Union[List[CommandResults], CommandResults, str]]] = {
'test-module': check_is_available,
'intezer-analyze-by-hash': analyze_by_hash_command,
'intezer-analyze-by-file': analyze_by_uploaded_file_command,
'intezer-get-latest-report': get_latest_result_command,
'intezer-get-analysis-result': check_analysis_status_and_get_results_command,
'intezer-get-sub-analyses': get_analysis_sub_analyses_command,
'intezer-get-analysis-code-reuse': get_analysis_code_reuse_command,
'intezer-get-analysis-metadata': get_analysis_metadata_command,
'intezer-get-family-info': get_family_info_command
}
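        # 'test-module' is XSOAR's standard connectivity-test command; every
        # other key is an integration command name mapped to its handler above.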
command = demisto.command()
command_handler = command_handlers[command]
command_results = command_handler(intezer_api, demisto.args())
return_results(command_results)
except Exception as e:
return_error(f'Failed to execute {command} command. Error: {str(e)}')
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| demisto/content | Packs/Intezer/Integrations/IntezerV2/IntezerV2.py | Python | mit | 16,706 |
from schema import *
| ckan/ckanext-issues | ckanext/issues/logic/schema/__init__.py | Python | mit | 21 |
# -*- coding: utf-8 -*-
"""Test CLR field support."""
import System
import pytest
from Python.Test import FieldTest
def test_public_instance_field():
"""Test public instance fields."""
ob = FieldTest()
assert ob.PublicField == 0
ob.PublicField = 1
assert ob.PublicField == 1
with pytest.raises(TypeError):
del FieldTest().PublicField
def test_public_static_field():
"""Test public static fields."""
ob = FieldTest()
assert FieldTest.PublicStaticField == 0
FieldTest.PublicStaticField = 1
assert FieldTest.PublicStaticField == 1
assert ob.PublicStaticField == 1
ob.PublicStaticField = 0
assert ob.PublicStaticField == 0
with pytest.raises(TypeError):
del FieldTest.PublicStaticField
with pytest.raises(TypeError):
del FieldTest().PublicStaticField
def test_protected_instance_field():
"""Test protected instance fields."""
ob = FieldTest()
assert ob.ProtectedField == 0
ob.ProtectedField = 1
assert ob.ProtectedField == 1
with pytest.raises(TypeError):
del FieldTest().ProtectedField
def test_protected_static_field():
"""Test protected static fields."""
ob = FieldTest()
assert FieldTest.ProtectedStaticField == 0
FieldTest.ProtectedStaticField = 1
assert FieldTest.ProtectedStaticField == 1
assert ob.ProtectedStaticField == 1
ob.ProtectedStaticField = 0
assert ob.ProtectedStaticField == 0
with pytest.raises(TypeError):
del FieldTest.ProtectedStaticField
with pytest.raises(TypeError):
del FieldTest().ProtectedStaticField
def test_read_only_instance_field():
"""Test readonly instance fields."""
assert FieldTest().ReadOnlyField == 0
with pytest.raises(TypeError):
FieldTest().ReadOnlyField = 1
with pytest.raises(TypeError):
del FieldTest().ReadOnlyField
def test_read_only_static_field():
"""Test readonly static fields."""
ob = FieldTest()
assert FieldTest.ReadOnlyStaticField == 0
assert ob.ReadOnlyStaticField == 0
with pytest.raises(TypeError):
FieldTest.ReadOnlyStaticField = 1
with pytest.raises(TypeError):
FieldTest().ReadOnlyStaticField = 1
with pytest.raises(TypeError):
del FieldTest.ReadOnlyStaticField
with pytest.raises(TypeError):
del FieldTest().ReadOnlyStaticField
def test_constant_field():
"""Test const fields."""
ob = FieldTest()
assert FieldTest.ConstField == 0
assert ob.ConstField == 0
with pytest.raises(TypeError):
FieldTest().ConstField = 1
with pytest.raises(TypeError):
FieldTest.ConstField = 1
with pytest.raises(TypeError):
del FieldTest().ConstField
with pytest.raises(TypeError):
del FieldTest.ConstField
def test_internal_field():
"""Test internal fields."""
with pytest.raises(AttributeError):
_ = FieldTest().InternalField
with pytest.raises(AttributeError):
_ = FieldTest().InternalStaticField
with pytest.raises(AttributeError):
_ = FieldTest.InternalStaticField
def test_private_field():
"""Test private fields."""
with pytest.raises(AttributeError):
_ = FieldTest().PrivateField
with pytest.raises(AttributeError):
_ = FieldTest().PrivateStaticField
with pytest.raises(AttributeError):
_ = FieldTest.PrivateStaticField
def test_field_descriptor_get_set():
"""Test field descriptor get / set."""
# This test ensures that setting an attribute implemented with
# a descriptor actually goes through the descriptor (rather than
    # silently replacing the descriptor in the instance or type dict).
ob = FieldTest()
assert FieldTest.PublicStaticField == 0
assert ob.PublicStaticField == 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
ob.PublicStaticField = 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
FieldTest.PublicStaticField = 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
def test_field_descriptor_wrong_type():
"""Test setting a field using a value of the wrong type."""
with pytest.raises(ValueError):
FieldTest().PublicField = "spam"
def test_field_descriptor_abuse():
"""Test field descriptor abuse."""
desc = FieldTest.__dict__['PublicField']
with pytest.raises(TypeError):
desc.__get__(0, 0)
with pytest.raises(TypeError):
desc.__set__(0, 0)
def test_boolean_field():
"""Test boolean fields."""
# change this to true / false later for Python 2.3?
ob = FieldTest()
assert ob.BooleanField is False
ob.BooleanField = True
assert ob.BooleanField is True
ob.BooleanField = False
assert ob.BooleanField is False
ob.BooleanField = 1
assert ob.BooleanField is True
ob.BooleanField = 0
assert ob.BooleanField is False
def test_sbyte_field():
"""Test sbyte fields."""
ob = FieldTest()
assert ob.SByteField == 0
ob.SByteField = 1
assert ob.SByteField == 1
def test_byte_field():
"""Test byte fields."""
ob = FieldTest()
assert ob.ByteField == 0
ob.ByteField = 1
assert ob.ByteField == 1
def test_char_field():
"""Test char fields."""
ob = FieldTest()
assert ob.CharField == u'A'
assert ob.CharField == 'A'
ob.CharField = 'B'
assert ob.CharField == u'B'
assert ob.CharField == 'B'
ob.CharField = u'C'
assert ob.CharField == u'C'
assert ob.CharField == 'C'
def test_int16_field():
"""Test int16 fields."""
ob = FieldTest()
assert ob.Int16Field == 0
ob.Int16Field = 1
assert ob.Int16Field == 1
def test_int32_field():
"""Test int32 fields."""
ob = FieldTest()
assert ob.Int32Field == 0
ob.Int32Field = 1
assert ob.Int32Field == 1
def test_int64_field():
"""Test int64 fields."""
ob = FieldTest()
assert ob.Int64Field == 0
ob.Int64Field = 1
assert ob.Int64Field == 1
def test_uint16_field():
"""Test uint16 fields."""
ob = FieldTest()
assert ob.UInt16Field == 0
ob.UInt16Field = 1
assert ob.UInt16Field == 1
def test_uint32_field():
"""Test uint32 fields."""
ob = FieldTest()
assert ob.UInt32Field == 0
ob.UInt32Field = 1
assert ob.UInt32Field == 1
def test_uint64_field():
"""Test uint64 fields."""
ob = FieldTest()
assert ob.UInt64Field == 0
ob.UInt64Field = 1
assert ob.UInt64Field == 1
def test_single_field():
"""Test single fields."""
ob = FieldTest()
assert ob.SingleField == 0.0
ob.SingleField = 1.1
assert ob.SingleField == 1.1
def test_double_field():
"""Test double fields."""
ob = FieldTest()
assert ob.DoubleField == 0.0
ob.DoubleField = 1.1
assert ob.DoubleField == 1.1
def test_decimal_field():
"""Test decimal fields."""
ob = FieldTest()
assert ob.DecimalField == System.Decimal(0)
ob.DecimalField = System.Decimal(1)
assert ob.DecimalField == System.Decimal(1)
def test_string_field():
"""Test string fields."""
ob = FieldTest()
assert ob.StringField == "spam"
ob.StringField = "eggs"
assert ob.StringField == "eggs"
def test_interface_field():
"""Test interface fields."""
from Python.Test import Spam, ISpam
ob = FieldTest()
assert ISpam(ob.SpamField).GetValue() == "spam"
assert ob.SpamField.GetValue() == "spam"
ob.SpamField = Spam("eggs")
assert ISpam(ob.SpamField).GetValue() == "eggs"
assert ob.SpamField.GetValue() == "eggs"
def test_object_field():
"""Test ob fields."""
ob = FieldTest()
assert ob.ObjectField is None
ob.ObjectField = System.String("spam")
assert ob.ObjectField == "spam"
ob.ObjectField = System.Int32(1)
assert ob.ObjectField == 1
ob.ObjectField = None
assert ob.ObjectField is None
def test_enum_field():
"""Test enum fields."""
from Python.Test import ShortEnum
ob = FieldTest()
assert ob.EnumField == ShortEnum.Zero
ob.EnumField = ShortEnum.One
assert ob.EnumField == ShortEnum.One
def test_nullable_field():
"""Test nullable fields."""
ob = FieldTest()
ob.StringField = None
assert ob.StringField is None
ob.ObjectField = None
assert ob.ObjectField is None
ob.SpamField = None
assert ob.SpamField is None
# Primitive types and enums should not be set to null.
with pytest.raises(TypeError):
FieldTest().Int32Field = None
with pytest.raises(TypeError):
FieldTest().EnumField = None
| AlexCatarino/pythonnet | tests/test_field.py | Python | mit | 8,719 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Usage(Model):
"""Describes Storage Resource Usage.
:param unit: The unit of measurement. Possible values include: 'Count',
'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond'
:type unit: str or :class:`UsageUnit
<azure.mgmt.storage.v2015_06_15.models.UsageUnit>`
:param current_value: The current count of the allocated resources in the
subscription.
:type current_value: int
:param limit: The maximum count of the resources that can be allocated in
the subscription.
:type limit: int
:param name: The name of the type of usage.
:type name: :class:`UsageName
<azure.mgmt.storage.v2015_06_15.models.UsageName>`
"""
_validation = {
'unit': {'required': True},
'current_value': {'required': True},
'limit': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'UsageUnit'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'UsageName'},
}
def __init__(self, unit, current_value, limit, name):
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name
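# Illustrative construction (the argument values below are assumptions, not
# taken from the service):
#
# usage = Usage(unit=UsageUnit.count,
#               current_value=5,
#               limit=250,
#               name=UsageName(value='StorageAccounts',
#                              localized_value='Storage Accounts'))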
| SUSE/azure-sdk-for-python | azure-mgmt-storage/azure/mgmt/storage/v2015_06_15/models/usage.py | Python | mit | 1,829 |
#!/usr/bin/env python
# ======================================================================
import pangloss
import sys,getopt,cPickle,numpy
import scipy.stats as stats
# ======================================================================
def Calibrate(argv):
"""
NAME
Calibrate.py
PURPOSE
Transform the results of the lightcone reconstruction process,
Pr(kappah|D), into our target PDF, Pr(kappa|D).
COMMENTS
All PDF input is provided as a list of samples. There are two
modes of operation:
1) The Pr(kappah|C) for an ensemble of calibration lightcones are
compressed into a single number (currently the
median), and then combined with the true kappa values to make
Pr(kappa,kappah|C). This is written out as a 2D sample list.
2) The Pr(kappah|D) for a single observed lightcone is compressed
into a single number (currently the median). This is then used
to take a slice from Pr(kappa,kappah|C) to make Pr(kappa|D,C).
Both 1 and 2 can be carried out in series if desired (Mode=3).
FLAGS
-h Print this message [0]
INPUTS
configfile Plain text file containing Pangloss configuration
OPTIONAL INPUTS
--mode Operating mode 1,2 or 3. See COMMENTS above.
OUTPUTS
stdout Useful information
samples From 1) Pr(kappa,kappah|C) or 2) Pr(kappa|D,C)
EXAMPLE
Calibrate.py example.config
BUGS
AUTHORS
This file is part of the Pangloss project, distributed under the
GPL v2, by Tom Collett (IoA) and Phil Marshall (Oxford).
Please cite: Collett et al 2013, http://arxiv.org/abs/1303.6564
HISTORY
2013-03-21 started Collett & Marshall (Oxford)
"""
# --------------------------------------------------------------------
try:
opts, args = getopt.getopt(argv,"hm:",["help","mode"])
except getopt.GetoptError, err:
print str(err) # will print something like "option -a not recognized"
print Calibrate.__doc__ # will print the big comment above.
return
Mode=3
for o,a in opts:
if o in ("-h", "--help"):
print Calibrate.__doc__
return
elif o in ("-m", "--mode"):
Mode = int(a)
assert Mode < 4 and Mode >0, "unhandled Mode"
else:
assert False, "unhandled option"
# Check for setup file in array args:
if len(args) == 1:
configfile = args[0]
print pangloss.doubledashedline
print pangloss.hello
print pangloss.doubledashedline
print "Calibrate: transforming Pr(kappah|D) to Pr(kappa|D)"
print "Calibrate: taking instructions from",configfile
else:
print Calibrate.__doc__
return
# --------------------------------------------------------------------
# Read in configuration, and extract the ones we need:
experiment = pangloss.Configuration(configfile)
EXP_NAME = experiment.parameters['ExperimentName']
Nc = experiment.parameters['NCalibrationLightcones']
comparator=experiment.parameters['Comparator']
comparatorType=experiment.parameters['ComparatorType']
comparatorWidth=experiment.parameters['ComparatorWidth']
# Figure out which mode is required:
ModeName = experiment.parameters['CalibrateMode']
if ModeName=='Joint': Mode = 1
if ModeName=='Slice': Mode = 2
if ModeName=='JointAndSlice': Mode = 3
CALIB_DIR = experiment.parameters['CalibrationFolder'][0]
jointdistfile= CALIB_DIR+'/'+comparator+'_'+comparatorType+'.pickle'
jointdistasPDFfile= CALIB_DIR+'/'+comparator+'_'+comparatorType+'_asPDF.pickle'
# Final result is PDF for kappa:
x = experiment.parameters['ObservedCatalog'][0]
resultfile = x.split('.')[0]+"_"+EXP_NAME+"_PofKappa.pickle"
# --------------------------------------------------------------------
# Mode 1: generate a joint distribution, eg Pr(kappah,kappa)
# from the calibration dataset:
if Mode==1 or Mode==3:
print pangloss.dashedline
# First find the calibration pdfs for kappa_h:
calpickles = []
for i in range(Nc):
calpickles.append(experiment.getLightconePickleName('simulated',pointing=i))
calresultpickles=[]
if comparator=="Kappah" and comparatorType=="median":
for i in range(Nc):
x = calpickles[i]
pfile = x.split('.')[0].split("_lightcone")[0]+"_"+EXP_NAME+"_KappaHilbert_Kappah_median.pickle"
calresultpickles.append(pfile)
elif comparator=="Kappah" and comparatorType!="median":
for i in range(Nc):
x = calpickles[i]
pfile = x.split('.')[0].split("_lightcone")[0]+"_"+EXP_NAME+"_KappaHilbert_Kappah_"+comparatorType+".pickle"
calresultpickles.append(pfile)
else:
print "Calibrate: Unrecognised comparator "+Comparator
print "Calibrate: If you want to use a comparator other than kappa_h, "
print "Calibrate: you'll need to code it up!"
print "Calibrate: (This should be easy, but you can ask [email protected] for help)."
exit()
# Now calculate comparators:
callist=numpy.empty((Nc,2))
jd=pangloss.PDF(["kappa_ext",comparator+'_'+comparatorType])
for i in range(Nc):
C = calresultpickles[i]
pdf = pangloss.readPickle(C)
if comparator=="Kappah":
if comparatorType=="median":
# Recall that we created a special file for this
# choice of comparator and comparator type, in
# Reconstruct. You could also use the
# comparatortype=="mean" code, swapping mean for median.
callist[i,0]=pdf[0]
callist[i,1]=pdf[1][0]
elif comparatorType=="mean":
callist[i,0] = pdf.truth[0]
callist[i,1] = numpy.mean(pdf.samples)
else:
print "Calibrate: Unrecognised comparatorType "+comparatorType
print "Calibrate: If you want to use a comparatorType other than median "
print "Calibrate: or mean, you'll need to code it up!"
print "Calibrate: (This should be easy, but you can ask [email protected] for help)."
exit()
jd.append(callist[i])
pangloss.writePickle(callist,jointdistfile)
# Also store the joint dist as a pangloss pdf:
pangloss.writePickle(jd,jointdistasPDFfile)
# Plot:
plotfile = jointdistasPDFfile.split('.')[0]+'.png'
jd.plot("Kappah_median","kappa_ext",weight=None,output=plotfile,title="The joint distribution of $\kappa_{\mathrm{ext}}$ and calibrator \n\n (more correlated means a better calibrator!)")
print "Calibrate: calibration joint PDF saved in:"
print "Calibrate: "+jointdistfile
print "Calibrate: and "+jointdistasPDFfile
print "Calibrate: you can view this PDF in "+plotfile
# --------------------------------------------------------------------
# Mode 2: calibrate a real line of sight's Pr(kappah|D) using the
# joint distribution Pr(kappa,<kappah>|D)
if Mode==2 or Mode==3:
print pangloss.dashedline
callibguide = pangloss.readPickle(jointdistfile)
obspickle = experiment.getLightconePickleName('real')
pfile = obspickle.split('.')[0].split("_lightcone")[0]+'_'+EXP_NAME+"_PofKappah.pickle"
pdf=pangloss.readPickle(pfile)
if comparator=="Kappah":
if comparatorType=="median":# note we created a special file for this choice of comparator and comparator type. You could also use the comparatortype=="mean" code swapping mean for median.
RealComparator=numpy.median(pdf.samples)
elif comparatorType=="mean":
RealComparator=numpy.mean(pdf.samples)
else:
print "I don't know that comparatorType. exiting"
exit()
pdf = pangloss.PDF(["kappa_ext","weight"])
#print RealComparator
#print numpy.median(callibguide[:,1]),numpy.std(callibguide[:,1])
dif=(callibguide[:,1]-RealComparator)
weights=dif*0.0
weights[numpy.abs(dif)<comparatorWidth]=1.
weights/=numpy.sum(weights)
samples=callibguide[:,0]
samplesandweights=callibguide.copy()
samplesandweights[:,1]=weights
pdf.samples=(samplesandweights)
plotfile = resultfile.split('.')[0]+".png"
pdf.plot('kappa_ext',weight='weight',output=plotfile)
average = numpy.average(samples, weights=weights)
variance = numpy.dot(weights, (samples-average)**2)/weights.sum()
average,std=average, variance**.5
#if step function weights can calculate 68%CL easily:
included=samples[weights>0]
onesigconfidence=numpy.abs(\
stats.scoreatpercentile(included,84)-
stats.scoreatpercentile(included,16)\
)/2.
pangloss.writePickle(pdf,resultfile)
print "Calibrate: your reconstructed lightcone has been calibrated,"
print "Calibrate: suggesting it has a kappa_ext of",\
"%.3f +\- %.3f"%(average,onesigconfidence)
print "Calibrate: the PDF for kappa_ext has been output to "+resultfile
print "Calibrate: in the form of sample kappa_ext values, and their weights."
print "Calibrate: you can view this PDF in "+plotfile
print
print "Calibrate: To read and process this file, try:"
print
print " import pangloss"
print " pdf = pangloss.readPickle(\"%s\")"%resultfile
print " kappa_samples = pdf.getParameter(\"kappa_ext\")"
print " kappa_weights = pdf.getParameter(\"weight\")"
# --------------------------------------------------------------------
print
print pangloss.doubledashedline
return resultfile,jointdistasPDFfile
# ======================================================================
if __name__ == '__main__':
Calibrate(sys.argv[1:])
# ======================================================================
| enoordeh/Pangloss | Calibrate.py | Python | gpl-2.0 | 10,551 |
from pulp.bindings import auth, consumer, consumer_groups, repo_groups, repository
from pulp.bindings.actions import ActionsAPI
from pulp.bindings.content import OrphanContentAPI, ContentSourceAPI, ContentCatalogAPI
from pulp.bindings.event_listeners import EventListenerAPI
from pulp.bindings.server_info import ServerInfoAPI, ServerStatusAPI
from pulp.bindings.tasks import TasksAPI, TaskSearchAPI
from pulp.bindings.upload import UploadAPI
class Bindings(object):
def __init__(self, pulp_connection):
"""
@type: pulp_connection: pulp.bindings.server.PulpConnection
"""
# Please keep the following in alphabetical order to ease reading
self.actions = ActionsAPI(pulp_connection)
self.bind = consumer.BindingsAPI(pulp_connection)
self.bindings = consumer.BindingSearchAPI(pulp_connection)
self.profile = consumer.ProfilesAPI(pulp_connection)
self.consumer = consumer.ConsumerAPI(pulp_connection)
self.consumer_content = consumer.ConsumerContentAPI(pulp_connection)
self.consumer_content_schedules = consumer.ConsumerContentSchedulesAPI(pulp_connection)
self.consumer_group = consumer_groups.ConsumerGroupAPI(pulp_connection)
self.consumer_group_search = consumer_groups.ConsumerGroupSearchAPI(pulp_connection)
self.consumer_group_actions = consumer_groups.ConsumerGroupActionAPI(pulp_connection)
self.consumer_group_bind = consumer_groups.ConsumerGroupBindAPI(pulp_connection)
self.consumer_group_content = consumer_groups.ConsumerGroupContentAPI(pulp_connection)
self.consumer_history = consumer.ConsumerHistoryAPI(pulp_connection)
self.consumer_search = consumer.ConsumerSearchAPI(pulp_connection)
self.content_orphan = OrphanContentAPI(pulp_connection)
self.content_source = ContentSourceAPI(pulp_connection)
self.content_catalog = ContentCatalogAPI(pulp_connection)
self.event_listener = EventListenerAPI(pulp_connection)
self.permission = auth.PermissionAPI(pulp_connection)
self.repo = repository.RepositoryAPI(pulp_connection)
self.repo_actions = repository.RepositoryActionsAPI(pulp_connection)
self.repo_distributor = repository.RepositoryDistributorAPI(pulp_connection)
self.repo_group = repo_groups.RepoGroupAPI(pulp_connection)
self.repo_group_actions = repo_groups.RepoGroupActionAPI(pulp_connection)
self.repo_group_distributor = repo_groups.RepoGroupDistributorAPI(pulp_connection)
self.repo_group_distributor_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
self.repo_group_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
self.repo_history = repository.RepositoryHistoryAPI(pulp_connection)
self.repo_importer = repository.RepositoryImporterAPI(pulp_connection)
self.repo_publish_schedules = repository.RepositoryPublishSchedulesAPI(pulp_connection)
self.repo_search = repository.RepositorySearchAPI(pulp_connection)
self.repo_sync_schedules = repository.RepositorySyncSchedulesAPI(pulp_connection)
self.repo_unit = repository.RepositoryUnitAPI(pulp_connection)
self.role = auth.RoleAPI(pulp_connection)
self.server_info = ServerInfoAPI(pulp_connection)
self.server_status = ServerStatusAPI(pulp_connection)
self.tasks = TasksAPI(pulp_connection)
self.tasks_search = TaskSearchAPI(pulp_connection)
self.uploads = UploadAPI(pulp_connection)
self.user = auth.UserAPI(pulp_connection)
self.user_search = auth.UserSearchAPI(pulp_connection)
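# Hypothetical usage sketch. PulpConnection lives in pulp.bindings.server (see
# the docstring above); the credentials and the sub-API method names below are
# shown for illustration only.
#
#   from pulp.bindings.server import PulpConnection
#   conn = PulpConnection('localhost.localdomain', username='admin', password='admin')
#   api = Bindings(conn)
#   status = api.server_status.get_status()
#   repos = api.repo.repositories(query_parameters={})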
| rbramwell/pulp | bindings/pulp/bindings/bindings.py | Python | gpl-2.0 | 3,641 |
import re
WHITE_LIST = {
'names': {
'eno': {},
'evo': {},
'ii': {},
'li': {'alias': 'Ii'},
'utö': {},
'usa': {}
},
'patterns': [
{
'find': re.compile('([A-ZÄÖa-zäö-]*)(mlk)'),
'replace': r'\1 mlk'
}
]
} | Learning-from-our-past/Kaira | names/location_name_white_list.py | Python | gpl-2.0 | 260 |
#
# exception.py - general exception formatting and saving
#
# Copyright (C) 2000-2013 Red Hat, Inc.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Chris Lumens <[email protected]>
# David Cantrell <[email protected]>
# Vratislav Podzimek <[email protected]>
#
from meh import Config
from meh.handler import ExceptionHandler
from meh.dump import ReverseExceptionDump
from pyanaconda import iutil, kickstart
import sys
import os
import shutil
import time
import re
import errno
import glob
import traceback
import blivet.errors
from pyanaconda.errors import CmdlineError
from pyanaconda.ui.communication import hubQ
from pyanaconda.constants import THREAD_EXCEPTION_HANDLING_TEST, IPMI_FAILED
from pyanaconda.threads import threadMgr
from pyanaconda.i18n import _
from pyanaconda import flags
from pyanaconda import startup_utils
from gi.repository import GLib
import logging
log = logging.getLogger("anaconda")
class AnacondaExceptionHandler(ExceptionHandler):
def __init__(self, confObj, intfClass, exnClass, tty_num, gui_lock, interactive):
"""
:see: python-meh's ExceptionHandler
:param tty_num: the number of tty the interface is running on
"""
ExceptionHandler.__init__(self, confObj, intfClass, exnClass)
self._gui_lock = gui_lock
self._intf_tty_num = tty_num
self._interactive = interactive
def _main_loop_handleException(self, dump_info):
"""
Helper method with one argument only so that it can be registered
with GLib.idle_add() to run on idle or called from a handler.
:type dump_info: an instance of the meh.DumpInfo class
"""
ty = dump_info.exc_info.type
value = dump_info.exc_info.value
if (issubclass(ty, blivet.errors.StorageError) and value.hardware_fault) \
or (issubclass(ty, OSError) and value.errno == errno.EIO):
# hardware fault or '[Errno 5] Input/Output error'
hw_error_msg = _("The installation was stopped due to what "
"seems to be a problem with your hardware. "
"The exact error message is:\n\n%s.\n\n "
"The installer will now terminate.") % str(value)
            self.intf.messageWindow(_("Hardware error occurred"), hw_error_msg)
sys.exit(0)
elif isinstance(value, blivet.errors.UnusableConfigurationError):
sys.exit(0)
else:
super(AnacondaExceptionHandler, self).handleException(dump_info)
return False
def handleException(self, dump_info):
"""
Our own handleException method doing some additional stuff before
calling the original python-meh's one.
:type dump_info: an instance of the meh.DumpInfo class
:see: python-meh's ExceptionHandler.handleException
"""
log.debug("running handleException")
exception_lines = traceback.format_exception(*dump_info.exc_info)
log.critical("\n".join(exception_lines))
ty = dump_info.exc_info.type
value = dump_info.exc_info.value
try:
from gi.repository import Gtk
# XXX: Gtk stopped raising RuntimeError if it fails to
            # initialize. Hooray! But will it stay like this? Let's be
# cautious and raise the exception on our own to work in both
# cases
initialized = Gtk.init_check(None)[0]
if not initialized:
raise RuntimeError()
# Attempt to grab the GUI initializing lock, do not block
if not self._gui_lock.acquire(False):
# the graphical interface is running, don't crash it by
# running another one potentially from a different thread
log.debug("Gtk running, queuing exception handler to the "
"main loop")
GLib.idle_add(self._main_loop_handleException, dump_info)
else:
log.debug("Gtk not running, starting Gtk and running "
"exception handler in it")
self._main_loop_handleException(dump_info)
except (RuntimeError, ImportError):
log.debug("Gtk cannot be initialized")
# X not running (Gtk cannot be initialized)
if threadMgr.in_main_thread():
log.debug("In the main thread, running exception handler")
if issubclass(ty, CmdlineError) or not self._interactive:
if issubclass(ty, CmdlineError):
cmdline_error_msg = _("\nThe installation was stopped due to "
"incomplete spokes detected while running "
"in non-interactive cmdline mode. Since there "
"cannot be any questions in cmdline mode, "
"edit your kickstart file and retry "
"installation.\nThe exact error message is: "
"\n\n%s.\n\nThe installer will now terminate.") % str(value)
else:
cmdline_error_msg = _("\nRunning in cmdline mode, no interactive debugging "
"allowed.\nThe exact error message is: "
"\n\n%s.\n\nThe installer will now terminate.") % str(value)
# since there is no UI in cmdline mode and it is completely
# non-interactive, we can't show a message window asking the user
# to acknowledge the error; instead, print the error out and sleep
# for a few seconds before exiting the installer
print(cmdline_error_msg)
time.sleep(10)
sys.exit(1)
else:
print("\nAn unknown error has occured, look at the "
"/tmp/anaconda-tb* file(s) for more details")
# in the main thread, run exception handler
self._main_loop_handleException(dump_info)
else:
log.debug("In a non-main thread, sending a message with "
"exception data")
# not in the main thread, just send message with exception
# data and let message handler run the exception handler in
# the main thread
exc_info = dump_info.exc_info
hubQ.send_exception((exc_info.type,
exc_info.value,
exc_info.stack))
def postWriteHook(self, dump_info):
anaconda = dump_info.object
# See if there is a /root present in the root path and put exception there as well
if os.access(iutil.getSysroot() + "/root", os.X_OK):
try:
dest = iutil.getSysroot() + "/root/%s" % os.path.basename(self.exnFile)
shutil.copyfile(self.exnFile, dest)
except (shutil.Error, IOError):
log.error("Failed to copy %s to %s/root", self.exnFile, iutil.getSysroot())
# run kickstart traceback scripts (if necessary)
try:
kickstart.runTracebackScripts(anaconda.ksdata.scripts)
# pylint: disable=bare-except
except:
pass
iutil.ipmi_report(IPMI_FAILED)
def runDebug(self, exc_info):
if flags.can_touch_runtime_system("switch console") \
and self._intf_tty_num != 1:
iutil.vtActivate(1)
iutil.eintr_retry_call(os.open, "/dev/console", os.O_RDWR) # reclaim stdin
iutil.eintr_retry_call(os.dup2, 0, 1) # reclaim stdout
iutil.eintr_retry_call(os.dup2, 0, 2) # reclaim stderr
# ^
# |
# +------ dup2 is magic, I tells ya!
# bring back the echo
import termios
si = sys.stdin.fileno()
attr = termios.tcgetattr(si)
attr[3] = attr[3] & termios.ECHO
termios.tcsetattr(si, termios.TCSADRAIN, attr)
print("\nEntering debugger...")
print("Use 'continue' command to quit the debugger and get back to "\
"the main window")
import pdb
pdb.post_mortem(exc_info.stack)
if flags.can_touch_runtime_system("switch console") \
and self._intf_tty_num != 1:
iutil.vtActivate(self._intf_tty_num)
def initExceptionHandling(anaconda):
fileList = ["/tmp/anaconda.log", "/tmp/packaging.log",
"/tmp/program.log", "/tmp/storage.log", "/tmp/ifcfg.log",
"/tmp/dnf.log", "/tmp/dnf.rpm.log",
"/tmp/yum.log", iutil.getSysroot() + "/root/install.log",
"/proc/cmdline"]
if os.path.exists("/tmp/syslog"):
fileList.extend(["/tmp/syslog"])
if anaconda.opts and anaconda.opts.ksfile:
fileList.extend([anaconda.opts.ksfile])
conf = Config(programName="anaconda",
programVersion=startup_utils.get_anaconda_version_string(),
programArch=os.uname()[4],
attrSkipList=["_intf._actions",
"_intf._currentAction._xklwrapper",
"_intf._currentAction._spokes[\"KeyboardSpoke\"]._xkl_wrapper",
"_intf._currentAction._storage_playground",
"_intf._currentAction._spokes[\"CustomPartitioningSpoke\"]._storage_playground",
"_intf._currentAction.language.translations",
"_intf._currentAction.language.locales",
"_intf._currentAction._spokes[\"PasswordSpoke\"]._oldweak",
"_intf._currentAction._spokes[\"PasswordSpoke\"]._password",
"_intf._currentAction._spokes[\"UserSpoke\"]._password",
"_intf._currentAction._spokes[\"UserSpoke\"]._oldweak",
"_intf.storage.bootloader.password",
"_intf.storage.data",
"_intf.storage.encryptionPassphrase",
"_bootloader.encrypted_password",
"_bootloader.password",
"payload._groups"],
localSkipList=["passphrase", "password", "_oldweak", "_password"],
fileList=fileList)
conf.register_callback("lsblk_output", lsblk_callback, attchmnt_only=True)
conf.register_callback("nmcli_dev_list", nmcli_dev_list_callback,
attchmnt_only=True)
conf.register_callback("type", lambda: "anaconda", attchmnt_only=True)
conf.register_callback("addons", list_addons_callback, attchmnt_only=False)
if "/tmp/syslog" not in fileList:
# no syslog, grab output from journalctl and put it also to the
# anaconda-tb file
conf.register_callback("journalctl", journalctl_callback, attchmnt_only=False)
interactive = not anaconda.displayMode == 'c'
handler = AnacondaExceptionHandler(conf, anaconda.intf.meh_interface,
ReverseExceptionDump, anaconda.intf.tty_num,
anaconda.gui_initialized, interactive)
handler.install(anaconda)
return conf
def lsblk_callback():
"""Callback to get info about block devices."""
return iutil.execWithCapture("lsblk", ["--perms", "--fs", "--bytes"])
def nmcli_dev_list_callback():
"""Callback to get info about network devices."""
return iutil.execWithCapture("nmcli", ["device", "show"])
def journalctl_callback():
"""Callback to get logs from journalctl."""
# regex to filter log messages from anaconda's process (we have that in our
# logs)
anaconda_log_line = re.compile(r"\[%d\]:" % os.getpid())
ret = ""
for line in iutil.execReadlines("journalctl", ["-b"]):
if anaconda_log_line.search(line) is None:
# not an anaconda's message
ret += line + "\n"
return ret
def list_addons_callback():
"""
Callback to get info about the addons potentially affecting Anaconda's
behaviour.
"""
# list available addons and take their package names
addon_pkgs = glob.glob("/usr/share/anaconda/addons/*")
return ", ".join(addon.rsplit("/", 1)[1] for addon in addon_pkgs)
def test_exception_handling():
"""
Function that can be used for testing exception handling in anaconda. It
tries to prepare a worst case scenario designed from bugs seen so far.
"""
    # XXX: this is a huge hack, but probably the only way we can get
# "unique" stack and thus unique hash and new bugreport
def raise_exception(msg, non_ascii):
timestamp = str(time.time()).split(".", 1)[0]
code = """
def f%s(msg, non_ascii):
raise RuntimeError(msg)
f%s(msg, non_ascii)
""" % (timestamp, timestamp)
eval(compile(code, "str_eval", "exec"))
# test non-ascii characters dumping
non_ascii = u'\u0159'
msg = "NOTABUG: testing exception handling"
# raise exception from a separate thread
from pyanaconda.threads import AnacondaThread
threadMgr.add(AnacondaThread(name=THREAD_EXCEPTION_HANDLING_TEST,
target=raise_exception,
args=(msg, non_ascii)))
| maxamillion/anaconda | pyanaconda/exception.py | Python | gpl-2.0 | 14,461 |
#
# Copyright 2015-2020 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
import marshal
import pickle
from testlib import VdsmTestCase
from testlib import expandPermutations, permutations
from vdsm.common.compat import json
from vdsm.common.password import (
ProtectedPassword,
protect_passwords,
unprotect_passwords,
)
class ProtectedPasswordTests(VdsmTestCase):
def test_str(self):
p = ProtectedPassword("12345678")
self.assertNotIn("12345678", str(p))
def test_repr(self):
p = ProtectedPassword("12345678")
self.assertNotIn("12345678", repr(p))
def test_value(self):
p = ProtectedPassword("12345678")
self.assertEqual("12345678", p.value)
def test_eq(self):
p1 = ProtectedPassword("12345678")
p2 = ProtectedPassword("12345678")
self.assertEqual(p1, p2)
def test_ne(self):
p1 = ProtectedPassword("12345678")
p2 = ProtectedPassword("12345678")
self.assertFalse(p1 != p2)
def test_pickle_copy(self):
p1 = ProtectedPassword("12345678")
p2 = pickle.loads(pickle.dumps(p1))
self.assertEqual(p1, p2)
def test_no_marshal(self):
p1 = ProtectedPassword("12345678")
self.assertRaises(ValueError, marshal.dumps, p1)
def test_no_json(self):
p1 = ProtectedPassword("12345678")
self.assertRaises(TypeError, json.dumps, p1)
@expandPermutations
class ProtectTests(VdsmTestCase):
@permutations([[list()], [dict()], [tuple()]])
def test_protect_empty(self, params):
self.assertEqual(params, protect_passwords(params))
@permutations([[list()], [dict()], [tuple()]])
def test_unprotect_empty(self, result):
self.assertEqual(result, unprotect_passwords(result))
def test_protect_dict(self):
unprotected = dict_unprotected()
protected = dict_protected()
self.assertEqual(protected, protect_passwords(unprotected))
def test_unprotect_dict(self):
protected = dict_protected()
unprotected = dict_unprotected()
self.assertEqual(unprotected, unprotect_passwords(protected))
def test_protect_nested_dicts(self):
unprotected = nested_dicts_unprotected()
protected = nested_dicts_protected()
self.assertEqual(protected, protect_passwords(unprotected))
def test_unprotect_nested_dicts(self):
protected = nested_dicts_protected()
unprotected = nested_dicts_unprotected()
self.assertEqual(unprotected, unprotect_passwords(protected))
def test_protect_lists_of_dicts(self):
unprotected = lists_of_dicts_unprotected()
protected = lists_of_dicts_protected()
self.assertEqual(protected, protect_passwords(unprotected))
def test_unprotect_lists_of_dicts(self):
protected = lists_of_dicts_protected()
unprotected = lists_of_dicts_unprotected()
self.assertEqual(unprotected, unprotect_passwords(protected))
def test_protect_nested_lists_of_dicts(self):
unprotected = nested_lists_of_dicts_unprotected()
protected = nested_lists_of_dicts_protected()
self.assertEqual(protected, protect_passwords(unprotected))
def test_unprotect_nested_lists_of_dicts(self):
protected = nested_lists_of_dicts_protected()
unprotected = nested_lists_of_dicts_unprotected()
self.assertEqual(unprotected, unprotect_passwords(protected))
def dict_unprotected():
return {
"key": "value",
"_X_key": "secret",
"password": "12345678"
}
def dict_protected():
return {
"key": "value",
"_X_key": ProtectedPassword("secret"),
"password": ProtectedPassword("12345678")
}
def nested_dicts_unprotected():
return {
"key": "value",
"_X_key": "secret",
"nested": {
"password": "12345678",
"nested": {
"key": "value",
"_X_key": "secret",
"password": "87654321",
}
}
}
def nested_dicts_protected():
return {
"key": "value",
"_X_key": ProtectedPassword("secret"),
"nested": {
"password": ProtectedPassword("12345678"),
"nested": {
"key": "value",
"_X_key": ProtectedPassword("secret"),
"password": ProtectedPassword("87654321"),
}
}
}
def lists_of_dicts_unprotected():
return [
{
"key": "value",
"_X_key": "secret",
"password": "12345678",
},
{
"key": "value",
"_X_key": "secret",
"password": "87654321",
}
]
def lists_of_dicts_protected():
return [
{
"key": "value",
"_X_key": ProtectedPassword("secret"),
"password": ProtectedPassword("12345678"),
},
{
"key": "value",
"_X_key": ProtectedPassword("secret"),
"password": ProtectedPassword("87654321"),
}
]
def nested_lists_of_dicts_unprotected():
return {
"key": "value",
"nested": [
{
"key": "value",
"nested": [
{
"key": "value",
"_X_key": "secret",
"password": "12345678",
}
]
}
]
}
def nested_lists_of_dicts_protected():
return {
"key": "value",
"nested": [
{
"key": "value",
"nested": [
{
"key": "value",
"_X_key": ProtectedPassword("secret"),
"password": ProtectedPassword("12345678"),
}
]
}
]
}
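# Round-trip sketch using the fixtures above (this mirrors what the test cases
# assert and is not itself a test):
#
#   payload = dict_unprotected()
#   protected = protect_passwords(payload)
#   assert protected == dict_protected()
#   assert unprotect_passwords(protected) == dict_unprotected()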
| nirs/vdsm | tests/passwords_test.py | Python | gpl-2.0 | 6,754 |
from ij import IJ
from ij.gui import NonBlockingGenericDialog
from ij import WindowManager
from ij.gui import WaitForUserDialog
from ij import ImageStack
from ij import ImagePlus
from ij.plugin import ChannelSplitter
theImage = IJ.getImage()
sourceImages = []
if theImage.getNChannels() == 1:
IJ.run("8-bit")
sourceImages.append(theImage)
else:
sourceImages = ChannelSplitter.split(theImage)
sourceNames = []
for im in sourceImages:
im.show()
sourceNames.append(im.getTitle())
gd0 = NonBlockingGenericDialog("Select source image...")
gd0.addChoice("Source image",sourceNames,sourceNames[0])
gd0.showDialog()
if (gd0.wasOKed()):
chosenImage = gd0.getNextChoice()
theImage = WindowManager.getImage(chosenImage)
IJ.selectWindow(chosenImage)
else:
theImage = sourceImages[0]
IJ.selectWindow(sourceNames[0])
gd = NonBlockingGenericDialog("Set slice params...")
gd.addNumericField("Slice start:",1,0)
gd.addNumericField("Slice end:",theImage.getNSlices(),0)
gd.showDialog()
if (gd.wasOKed()):
## Selecting the ROI over the stack
startSlice = int(gd.getNextNumber())
endSlice = gd.getNextNumber()
width = theImage.getWidth()
height = theImage.getHeight()
roiArray = []
for i in range(startSlice,endSlice+1):
theImage.setSlice(i)
bp = theImage.getProcessor().duplicate()
bp.setColor(0)
doStaySlice = True
while doStaySlice:
waiter = WaitForUserDialog("Draw ROI","Draw ROI, then hit OK")
waiter.show()
roi = theImage.getRoi()
if roi is None:
doStaySlice = True
else:
doStaySlice = False
roiArray.append(roi)
## Applying the ROI to each channel
newStacks = []
castImages = []
for procImage in sourceImages:
newStacks.append(ImageStack(width,height))
ns = newStacks[-1]
for i in range(startSlice,endSlice+1):
procImage.setSliceWithoutUpdate(i)
bp = procImage.getProcessor().duplicate()
bp.fillOutside(roiArray[i-startSlice])
ns.addSlice(bp)
castImages.append(ImagePlus(procImage.getShortTitle()+"_cast",ns))
## Displays the output
for castImage in castImages:
castImage.show()
## Cleans up the windows
for sourceImage in sourceImages:
sourceImage.close()
| stalepig/deep-mucosal-imaging | dmi_0.3/Isolate_stack_ROI2.py | Python | gpl-2.0 | 2,120 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Add data_migration table
Revision ID: 2e171e6198e6
Revises: 15d3fad78656
Create Date: 2016-08-03 11:11:55.680872
"""
# revision identifiers, used by Alembic.
revision = '2e171e6198e6'
down_revision = '15d3fad78656'
from alembic import op
from sqlalchemy import Column, Integer, Unicode, DateTime
def upgrade():
op.create_table('data_migration',
Column('id', Integer, primary_key=True),
Column('name', Unicode(255), nullable=False, unique=True),
Column('finish_time', DateTime),
mysql_engine='InnoDB')
def downgrade():
op.drop_table('data_migration')
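# Illustrative Alembic commands for this revision (Beaker normally drives
# migrations through its own tooling; the bare commands are an example only):
#
#   alembic upgrade 2e171e6198e6      # adds the data_migration table
#   alembic downgrade 15d3fad78656    # drops it again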
| jtoppins/beaker | Server/bkr/server/alembic/versions/2e171e6198e6_add_data_migration_table.py | Python | gpl-2.0 | 863 |
#!/usr/bin/env python
# Dependencies.py - discover, read, and write dependencies file for make.
# The format is like the output from "g++ -MM", which produces a
# list of header (.h) files used by source files (.cxx).
# As a module, provides
# FindPathToHeader(header, includePath) -> path
# FindHeadersInFile(filePath) -> [headers]
# FindHeadersInFileRecursive(filePath, includePath, renames) -> [paths]
# FindDependencies(sourceGlobs, includePath, objExt, startDirectory, renames) -> [dependencies]
# ExtractDependencies(input) -> [dependencies]
# TextFromDependencies(dependencies)
# WriteDependencies(output, dependencies)
# UpdateDependencies(filepath, dependencies)
# PathStem(p) -> stem
# InsertSynonym(dependencies, current, additional) -> [dependencies]
# If run as a script reads from stdin and writes to stdout.
# Only tested with ASCII file names.
# Copyright 2019 by Neil Hodgson <[email protected]>
# The License.txt file describes the conditions under which this software may be distributed.
# Requires Python 2.7 or later
import codecs, glob, os, sys
if __name__ == "__main__":
import FileGenerator
else:
from . import FileGenerator
continuationLineEnd = " \\"
def FindPathToHeader(header, includePath):
for incDir in includePath:
relPath = os.path.join(incDir, header)
if os.path.exists(relPath):
return relPath
return ""
fhifCache = {} # Remember the includes in each file. ~5x speed up.
def FindHeadersInFile(filePath):
if filePath not in fhifCache:
headers = []
with codecs.open(filePath, "r", "utf-8") as f:
for line in f:
if line.strip().startswith("#include"):
parts = line.split()
if len(parts) > 1:
header = parts[1]
if header[0] != '<': # No system headers
headers.append(header.strip('"'))
fhifCache[filePath] = headers
return fhifCache[filePath]
def FindHeadersInFileRecursive(filePath, includePath, renames):
headerPaths = []
for header in FindHeadersInFile(filePath):
if header in renames:
header = renames[header]
relPath = FindPathToHeader(header, includePath)
if relPath and relPath not in headerPaths:
headerPaths.append(relPath)
subHeaders = FindHeadersInFileRecursive(relPath, includePath, renames)
headerPaths.extend(sh for sh in subHeaders if sh not in headerPaths)
return headerPaths
def RemoveStart(relPath, start):
if relPath.startswith(start):
return relPath[len(start):]
return relPath
def ciKey(f):
return f.lower()
def FindDependencies(sourceGlobs, includePath, objExt, startDirectory, renames={}):
deps = []
for sourceGlob in sourceGlobs:
sourceFiles = glob.glob(sourceGlob)
# Sorting the files minimizes deltas as order returned by OS may be arbitrary
sourceFiles.sort(key=ciKey)
for sourceName in sourceFiles:
objName = os.path.splitext(os.path.basename(sourceName))[0]+objExt
headerPaths = FindHeadersInFileRecursive(sourceName, includePath, renames)
depsForSource = [sourceName] + headerPaths
depsToAppend = [RemoveStart(fn.replace("\\", "/"), startDirectory) for
fn in depsForSource]
deps.append([objName, depsToAppend])
return deps
def PathStem(p):
""" Return the stem of a filename: "CallTip.o" -> "CallTip" """
return os.path.splitext(os.path.basename(p))[0]
def InsertSynonym(dependencies, current, additional):
""" Insert a copy of one object file with dependencies under a different name.
Used when one source file is used to create two object files with different
preprocessor definitions. """
result = []
for dep in dependencies:
result.append(dep)
if (dep[0] == current):
depAdd = [additional, dep[1]]
result.append(depAdd)
return result
def ExtractDependencies(input):
""" Create a list of dependencies from input list of lines
Each element contains the name of the object and a list of
files that it depends on.
Dependencies that contain "/usr/" are removed as they are system headers. """
deps = []
for line in input:
headersLine = line.startswith(" ") or line.startswith("\t")
line = line.strip()
isContinued = line.endswith("\\")
line = line.rstrip("\\ ")
fileNames = line.strip().split(" ")
if not headersLine:
# its a source file line, there may be headers too
sourceLine = fileNames[0].rstrip(":")
fileNames = fileNames[1:]
deps.append([sourceLine, []])
deps[-1][1].extend(header for header in fileNames if "/usr/" not in header)
return deps
def TextFromDependencies(dependencies):
""" Convert a list of dependencies to text. """
text = ""
indentHeaders = "\t"
joinHeaders = continuationLineEnd + os.linesep + indentHeaders
for dep in dependencies:
object, headers = dep
text += object + ":"
for header in headers:
text += joinHeaders
text += header
if headers:
text += os.linesep
return text
def UpdateDependencies(filepath, dependencies, comment=""):
""" Write a dependencies file if different from dependencies. """
FileGenerator.UpdateFile(os.path.abspath(filepath), comment.rstrip() + os.linesep +
TextFromDependencies(dependencies))
def WriteDependencies(output, dependencies):
""" Write a list of dependencies out to a stream. """
output.write(TextFromDependencies(dependencies))
if __name__ == "__main__":
""" Act as a filter that reformats input dependencies to one per line. """
inputLines = sys.stdin.readlines()
deps = ExtractDependencies(inputLines)
WriteDependencies(sys.stdout, deps)
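# Library-style usage sketch (the globs, include path, and output file below
# are placeholders, not files from this repository):
#
#   deps = FindDependencies(["src/*.cxx"], ["include", "src"], ".o", "build/")
#   deps = InsertSynonym(deps, "Editor.o", "EditorDLL.o")
#   UpdateDependencies("deps.mak", deps, "# Generated by Dependencies.py")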
| apmckinlay/csuneido | vs2019scintilla/scripts/Dependencies.py | Python | gpl-2.0 | 5,533 |
# -*- coding: utf-8 -*-
#
# This file is part of HEPData.
# Copyright (C) 2016 CERN.
#
# HEPData is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""HEPData module test cases."""
def test_version():
"""Test version import."""
from hepdata import __version__
assert __version__
| HEPData/hepdata3 | tests/hepdata_test.py | Python | gpl-2.0 | 1,117 |
#!/bin/env python
import sys
import os
args = sys.argv[1:]
files = args[0:-1]
newdir = args[-1]
for file in files:
cmd = "svn mv %s %s/" % (file,newdir)
print cmd
os.system(cmd)
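# Usage sketch (file and directory names are placeholders):
#   ./svnmvall.py foo.cpp bar.cpp src
# runs "svn mv foo.cpp src/" followed by "svn mv bar.cpp src/".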
| chryswoods/Sire | corelib/build/svnmvall.py | Python | gpl-2.0 | 200 |
#!/usr/bin/env python
#############################################################
# ubi_reader/ubi_io
# (c) 2013 Jason Pruitt ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################
from ubi.block import sort
class ubi_file(object):
"""UBI image file object
Arguments:
Str:path -- Path to file to parse
Int:block_size -- Erase block size of NAND in bytes.
Int:start_offset -- (optional) Where to start looking in the file for
UBI data.
Int:end_offset -- (optional) Where to stop looking in the file.
Methods:
seek -- Put file head to specified byte offset.
Int:offset
read -- Read specified bytes from file handle.
Int:size
tell -- Returns byte offset of current file location.
read_block -- Returns complete PEB data of provided block
description.
Obj:block
read_block_data -- Returns LEB data only from provided block.
Obj:block
reader -- Generator that returns data from file.
reset -- Reset file position to start_offset
Handles all the actual file interactions, read, seek,
extract blocks, etc.
"""
def __init__(self, path, block_size, start_offset=0, end_offset=None):
self._fhandle = open(path, 'rb')
self._start_offset = start_offset
if end_offset:
self._end_offset = end_offset
else:
self._fhandle.seek(0, 2)
self._end_offset = self.tell()
self._block_size = block_size
if start_offset >= self._end_offset:
raise Exception('Start offset larger than file size!')
self._fhandle.seek(self._start_offset)
def _set_start(self, i):
self._start_offset = i
def _get_start(self):
return self._start_offset
start_offset = property(_get_start, _set_start)
def _get_end(self):
return self._end_offset
end_offset = property(_get_end)
def _get_block_size(self):
return self._block_size
block_size = property(_get_block_size)
def seek(self, offset):
self._fhandle.seek(offset)
def read(self, size):
return self._fhandle.read(size)
def tell(self):
return self._fhandle.tell()
def reset(self):
self._fhandle.seek(self.start_offset)
def reader(self):
self.reset()
while True:
cur_loc = self._fhandle.tell()
if self.end_offset and cur_loc > self.end_offset:
break
elif self.end_offset and self.end_offset - cur_loc < self.block_size:
chunk_size = self.end_offset - cur_loc
else:
chunk_size = self.block_size
buf = self.read(chunk_size)
if not buf:
break
yield buf
def read_block(self, block):
"""Read complete PEB data from file.
Argument:
Obj:block -- Block data is desired for.
"""
self.seek(block.file_offset)
return self._fhandle.read(block.size)
def read_block_data(self, block):
"""Read LEB data from file
Argument:
Obj:block -- Block data is desired for.
"""
self.seek(block.file_offset + block.ec_hdr.data_offset)
buf = self._fhandle.read(block.size - block.ec_hdr.data_offset - block.vid_hdr.data_pad)
return buf
class leb_virtual_file():
def __init__(self, ubi, volume):
self._ubi = ubi
self._volume = volume
self._blocks = sort.by_leb(self._volume.get_blocks(self._ubi.blocks))
self._seek = 0
self.leb_data_size = len(self._blocks) * self._ubi.leb_size
self._last_leb = -1
self._last_buf = ''
def read(self, i):
buf = ''
leb = int(self.tell() / self._ubi.leb_size)
offset = self.tell() % self._ubi.leb_size
if leb == self._last_leb:
self.seek(self.tell() + i)
return self._last_buf[offset:offset + i]
else:
buf = self._ubi.file.read_block_data(self._ubi.blocks[self._blocks[leb]])
self._last_buf = buf
self._last_leb = leb
self.seek(self.tell() + i)
return buf[offset:offset + i]
def reset(self):
self.seek(0)
def seek(self, offset):
self._seek = offset
def tell(self):
return self._seek
def reader(self):
last_leb = 0
for block in self._blocks:
while 0 != (self._ubi.blocks[block].leb_num - last_leb):
last_leb += 1
yield '\xff' * self._ubi.leb_size
last_leb += 1
yield self._ubi.file.read_block_data(self._ubi.blocks[block])
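# Usage sketch for the classes above (image name and geometry are illustrative):
#
#   peb_size = 128 * 1024                      # erase block size of the dumped NAND
#   ufile = ubi_file('flash_dump.img', peb_size)
#   for chunk in ufile.reader():               # raw PEB-sized buffers
#       pass
#
# leb_virtual_file wraps already-parsed objects, roughly:
#   vfile = leb_virtual_file(ubi_obj, volume_obj)
#   data = vfile.read(1024)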
| Dima73/pli-openmultibootmanager | src/ubi_reader/ubi_io/__init__.py | Python | gpl-2.0 | 5,452 |
# Copyright (c) 2011 Nick Hurley <hurley at todesschaf dot org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Helpers for git extensions written in python
"""
import inspect
import os
import subprocess
import sys
import traceback
config = {}
def __extract_name_email(info, type_):
"""Extract a name and email from a string in the form:
User Name <[email protected]> tstamp offset
Stick that into our config dict for either git committer or git author.
"""
val = ' '.join(info.split(' ')[:-2])
angle = val.find('<')
if angle > -1:
config['GIT_%s_NAME' % type_] = val[:angle - 1]
config['GIT_%s_EMAIL' % type_] = val[angle + 1:-1]
else:
config['GIT_%s_NAME' % type_] = val
def __create_config():
"""Create our configuration dict from git and the env variables we're given.
"""
devnull = file('/dev/null', 'w')
# Stick all our git variables in our dict, just in case anyone needs them
gitvar = subprocess.Popen(['git', 'var', '-l'], stdout=subprocess.PIPE,
stderr=devnull)
for line in gitvar.stdout:
k, v = line.split('=', 1)
if k == 'GIT_COMMITTER_IDENT':
__extract_name_email(v, 'COMMITTER')
elif k == 'GIT_AUTHOR_IDENT':
__extract_name_email(v, 'AUTHOR')
elif v == 'true':
v = True
elif v == 'false':
v = False
else:
try:
v = int(v)
except:
pass
config[k] = v
gitvar.wait()
# Find out where git's sub-exes live
gitexec = subprocess.Popen(['git', '--exec-path'], stdout=subprocess.PIPE,
stderr=devnull)
config['GIT_LIBEXEC'] = gitexec.stdout.readlines()[0].strip()
gitexec.wait()
# Figure out the git dir in our repo, if applicable
gitdir = subprocess.Popen(['git', 'rev-parse', '--git-dir'],
stdout=subprocess.PIPE, stderr=devnull)
lines = gitdir.stdout.readlines()
if gitdir.wait() == 0:
config['GIT_DIR'] = lines[0].strip()
# Figure out the top level of our repo, if applicable
gittoplevel = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'],
stdout=subprocess.PIPE, stderr=devnull)
lines = gittoplevel.stdout.readlines()
if gittoplevel.wait() == 0:
config['GIT_TOPLEVEL'] = lines[0].strip()
# We may have been called by a wrapper that passes us some info through the
# environment. Use it if it's there
for k, v in os.environ.iteritems():
if k.startswith('PY_GIT_'):
config[k[3:]] = v
elif k == 'PGL_OK':
config['PGL_OK'] = True
# Make sure our git dir and toplevel are fully-qualified
if 'GIT_DIR' in config and not os.path.isabs(config['GIT_DIR']):
git_dir = os.path.join(config['GIT_TOPLEVEL'], config['GIT_DIR'])
config['GIT_DIR'] = os.path.abspath(git_dir)
def warn(msg):
"""Print a warning
"""
sys.stderr.write('%s\n' % (msg,))
def die(msg):
"""Print an error message and exit the program
"""
sys.stderr.write('%s\n' % (msg,))
sys.exit(1)
def do_checks():
"""Check to ensure we've got everything we expect
"""
try:
import argparse
except:
die('Your python must support the argparse module')
def main(_main):
"""Mark a function as the main function for our git subprogram. Based
very heavily on automain by Gerald Kaszuba, but with modifications to make
it work better for our purposes.
"""
parent = inspect.stack()[1][0]
name = parent.f_locals.get('__name__', None)
if name == '__main__':
__create_config()
if 'PGL_OK' not in config:
do_checks()
rval = 1
try:
rval = _main()
except Exception, e:
sys.stdout.write('%s\n' % str(e))
f = file('pygit.tb', 'w')
traceback.print_tb(sys.exc_info()[2], None, f)
f.close()
sys.exit(rval)
return _main
if __name__ == '__main__':
"""If we get run as a script, check to make sure it's all ok and exit with
an appropriate error code
"""
do_checks()
sys.exit(0)
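# Illustrative extension script built on this module (the command's behaviour
# is invented for the example):
#
#   #!/usr/bin/env python
#   from pgl import main, config, die
#
#   @main
#   def _main():
#       if 'GIT_DIR' not in config:
#           die('this command must be run inside a git repository')
#       print 'committer: %s' % config.get('GIT_COMMITTER_NAME', 'unknown')
#       return 0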
| todesschaf/pgl | pgl.py | Python | gpl-2.0 | 4,786 |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
try:
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
except ImportError:
has_xml=False
ContentHandler=object
else:
has_xml=True
import os,sys
from waflib.Tools import cxx
from waflib import Task,Utils,Options,Errors,Context
from waflib.TaskGen import feature,after_method,extension
from waflib.Configure import conf
from waflib import Logs
MOC_H=['.h','.hpp','.hxx','.hh']
EXT_RCC=['.qrc']
EXT_UI=['.ui']
EXT_QT5=['.cpp','.cc','.cxx','.C']
QT5_LIBS='''
qtmain
Qt5Bluetooth
Qt5CLucene
Qt5Concurrent
Qt5Core
Qt5DBus
Qt5Declarative
Qt5DesignerComponents
Qt5Designer
Qt5Gui
Qt5Help
Qt5MultimediaQuick_p
Qt5Multimedia
Qt5MultimediaWidgets
Qt5Network
Qt5Nfc
Qt5OpenGL
Qt5Positioning
Qt5PrintSupport
Qt5Qml
Qt5QuickParticles
Qt5Quick
Qt5QuickTest
Qt5Script
Qt5ScriptTools
Qt5Sensors
Qt5SerialPort
Qt5Sql
Qt5Svg
Qt5Test
Qt5WebKit
Qt5WebKitWidgets
Qt5Widgets
Qt5WinExtras
Qt5X11Extras
Qt5XmlPatterns
Qt5Xml'''
class qxx(Task.classes['cxx']):
def __init__(self,*k,**kw):
Task.Task.__init__(self,*k,**kw)
self.moc_done=0
def runnable_status(self):
if self.moc_done:
return Task.Task.runnable_status(self)
else:
for t in self.run_after:
if not t.hasrun:
return Task.ASK_LATER
self.add_moc_tasks()
return Task.Task.runnable_status(self)
def create_moc_task(self,h_node,m_node):
try:
moc_cache=self.generator.bld.moc_cache
except AttributeError:
moc_cache=self.generator.bld.moc_cache={}
try:
return moc_cache[h_node]
except KeyError:
tsk=moc_cache[h_node]=Task.classes['moc'](env=self.env,generator=self.generator)
tsk.set_inputs(h_node)
tsk.set_outputs(m_node)
if self.generator:
self.generator.tasks.append(tsk)
gen=self.generator.bld.producer
gen.outstanding.insert(0,tsk)
gen.total+=1
return tsk
else:
delattr(self,'cache_sig')
def moc_h_ext(self):
try:
ext=Options.options.qt_header_ext.split()
except AttributeError:
pass
if not ext:
ext=MOC_H
return ext
def add_moc_tasks(self):
node=self.inputs[0]
bld=self.generator.bld
try:
self.signature()
except KeyError:
pass
else:
delattr(self,'cache_sig')
include_nodes=[node.parent]+self.generator.includes_nodes
moctasks=[]
mocfiles=set([])
for d in bld.raw_deps.get(self.uid(),[]):
if not d.endswith('.moc'):
continue
if d in mocfiles:
continue
mocfiles.add(d)
h_node=None
base2=d[:-4]
for x in include_nodes:
for e in self.moc_h_ext():
h_node=x.find_node(base2+e)
if h_node:
break
if h_node:
m_node=h_node.change_ext('.moc')
break
else:
for k in EXT_QT5:
if base2.endswith(k):
for x in include_nodes:
h_node=x.find_node(base2)
if h_node:
break
if h_node:
m_node=h_node.change_ext(k+'.moc')
break
if not h_node:
raise Errors.WafError('No source found for %r which is a moc file'%d)
task=self.create_moc_task(h_node,m_node)
moctasks.append(task)
self.run_after.update(set(moctasks))
self.moc_done=1
class trans_update(Task.Task):
run_str='${QT_LUPDATE} ${SRC} -ts ${TGT}'
color='BLUE'
Task.update_outputs(trans_update)
class XMLHandler(ContentHandler):
def __init__(self):
self.buf=[]
self.files=[]
def startElement(self,name,attrs):
if name=='file':
self.buf=[]
def endElement(self,name):
if name=='file':
self.files.append(str(''.join(self.buf)))
def characters(self,cars):
self.buf.append(cars)
@extension(*EXT_RCC)
def create_rcc_task(self,node):
rcnode=node.change_ext('_rc.cpp')
self.create_task('rcc',node,rcnode)
cpptask=self.create_task('cxx',rcnode,rcnode.change_ext('.o'))
try:
self.compiled_tasks.append(cpptask)
except AttributeError:
self.compiled_tasks=[cpptask]
return cpptask
@extension(*EXT_UI)
def create_uic_task(self,node):
uictask=self.create_task('ui5',node)
uictask.outputs=[self.path.find_or_declare(self.env['ui_PATTERN']%node.name[:-3])]
@extension('.ts')
def add_lang(self,node):
self.lang=self.to_list(getattr(self,'lang',[]))+[node]
@feature('qt5')
@after_method('apply_link')
def apply_qt5(self):
if getattr(self,'lang',None):
qmtasks=[]
for x in self.to_list(self.lang):
if isinstance(x,str):
x=self.path.find_resource(x+'.ts')
qmtasks.append(self.create_task('ts2qm',x,x.change_ext('.qm')))
if getattr(self,'update',None)and Options.options.trans_qt5:
cxxnodes=[a.inputs[0]for a in self.compiled_tasks]+[a.inputs[0]for a in self.tasks if getattr(a,'inputs',None)and a.inputs[0].name.endswith('.ui')]
for x in qmtasks:
self.create_task('trans_update',cxxnodes,x.inputs)
if getattr(self,'langname',None):
qmnodes=[x.outputs[0]for x in qmtasks]
rcnode=self.langname
if isinstance(rcnode,str):
rcnode=self.path.find_or_declare(rcnode+'.qrc')
t=self.create_task('qm2rcc',qmnodes,rcnode)
k=create_rcc_task(self,t.outputs[0])
self.link_task.inputs.append(k.outputs[0])
lst=[]
for flag in self.to_list(self.env['CXXFLAGS']):
if len(flag)<2:continue
f=flag[0:2]
if f in('-D','-I','/D','/I'):
if(f[0]=='/'):
lst.append('-'+flag[1:])
else:
lst.append(flag)
self.env.append_value('MOC_FLAGS',lst)
@extension(*EXT_QT5)
def cxx_hook(self,node):
return self.create_compiled_task('qxx',node)
class rcc(Task.Task):
color='BLUE'
run_str='${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}'
ext_out=['.h']
def rcname(self):
return os.path.splitext(self.inputs[0].name)[0]
def scan(self):
if not has_xml:
Logs.error('no xml support was found, the rcc dependencies will be incomplete!')
return([],[])
parser=make_parser()
curHandler=XMLHandler()
parser.setContentHandler(curHandler)
fi=open(self.inputs[0].abspath(),'r')
try:
parser.parse(fi)
finally:
fi.close()
nodes=[]
names=[]
root=self.inputs[0].parent
for x in curHandler.files:
nd=root.find_resource(x)
if nd:nodes.append(nd)
else:names.append(x)
return(nodes,names)
class moc(Task.Task):
color='BLUE'
run_str='${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}'
class ui5(Task.Task):
color='BLUE'
run_str='${QT_UIC} ${SRC} -o ${TGT}'
ext_out=['.h']
class ts2qm(Task.Task):
color='BLUE'
run_str='${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}'
class qm2rcc(Task.Task):
color='BLUE'
after='ts2qm'
def run(self):
txt='\n'.join(['<file>%s</file>'%k.path_from(self.outputs[0].parent)for k in self.inputs])
code='<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>'%txt
self.outputs[0].write(code)
def configure(self):
self.find_qt5_binaries()
self.set_qt5_libs_to_check()
self.set_qt5_defines()
self.find_qt5_libraries()
self.add_qt5_rpath()
self.simplify_qt5_libs()
@conf
def find_qt5_binaries(self):
env=self.env
opt=Options.options
qtdir=getattr(opt,'qtdir','')
qtbin=getattr(opt,'qtbin','')
paths=[]
if qtdir:
qtbin=os.path.join(qtdir,'bin')
if not qtdir:
qtdir=os.environ.get('QT5_ROOT','')
qtbin=os.environ.get('QT5_BIN',None)or os.path.join(qtdir,'bin')
if qtbin:
paths=[qtbin]
if not qtdir:
paths=os.environ.get('PATH','').split(os.pathsep)
paths.append('/usr/share/qt5/bin/')
try:
lst=Utils.listdir('/usr/local/Trolltech/')
except OSError:
pass
else:
if lst:
lst.sort()
lst.reverse()
qtdir='/usr/local/Trolltech/%s/'%lst[0]
qtbin=os.path.join(qtdir,'bin')
paths.append(qtbin)
cand=None
prev_ver=['5','0','0']
for qmk in('qmake-qt5','qmake5','qmake'):
try:
qmake=self.find_program(qmk,path_list=paths)
except self.errors.ConfigurationError:
pass
else:
try:
version=self.cmd_and_log(qmake+['-query','QT_VERSION']).strip()
except self.errors.WafError:
pass
else:
if version:
new_ver=version.split('.')
if new_ver>prev_ver:
cand=qmake
prev_ver=new_ver
if cand:
self.env.QMAKE=cand
else:
self.fatal('Could not find qmake for qt5')
qtbin=self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_BINS']).strip()+os.sep
def find_bin(lst,var):
if var in env:
return
for f in lst:
try:
ret=self.find_program(f,path_list=paths)
except self.errors.ConfigurationError:
pass
else:
env[var]=ret
break
find_bin(['uic-qt5','uic'],'QT_UIC')
if not env.QT_UIC:
self.fatal('cannot find the uic compiler for qt5')
self.start_msg('Checking for uic version')
uicver=self.cmd_and_log(env.QT_UIC+["-version"],output=Context.BOTH)
uicver=''.join(uicver).strip()
uicver=uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt','')
self.end_msg(uicver)
if uicver.find(' 3.')!=-1 or uicver.find(' 4.')!=-1:
		self.fatal('this uic compiler is for qt3 or qt4, add uic for qt5 to your path')
find_bin(['moc-qt5','moc'],'QT_MOC')
find_bin(['rcc-qt5','rcc'],'QT_RCC')
find_bin(['lrelease-qt5','lrelease'],'QT_LRELEASE')
find_bin(['lupdate-qt5','lupdate'],'QT_LUPDATE')
env['UIC_ST']='%s -o %s'
env['MOC_ST']='-o'
env['ui_PATTERN']='ui_%s.h'
env['QT_LRELEASE_FLAGS']=['-silent']
env.MOCCPPPATH_ST='-I%s'
env.MOCDEFINES_ST='-D%s'
@conf
def find_qt5_libraries(self):
qtlibs=getattr(Options.options,'qtlibs',None)or os.environ.get("QT5_LIBDIR",None)
if not qtlibs:
try:
qtlibs=self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_LIBS']).strip()
except Errors.WafError:
qtdir=self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_PREFIX']).strip()+os.sep
qtlibs=os.path.join(qtdir,'lib')
self.msg('Found the Qt5 libraries in',qtlibs)
qtincludes=os.environ.get("QT5_INCLUDES",None)or self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_HEADERS']).strip()
env=self.env
if not'PKG_CONFIG_PATH'in os.environ:
os.environ['PKG_CONFIG_PATH']='%s:%s/pkgconfig:/usr/lib/qt5/lib/pkgconfig:/opt/qt5/lib/pkgconfig:/usr/lib/qt5/lib:/opt/qt5/lib'%(qtlibs,qtlibs)
try:
if os.environ.get("QT5_XCOMPILE",None):
raise self.errors.ConfigurationError()
self.check_cfg(atleast_pkgconfig_version='0.1')
except self.errors.ConfigurationError:
for i in self.qt5_vars:
uselib=i.upper()
if Utils.unversioned_sys_platform()=="darwin":
frameworkName=i+".framework"
qtDynamicLib=os.path.join(qtlibs,frameworkName,i)
if os.path.exists(qtDynamicLib):
env.append_unique('FRAMEWORK_'+uselib,i)
self.msg('Checking for %s'%i,qtDynamicLib,'GREEN')
else:
self.msg('Checking for %s'%i,False,'YELLOW')
env.append_unique('INCLUDES_'+uselib,os.path.join(qtlibs,frameworkName,'Headers'))
elif env.DEST_OS!="win32":
qtDynamicLib=os.path.join(qtlibs,"lib"+i+".so")
qtStaticLib=os.path.join(qtlibs,"lib"+i+".a")
if os.path.exists(qtDynamicLib):
env.append_unique('LIB_'+uselib,i)
self.msg('Checking for %s'%i,qtDynamicLib,'GREEN')
elif os.path.exists(qtStaticLib):
env.append_unique('LIB_'+uselib,i)
self.msg('Checking for %s'%i,qtStaticLib,'GREEN')
else:
self.msg('Checking for %s'%i,False,'YELLOW')
env.append_unique('LIBPATH_'+uselib,qtlibs)
env.append_unique('INCLUDES_'+uselib,qtincludes)
env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
else:
for k in("lib%s.a","lib%s5.a","%s.lib","%s5.lib"):
lib=os.path.join(qtlibs,k%i)
if os.path.exists(lib):
env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')])
self.msg('Checking for %s'%i,lib,'GREEN')
break
else:
self.msg('Checking for %s'%i,False,'YELLOW')
env.append_unique('LIBPATH_'+uselib,qtlibs)
env.append_unique('INCLUDES_'+uselib,qtincludes)
env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
uselib=i.upper()+"_debug"
for k in("lib%sd.a","lib%sd5.a","%sd.lib","%sd5.lib"):
lib=os.path.join(qtlibs,k%i)
if os.path.exists(lib):
env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')])
self.msg('Checking for %s'%i,lib,'GREEN')
break
else:
self.msg('Checking for %s'%i,False,'YELLOW')
env.append_unique('LIBPATH_'+uselib,qtlibs)
env.append_unique('INCLUDES_'+uselib,qtincludes)
env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
else:
for i in self.qt5_vars_debug+self.qt5_vars:
self.check_cfg(package=i,args='--cflags --libs',mandatory=False)
@conf
def simplify_qt5_libs(self):
env=self.env
def process_lib(vars_,coreval):
for d in vars_:
var=d.upper()
if var=='QTCORE':
continue
value=env['LIBPATH_'+var]
if value:
core=env[coreval]
accu=[]
for lib in value:
if lib in core:
continue
accu.append(lib)
env['LIBPATH_'+var]=accu
process_lib(self.qt5_vars,'LIBPATH_QTCORE')
process_lib(self.qt5_vars_debug,'LIBPATH_QTCORE_DEBUG')
@conf
def add_qt5_rpath(self):
env=self.env
if getattr(Options.options,'want_rpath',False):
def process_rpath(vars_,coreval):
for d in vars_:
var=d.upper()
value=env['LIBPATH_'+var]
if value:
core=env[coreval]
accu=[]
for lib in value:
if var!='QTCORE':
if lib in core:
continue
accu.append('-Wl,--rpath='+lib)
env['RPATH_'+var]=accu
process_rpath(self.qt5_vars,'LIBPATH_QTCORE')
process_rpath(self.qt5_vars_debug,'LIBPATH_QTCORE_DEBUG')
@conf
def set_qt5_libs_to_check(self):
if not hasattr(self,'qt5_vars'):
self.qt5_vars=QT5_LIBS
self.qt5_vars=Utils.to_list(self.qt5_vars)
if not hasattr(self,'qt5_vars_debug'):
self.qt5_vars_debug=[a+'_debug'for a in self.qt5_vars]
self.qt5_vars_debug=Utils.to_list(self.qt5_vars_debug)
@conf
def set_qt5_defines(self):
if sys.platform!='win32':
return
for x in self.qt5_vars:
y=x[2:].upper()
self.env.append_unique('DEFINES_%s'%x.upper(),'QT_%s_LIB'%y)
self.env.append_unique('DEFINES_%s_DEBUG'%x.upper(),'QT_%s_LIB'%y)
def options(opt):
opt.add_option('--want-rpath',action='store_true',default=False,dest='want_rpath',help='enable the rpath for qt libraries')
opt.add_option('--header-ext',type='string',default='',help='header extension for moc files',dest='qt_header_ext')
for i in'qtdir qtbin qtlibs'.split():
opt.add_option('--'+i,type='string',default='',dest=i)
opt.add_option('--translate',action="store_true",help="collect translation strings",dest="trans_qt5",default=False)
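# Usage sketch: the options registered above are consumed at configure time, e.g.
#   ./waf configure --qtdir=/opt/qt5 --want-rpath --translate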
| oli-kester/advanced-av-examples | amp-osc-lv2/.waf-1.8.5-3556be08f33a5066528395b11fed89fa/waflib/Tools/qt5.py | Python | gpl-2.0 | 14,329 |
# Rekall Memory Forensics
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Author: Michael Cohen [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""Tests for json encoding/decoding."""
import json
import logging
from rekall import testlib
from rekall.ui import json_renderer
class JsonTest(testlib.RekallBaseUnitTestCase):
"""Test the Json encode/decoder."""
PLUGIN = "json_render"
def setUp(self):
self.session = self.MakeUserSession()
self.renderer = json_renderer.JsonRenderer(session=self.session)
self.encoder = self.renderer.encoder
self.decoder = self.renderer.decoder
def testObjectRenderer(self):
cases = [
('\xff\xff\x00\x00', {'mro': u'str:basestring:object',
'b64': u'//8AAA=='}),
("hello", u'hello'), # A string is converted into unicode if
# possible.
(1, 1), # Ints are already JSON serializable.
(dict(foo=2), {'foo': 2}),
(set([1, 2, 3]), {'mro': u'set:object', 'data': [1, 2, 3]}),
([1, 2, 3], [1, 2, 3]),
([1, "\xff\xff\x00\x00", 3], [1, {'mro': u'str:basestring:object',
'b64': u'//8AAA=='}, 3]),
]
for case in cases:
encoded = self.encoder.Encode(case[0])
self.assertEqual(encoded, case[1])
def testProperSerialization(self):
"""Test that serializing simple python objects with json works.
NOTE: Json is not intrinsically a fully functional serialization format
- it is unable to serialize many common python primitives (e.g. strings,
dicts with numeric keys etc). This tests that our wrapping around the
json format allows the correct serialization of python primitives.
"""
for case in [
[1, 2],
[1, "hello"],
["1", "2"],
["hello", u'Gr\xfcetzi'],
"hello",
u'Gr\xfcetzi',
dict(a="hello"),
dict(b=dict(a="hello")), # Nested dict.
]:
self.encoder.flush()
data = self.encoder.Encode(case)
logging.debug("%s->%s" % (case, data))
# Make sure the data is JSON serializable.
self.assertEqual(data, json.loads(json.dumps(data)))
self.decoder.SetLexicon(self.encoder.GetLexicon())
self.assertEqual(case, self.decoder.Decode(data))
def testObjectSerization(self):
"""Serialize _EPROCESS objects.
We check that the deserialized object is an exact replica of the
original - this includes the same address spaces, profile and offset.
Having the objects identical allows us to dereference object members
seamlessly.
"""
for task in self.session.plugins.pslist().filter_processes():
self.encoder.flush()
data = self.encoder.Encode(task)
logging.debug("%r->%s" % (task, data))
# Make sure the data is JSON serializable.
self.assertEqual(data, json.loads(json.dumps(data)))
self.decoder.SetLexicon(self.encoder.GetLexicon())
decoded_task = self.decoder.Decode(data)
self.assertEqual(task.obj_offset, decoded_task.obj_offset)
self.assertEqual(task.obj_name, decoded_task.obj_name)
self.assertEqual(task.obj_vm.name, decoded_task.obj_vm.name)
# Check the process name is the same - this tests subfield
# dereferencing.
self.assertEqual(task.name, decoded_task.name)
self.assertEqual(task.pid, decoded_task.pid)
def testAllObjectSerialization(self):
for vtype in self.session.profile.vtypes:
obj = self.session.profile.Object(vtype)
self.CheckObjectSerization(obj)
self.CheckObjectSerization(self.session.profile)
self.CheckObjectSerization(self.session.kernel_address_space)
self.CheckObjectSerization(self.session.physical_address_space)
# Some native types.
self.CheckObjectSerization(set([1, 2, 3]))
self.CheckObjectSerization(dict(a=1, b=dict(a=1)))
def CheckObjectSerization(self, obj):
object_renderer_cls = json_renderer.JsonObjectRenderer.ForTarget(
obj, "JsonRenderer")
renderer = json_renderer.JsonRenderer(session=self.session)
object_renderer = object_renderer_cls(
session=self.session, renderer=renderer)
encoded = object_renderer.EncodeToJsonSafe(obj, strict=True)
# Make sure it is json safe.
json.dumps(encoded)
# Now decode it.
decoding_object_renderer_cls = json_renderer.JsonObjectRenderer.FromEncoded(
encoded, "JsonRenderer")
self.assertEqual(decoding_object_renderer_cls, object_renderer_cls)
decoded = object_renderer.DecodeFromJsonSafe(encoded, {})
self.assertEqual(decoded, obj)
# Now check the DataExportRenderer.
object_renderer_cls = json_renderer.JsonObjectRenderer.ForTarget(
obj, "DataExportRenderer")
object_renderer = object_renderer_cls(session=self.session,
renderer="DataExportRenderer")
encoded = object_renderer.EncodeToJsonSafe(obj, strict=True)
# Make sure it is json safe.
json.dumps(encoded)
# Data Export is not decodable.
| chen0031/rekall | rekall-core/rekall/plugins/tools/json_test.py | Python | gpl-2.0 | 6,226 |
# -*- Mode: python; coding:utf-8; indent-tabs-mode: nil -*- */
#
# This file is part of systemd.
#
# Copyright 2012 David Strauss <[email protected]>
# Copyright 2012 Zbigniew Jędrzejewski-Szmek <[email protected]>
# Copyright 2012 Marti Raudsepp <[email protected]>
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import sys as _sys
import datetime as _datetime
import uuid as _uuid
import traceback as _traceback
import os as _os
import logging as _logging
if _sys.version_info >= (3,3):
from collections import ChainMap as _ChainMap
from syslog import (LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR,
LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG)
from ._journal import __version__, sendv, stream_fd
from ._reader import (_Reader, NOP, APPEND, INVALIDATE,
LOCAL_ONLY, RUNTIME_ONLY,
SYSTEM, SYSTEM_ONLY, CURRENT_USER,
_get_catalog)
from . import id128 as _id128
if _sys.version_info >= (3,):
from ._reader import Monotonic
else:
Monotonic = tuple
def _convert_monotonic(m):
return Monotonic((_datetime.timedelta(microseconds=m[0]),
_uuid.UUID(bytes=m[1])))
def _convert_source_monotonic(s):
return _datetime.timedelta(microseconds=int(s))
def _convert_realtime(t):
return _datetime.datetime.fromtimestamp(t / 1000000)
def _convert_timestamp(s):
return _datetime.datetime.fromtimestamp(int(s) / 1000000)
def _convert_trivial(x):
return x
if _sys.version_info >= (3,):
def _convert_uuid(s):
return _uuid.UUID(s.decode())
else:
_convert_uuid = _uuid.UUID
DEFAULT_CONVERTERS = {
'MESSAGE_ID': _convert_uuid,
'_MACHINE_ID': _convert_uuid,
'_BOOT_ID': _convert_uuid,
'PRIORITY': int,
'LEADER': int,
'SESSION_ID': int,
'USERSPACE_USEC': int,
'INITRD_USEC': int,
'KERNEL_USEC': int,
'_UID': int,
'_GID': int,
'_PID': int,
'SYSLOG_FACILITY': int,
'SYSLOG_PID': int,
'_AUDIT_SESSION': int,
'_AUDIT_LOGINUID': int,
'_SYSTEMD_SESSION': int,
'_SYSTEMD_OWNER_UID': int,
'CODE_LINE': int,
'ERRNO': int,
'EXIT_STATUS': int,
'_SOURCE_REALTIME_TIMESTAMP': _convert_timestamp,
'__REALTIME_TIMESTAMP': _convert_realtime,
'_SOURCE_MONOTONIC_TIMESTAMP': _convert_source_monotonic,
'__MONOTONIC_TIMESTAMP': _convert_monotonic,
'__CURSOR': _convert_trivial,
'COREDUMP': bytes,
'COREDUMP_PID': int,
'COREDUMP_UID': int,
'COREDUMP_GID': int,
'COREDUMP_SESSION': int,
'COREDUMP_SIGNAL': int,
'COREDUMP_TIMESTAMP': _convert_timestamp,
}
_IDENT_LETTER = set('ABCDEFGHIJKLMNOPQRTSUVWXYZ_')
def _valid_field_name(s):
return not (set(s) - _IDENT_LETTER)
class Reader(_Reader):
"""Reader allows the access and filtering of systemd journal
entries. Note that in order to access the system journal, a
non-root user must be in the `systemd-journal` group.
Example usage to print out all informational or higher level
messages for systemd-udevd for this boot:
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j:
... print(entry['MESSAGE'])
See systemd.journal-fields(7) for more info on typical fields
found in the journal.
"""
def __init__(self, flags=0, path=None, files=None, converters=None):
"""Create an instance of Reader, which allows filtering and
return of journal entries.
Argument `flags` sets open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens
journal on local machine only; RUNTIME_ONLY opens only
volatile journal files; and SYSTEM_ONLY opens only
journal files of system services and the kernel.
Argument `path` is the directory of journal files. Note that
`flags` and `path` are exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field
names are used as keys into this dictionary. The values must
be single argument functions, which take a `bytes` object and
return a converted value. When there's no entry for a field
name, then the default UTF-8 decoding will be attempted. If
the conversion fails with a ValueError, unconverted bytes
        object will be returned. (Note that ValueError is a superclass
of UnicodeDecodeError).
Reader implements the context manager protocol: the journal
will be closed when exiting the block.
"""
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3,3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key]
If `key` is not present in self.converters, a standard unicode
decoding will be attempted. If the conversion (either
key-specific or the default one) fails with a ValueError, the
original bytes object will be returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _covert_field"""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Part of iterator protocol.
Returns self.
"""
return self
def __next__(self):
"""Part of iterator protocol.
Returns self.get_next() or raises StopIteration.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined in a logical AND,
and matches of the same field are automatically combined in a
logical OR.
Matches can be passed as strings of form "FIELD=value", or
keyword arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
"""Return the next log entry as a mapping type, currently
a standard dictionary of fields.
Optional skip value will return the `skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
"""Return the previous log entry as a mapping type,
currently a standard dictionary of fields.
Optional skip value will return the -`skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
Equivalent to get_next(-skip).
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return unique values appearing in the journal for given `field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal. `timeout` is the maximum
time in seconds to wait, or None, to wait forever.
Returns one of NOP (no change), APPEND (new entries have been
added to the end of the journal), or INVALIDATE (journal files
have been added or removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `realtime` time.
Argument `realtime` must be either an integer unix timestamp
or datetime.datetime instance.
"""
if isinstance(realtime, _datetime.datetime):
realtime = float(realtime.strftime("%s.%f")) * 1000000
return super(Reader, self).seek_realtime(int(realtime))
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either
seconds or a datetime.timedelta instance. Argument `bootid`
is a string or UUID representing which boot the monotonic time
is reference to. Defaults to current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
            monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.get_hex()
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
        `messageid` can be a string of hexadecimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.get_hex()
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID equal to current boot ID or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
def get_catalog(mid):
if isinstance(mid, _uuid.UUID):
mid = mid.get_hex()
return _get_catalog(mid)
def _make_line(field, value):
if isinstance(value, bytes):
return field.encode('utf-8') + b'=' + value
elif isinstance(value, int):
return field + '=' + str(value)
else:
return field + '=' + value
def send(MESSAGE, MESSAGE_ID=None,
CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None,
**kwargs):
r"""Send a message to the journal.
>>> journal.send('Hello world')
>>> journal.send('Hello, again, world', FIELD2='Greetings!')
>>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef')
Value of the MESSAGE argument will be used for the MESSAGE=
field. MESSAGE must be a string and will be sent as UTF-8 to
the journal.
MESSAGE_ID can be given to uniquely identify the type of
message. It must be a string or a uuid.UUID object.
CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to
    identify the caller. Unless at least one of the three is given,
values are extracted from the stack frame of the caller of
send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE
must be an integer.
Additional fields for the journal entry can only be specified
as keyword arguments. The payload can be either a string or
bytes. A string will be sent as UTF-8, and bytes will be sent
as-is to the journal.
Other useful fields include PRIORITY, SYSLOG_FACILITY,
SYSLOG_IDENTIFIER, SYSLOG_PID.
"""
args = ['MESSAGE=' + MESSAGE]
if MESSAGE_ID is not None:
id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)
args.append('MESSAGE_ID=' + id)
if CODE_LINE == CODE_FILE == CODE_FUNC == None:
CODE_FILE, CODE_LINE, CODE_FUNC = \
_traceback.extract_stack(limit=2)[0][:3]
if CODE_FILE is not None:
args.append('CODE_FILE=' + CODE_FILE)
if CODE_LINE is not None:
args.append('CODE_LINE={:d}'.format(CODE_LINE))
if CODE_FUNC is not None:
args.append('CODE_FUNC=' + CODE_FUNC)
args.extend(_make_line(key, val) for key, val in kwargs.items())
return sendv(*args)
def stream(identifier, priority=LOG_DEBUG, level_prefix=False):
r"""Return a file object wrapping a stream to journal.
    Log messages written to this file as simple newline separated
text strings are written to the journal.
The file will be line buffered, so messages are actually sent
after a newline character is written.
>>> stream = journal.stream('myapp')
>>> stream
<open file '<fdopen>', mode 'w' at 0x...>
>>> stream.write('message...\n')
will produce the following message in the journal::
PRIORITY=7
SYSLOG_IDENTIFIER=myapp
MESSAGE=message...
    Using the interface with print might be more convenient:
>>> from __future__ import print_function
>>> print('message...', file=stream)
priority is the syslog priority, one of `LOG_EMERG`,
`LOG_ALERT`, `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`,
`LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.
level_prefix is a boolean. If true, kernel-style log priority
level prefixes (such as '<1>') are interpreted. See
sd-daemon(3) for more information.
"""
fd = stream_fd(identifier, priority, level_prefix)
return _os.fdopen(fd, 'w', 1)
class JournalHandler(_logging.Handler):
"""Journal handler class for the Python logging framework.
Please see the Python logging module documentation for an
overview: http://docs.python.org/library/logging.html.
To create a custom logger whose messages go only to journal:
>>> log = logging.getLogger('custom_logger_name')
>>> log.propagate = False
>>> log.addHandler(journal.JournalHandler())
>>> log.warn("Some message: %s", detail)
Note that by default, message levels `INFO` and `DEBUG` are
ignored by the logging framework. To enable those log levels:
>>> log.setLevel(logging.DEBUG)
To redirect all logging messages to journal regardless of where
they come from, attach it to the root logger:
>>> logging.root.addHandler(journal.JournalHandler())
For more complex configurations when using `dictConfig` or
`fileConfig`, specify `systemd.journal.JournalHandler` as the
handler class. Only standard handler configuration options
are supported: `level`, `formatter`, `filters`.
To attach journal MESSAGE_ID, an extra field is supported:
>>> import uuid
>>> mid = uuid.UUID('0123456789ABCDEF0123456789ABCDEF')
>>> log.warn("Message with ID", extra={'MESSAGE_ID': mid})
Fields to be attached to all messages sent through this
handler can be specified as keyword arguments. This probably
makes sense only for SYSLOG_IDENTIFIER and similar fields
which are constant for the whole program:
>>> journal.JournalHandler(SYSLOG_IDENTIFIER='my-cool-app')
The following journal fields will be sent:
`MESSAGE`, `PRIORITY`, `THREAD_NAME`, `CODE_FILE`, `CODE_LINE`,
`CODE_FUNC`, `LOGGER` (name as supplied to getLogger call),
`MESSAGE_ID` (optional, see above), `SYSLOG_IDENTIFIER` (defaults
to sys.argv[0]).
"""
def __init__(self, level=_logging.NOTSET, **kwargs):
super(JournalHandler, self).__init__(level)
for name in kwargs:
if not _valid_field_name(name):
raise ValueError('Invalid field name: ' + name)
if 'SYSLOG_IDENTIFIER' not in kwargs:
kwargs['SYSLOG_IDENTIFIER'] = _sys.argv[0]
self._extra = kwargs
def emit(self, record):
"""Write record as journal event.
MESSAGE is taken from the message provided by the
user, and PRIORITY, LOGGER, THREAD_NAME,
CODE_{FILE,LINE,FUNC} fields are appended
automatically. In addition, record.MESSAGE_ID will be
used if present.
"""
try:
msg = self.format(record)
pri = self.mapPriority(record.levelno)
mid = getattr(record, 'MESSAGE_ID', None)
send(msg,
MESSAGE_ID=mid,
PRIORITY=format(pri),
LOGGER=record.name,
THREAD_NAME=record.threadName,
CODE_FILE=record.pathname,
CODE_LINE=record.lineno,
CODE_FUNC=record.funcName,
**self._extra)
except Exception:
self.handleError(record)
@staticmethod
def mapPriority(levelno):
"""Map logging levels to journald priorities.
Since Python log level numbers are "sparse", we have
to map numbers in between the standard levels too.
"""
if levelno <= _logging.DEBUG:
return LOG_DEBUG
elif levelno <= _logging.INFO:
return LOG_INFO
elif levelno <= _logging.WARNING:
return LOG_WARNING
elif levelno <= _logging.ERROR:
return LOG_ERR
elif levelno <= _logging.CRITICAL:
return LOG_CRIT
else:
return LOG_ALERT
| hach-que/systemd-packaged | src/python-systemd/journal.py | Python | gpl-2.0 | 20,444 |
# -*- test-case-name: twisted.test.test_newcred -*-
from twisted.internet import defer
from twisted.python import components, failure
from twisted.cred import error, credentials
class ICredentialsChecker(components.Interface):
"""I check sub-interfaces of ICredentials.
@cvar credentialInterfaces: A list of sub-interfaces of ICredentials which
specifies which I may check.
"""
def requestAvatarId(self, credentials):
"""
@param credentials: something which implements one of the interfaces in
self.credentialInterfaces.
@return: a Deferred which will fire a string which identifies an
avatar, an empty tuple to specify an authenticated anonymous user
(provided as checkers.ANONYMOUS) or fire a Failure(UnauthorizedLogin).
A note on anonymity - We do not want None as the value for anonymous
because it is too easy to accidentally return it. We do not want the
empty string, because it is too easy to mistype a password file. For
example, an .htpasswd file may contain the lines: ['hello:asdf',
'world:asdf', 'goodbye', ':world']. This misconfiguration will have an
ill effect in any case, but accidentally granting anonymous access is a
worse failure mode than simply granting access to an untypeable
username. We do not want an instance of 'object', because that would
create potential problems with persistence.
"""
ANONYMOUS = ()
class AllowAnonymousAccess:
__implements__ = ICredentialsChecker
credentialInterfaces = credentials.IAnonymous,
def requestAvatarId(self, credentials):
return defer.succeed(ANONYMOUS)
class InMemoryUsernamePasswordDatabaseDontUse:
credentialInterfaces = credentials.IUsernamePassword,
__implements__ = ICredentialsChecker
def __init__(self):
self.users = {}
def addUser(self, username, password):
self.users[username] = password
def _cbPasswordMatch(self, matched, username):
if matched:
return username
else:
return failure.Failure(error.UnauthorizedLogin())
def requestAvatarId(self, credentials):
        if credentials.username in self.users:
return defer.maybeDeferred(
credentials.checkPassword,
self.users[credentials.username]).addCallback(
self._cbPasswordMatch, credentials.username)
else:
return defer.fail(error.UnauthorizedLogin())
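# Usage sketch (illustrative only):
#   from twisted.cred import credentials
#   checker = InMemoryUsernamePasswordDatabaseDontUse()
#   checker.addUser("alice", "secret")
#   d = checker.requestAvatarId(credentials.UsernamePassword("alice", "secret"))
#   # fires with "alice" on a match, or results in an UnauthorizedLogin failure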
| fxia22/ASM_xf | PythonD/site_python/twisted/cred/checkers.py | Python | gpl-2.0 | 2,547 |
## update-hue-ini.py
##
## This script will extract the appropriate IBM Analytics for Apache Hadoop credentials from the VCAP_SERVICES
## environment variable inside a running container. It will add the username and password to the hue.ini file
## so that the hue application has access to a specific instance
import sys
import os
import json
username = None
password = None
webhdfsurl = None
srcfile = sys.argv[1]
destfile = sys.argv[2]
if "VCAP_SERVICES" in os.environ:
vcaps = json.loads(os.environ["VCAP_SERVICES"])
if "Analytics for Apache Hadoop" in vcaps:
username = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["userid"]
password = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["password"]
webhdfsurl = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["WebhdfsUrl"]
else:
if "WEBHDFS_USER" in os.environ:
username=os.environ["WEBHDFS_USER"]
if "WEBHDFS_PASSWORD" in os.environ:
password=os.environ["WEBHDFS_PASSWORD"]
if "WEBHDFS_URL" in os.environ:
webhdfsurl=os.environ["WEBHDFS_URL"]
if (username is not None and password is not None and webhdfsurl is not None):
filedata = None
with open (srcfile,'r') as file:
filedata = file.read()
filedata = filedata.replace('%instance_user%', username)
filedata = filedata.replace('%instance_user_password%', password)
filedata = filedata.replace('%webhdfs_url%', webhdfsurl)
with open (destfile,'w') as file:
file.write(filedata)
sys.exit(0)
else:
sys.stderr.write('Fatal error: cannot find Web HDFS credentials and/or endpoint\n')
if username is None:
sys.stderr.write('username missing\n')
if password is None:
sys.stderr.write('password missing\n')
if webhdfsurl is None:
sys.stderr.write('URL endpoint missing\n')
sys.exit(1)
| vmanoria/bluemix-hue-filebrowser | update-hue-ini.py | Python | gpl-2.0 | 1,829 |
#!/usr/bin/python
"""
Program that parses standard-format results,
computes averages and checks for regression bugs.
:copyright: Red Hat 2011-2012
:author: Amos Kong <[email protected]>
"""
import os
import sys
import re
import commands
import warnings
import ConfigParser
import MySQLdb
def exec_sql(cmd, conf="../../global_config.ini"):
config = ConfigParser.ConfigParser()
config.read(conf)
user = config.get("AUTOTEST_WEB", "user")
passwd = config.get("AUTOTEST_WEB", "password")
db = config.get("AUTOTEST_WEB", "database")
db_type = config.get("AUTOTEST_WEB", "db_type")
if db_type != 'mysql':
print "regression.py: only support mysql database!"
sys.exit(1)
conn = MySQLdb.connect(host="localhost", user=user,
passwd=passwd, db=db)
cursor = conn.cursor()
cursor.execute(cmd)
rows = cursor.fetchall()
lines = []
for row in rows:
line = []
for c in row:
line.append(str(c))
lines.append(" ".join(line))
cursor.close()
conn.close()
return lines
def get_test_keyval(jobid, keyname, default=''):
idx = exec_sql("select job_idx from tko_jobs where afe_job_id=%s"
% jobid)[-1]
test_idx = exec_sql('select test_idx from tko_tests where job_idx=%s'
% idx)[3]
try:
return exec_sql('select value from tko_test_attributes'
' where test_idx=%s and attribute="%s"'
% (test_idx, keyname))[-1]
except:
return default
class Sample(object):
""" Collect test results in same environment to a sample """
def __init__(self, sample_type, arg):
def generate_raw_table(test_dict):
ret_dict = []
tmp = []
sample_type = category = None
for i in test_dict:
line = i.split('|')[1:]
if not sample_type:
sample_type = line[0:2]
if sample_type != line[0:2]:
ret_dict.append('|'.join(sample_type + tmp))
sample_type = line[0:2]
tmp = []
if "e+" in line[-1]:
tmp.append("%f" % float(line[-1]))
elif 'e-' in line[-1]:
tmp.append("%f" % float(line[-1]))
elif not (re.findall("[a-zA-Z]", line[-1]) or is_int(line[-1])):
tmp.append("%f" % float(line[-1]))
else:
tmp.append(line[-1])
if category != i.split('|')[0]:
category = i.split('|')[0]
ret_dict.append("Category:" + category.strip())
ret_dict.append(self.categories)
ret_dict.append('|'.join(sample_type + tmp))
return ret_dict
if sample_type == 'filepath':
files = arg.split()
self.files_dict = []
for i in range(len(files)):
fd = open(files[i], "r")
f = []
for l in fd.readlines():
l = l.strip()
if re.findall("^### ", l):
if "kvm-userspace-ver" in l:
self.kvmver = l.split(':')[-1]
elif "kvm_version" in l:
self.hostkernel = l.split(':')[-1]
elif "guest-kernel-ver" in l:
self.guestkernel = l.split(':')[-1]
elif "session-length" in l:
self.len = l.split(':')[-1]
else:
f.append(l.strip())
self.files_dict.append(f)
fd.close()
sysinfodir = os.path.join(os.path.dirname(files[0]), "../../sysinfo/")
sysinfodir = os.path.realpath(sysinfodir)
cpuinfo = commands.getoutput("cat %s/cpuinfo" % sysinfodir)
lscpu = commands.getoutput("cat %s/lscpu" % sysinfodir)
meminfo = commands.getoutput("cat %s/meminfo" % sysinfodir)
lspci = commands.getoutput("cat %s/lspci_-vvnn" % sysinfodir)
partitions = commands.getoutput("cat %s/partitions" % sysinfodir)
fdisk = commands.getoutput("cat %s/fdisk_-l" % sysinfodir)
status_path = os.path.join(os.path.dirname(files[0]), "../status")
status_file = open(status_path, 'r')
content = status_file.readlines()
self.testdata = re.findall("localtime=(.*)\t", content[-1])[-1]
cpunum = len(re.findall("processor\s+: \d", cpuinfo))
cpumodel = re.findall("Model name:\s+(.*)", lscpu)
socketnum = int(re.findall("Socket\(s\):\s+(\d+)", lscpu)[0])
corenum = int(re.findall("Core\(s\) per socket:\s+(\d+)", lscpu)[0]) * socketnum
threadnum = int(re.findall("Thread\(s\) per core:\s+(\d+)", lscpu)[0]) * corenum
numanodenum = int(re.findall("NUMA node\(s\):\s+(\d+)", lscpu)[0])
memnum = float(re.findall("MemTotal:\s+(\d+)", meminfo)[0]) / 1024 / 1024
nicnum = len(re.findall("\d+:\d+\.0 Ethernet", lspci))
disknum = re.findall("sd\w+\S", partitions)
fdiskinfo = re.findall("Disk\s+(/dev/sd.*\s+GiB),", fdisk)
elif sample_type == 'database':
jobid = arg
self.kvmver = get_test_keyval(jobid, "kvm-userspace-ver")
self.hostkernel = get_test_keyval(jobid, "kvm_version")
self.guestkernel = get_test_keyval(jobid, "guest-kernel-ver")
self.len = get_test_keyval(jobid, "session-length")
self.categories = get_test_keyval(jobid, "category")
idx = exec_sql("select job_idx from tko_jobs where afe_job_id=%s"
% jobid)[-1]
data = exec_sql("select test_idx,iteration_key,iteration_value"
" from tko_perf_view where job_idx=%s" % idx)
testidx = None
job_dict = []
test_dict = []
for l in data:
s = l.split()
if not testidx:
testidx = s[0]
if testidx != s[0]:
job_dict.append(generate_raw_table(test_dict))
test_dict = []
testidx = s[0]
test_dict.append(' | '.join(s[1].split('--')[0:] + s[-1:]))
job_dict.append(generate_raw_table(test_dict))
self.files_dict = job_dict
self.version = " userspace: %s\n host kernel: %s\n guest kernel: %s" % (
self.kvmver, self.hostkernel, self.guestkernel)
nrepeat = len(self.files_dict)
if nrepeat < 2:
print "`nrepeat' should be larger than 1!"
sys.exit(1)
self.desc = """<hr>Machine Info:
o CPUs(%s * %s), Cores(%s), Threads(%s), Sockets(%s),
o NumaNodes(%s), Memory(%.1fG), NICs(%s)
o Disks(%s | %s)
Please check sysinfo directory in autotest result to get more details.
(eg: http://autotest-server.com/results/5057-autotest/host1/sysinfo/)
<hr>""" % (cpunum, cpumodel, corenum, threadnum, socketnum, numanodenum, memnum, nicnum, fdiskinfo, disknum)
self.desc += """ - Every Avg line represents the average value based on *%d* repetitions of the same test,
and the following SD line represents the Standard Deviation between the *%d* repetitions.
- The Standard deviation is displayed as a percentage of the average.
- The significance of the differences between the two averages is calculated using unpaired T-test that
takes into account the SD of the averages.
- The paired t-test is computed for the averages of the same category.
""" % (nrepeat, nrepeat)
def getAvg(self, avg_update=None):
return self._process_files(self.files_dict, self._get_list_avg,
avg_update=avg_update)
def getAvgPercent(self, avgs_dict):
return self._process_files(avgs_dict, self._get_augment_rate)
def getSD(self):
return self._process_files(self.files_dict, self._get_list_sd)
def getSDRate(self, sds_dict):
return self._process_files(sds_dict, self._get_rate)
def getTtestPvalue(self, fs_dict1, fs_dict2, paired=None, ratio=None):
"""
scipy lib is used to compute p-value of Ttest
scipy: http://www.scipy.org/
t-test: http://en.wikipedia.org/wiki/Student's_t-test
"""
try:
from scipy import stats
import numpy as np
except ImportError:
print "No python scipy/numpy library installed!"
return None
ret = []
s1 = self._process_files(fs_dict1, self._get_list_self, merge=False)
s2 = self._process_files(fs_dict2, self._get_list_self, merge=False)
        # s*[line][col] contains items (line*col) of all sample files
for line in range(len(s1)):
tmp = []
if type(s1[line]) != list:
tmp = s1[line]
else:
if len(s1[line][0]) < 2:
continue
for col in range(len(s1[line])):
avg1 = self._get_list_avg(s1[line][col])
avg2 = self._get_list_avg(s2[line][col])
sample1 = np.array(s1[line][col])
sample2 = np.array(s2[line][col])
warnings.simplefilter("ignore", RuntimeWarning)
if (paired):
if (ratio):
(_, p) = stats.ttest_rel(np.log(sample1), np.log(sample2))
else:
(_, p) = stats.ttest_rel(sample1, sample2)
else:
(_, p) = stats.ttest_ind(sample1, sample2)
flag = "+"
if float(avg1) > float(avg2):
flag = "-"
tmp.append(flag + "%f" % (1 - p))
tmp = "|".join(tmp)
ret.append(tmp)
return ret
def _get_rate(self, data):
""" num2 / num1 * 100 """
result = "0.0"
if len(data) == 2 and float(data[0]) != 0:
result = float(data[1]) / float(data[0]) * 100
if result > 100:
result = "%.2f%%" % result
else:
result = "%.4f%%" % result
return result
def _get_augment_rate(self, data):
""" (num2 - num1) / num1 * 100 """
result = "+0.0"
if len(data) == 2 and float(data[0]) != 0:
result = (float(data[1]) - float(data[0])) / float(data[0]) * 100
if result > 100:
result = "%+.2f%%" % result
else:
result = "%+.4f%%" % result
return result
def _get_list_sd(self, data):
"""
sumX = x1 + x2 + ... + xn
avgX = sumX / n
sumSquareX = x1^2 + ... + xn^2
SD = sqrt([sumSquareX - (n * (avgX ^ 2))] / (n - 1))
"""
o_sum = sqsum = 0.0
n = len(data)
for i in data:
o_sum += float(i)
sqsum += float(i) ** 2
avg = o_sum / n
if avg == 0 or n == 1 or sqsum - (n * avg ** 2) <= 0:
return "0.0"
return "%f" % (((sqsum - (n * avg ** 2)) / (n - 1)) ** 0.5)
def _get_list_avg(self, data):
""" Compute the average of list entries """
o_sum = 0.0
for i in data:
o_sum += float(i)
return "%f" % (o_sum / len(data))
def _get_list_self(self, data):
""" Use this to convert sample dicts """
return data
def _process_lines(self, files_dict, row, func, avg_update, merge):
""" Use unified function to process same lines of different samples """
lines = []
ret = []
for i in range(len(files_dict)):
lines.append(files_dict[i][row].split("|"))
for col in range(len(lines[0])):
data_list = []
for i in range(len(lines)):
tmp = lines[i][col].strip()
if is_int(tmp):
data_list.append(int(tmp))
else:
data_list.append(float(tmp))
ret.append(func(data_list))
if avg_update:
for i in avg_update.split('|'):
l = i.split(',')
ret[int(l[0])] = "%f" % (float(ret[int(l[1])]) /
float(ret[int(l[2])]))
if merge:
return "|".join(ret)
return ret
def _process_files(self, files_dict, func, avg_update=None, merge=True):
"""
Process dicts of sample files with assigned function,
func has one list augment.
"""
ret_lines = []
for i in range(len(files_dict[0])):
if re.findall("[a-zA-Z]", files_dict[0][i]):
ret_lines.append(files_dict[0][i].strip())
else:
line = self._process_lines(files_dict, i, func, avg_update,
merge)
ret_lines.append(line)
return ret_lines
def display(lists, rates, allpvalues, f, ignore_col, o_sum="Augment Rate",
prefix0=None, prefix1=None, prefix2=None, prefix3=None):
"""
Display lists data to standard format
param lists: row data lists
param rates: augment rates lists
param f: result output filepath
param ignore_col: do not display some columns
param o_sum: compare result summary
param prefix0: output prefix in head lines
param prefix1: output prefix in Avg/SD lines
param prefix2: output prefix in Diff Avg/P-value lines
param prefix3: output prefix in total Sign line
"""
def str_ignore(out, split=False):
out = out.split("|")
for i in range(ignore_col):
out[i] = " "
if split:
return "|".join(out[ignore_col:])
return "|".join(out)
def tee_line(content, filepath, n=None):
fd = open(filepath, "a")
print content
out = ""
out += "<TR ALIGN=CENTER>"
content = content.split("|")
for i in range(len(content)):
if not is_int(content[i]) and is_float(content[i]):
if "+" in content[i] or "-" in content[i]:
if float(content[i]) > 100:
content[i] = "%+.2f" % float(content[i])
else:
content[i] = "%+.4f" % float(content[i])
elif float(content[i]) > 100:
content[i] = "%.2f" % float(content[i])
else:
content[i] = "%.4f" % float(content[i])
if n and i >= 2 and i < ignore_col + 2:
out += "<TD ROWSPAN=%d WIDTH=1%% >%.0f</TD>" % (n, float(content[i]))
else:
out += "<TD WIDTH=1%% >%s</TD>" % content[i]
out += "</TR>"
fd.write(out + "\n")
fd.close()
for l in range(len(lists[0])):
if not re.findall("[a-zA-Z]", lists[0][l]):
break
tee("<TABLE BORDER=1 CELLSPACING=1 CELLPADDING=1 width=10%><TBODY>",
f)
tee("<h3>== %s " % o_sum + "==</h3>", f)
category = 0
for i in range(len(lists[0])):
for n in range(len(lists)):
is_diff = False
for j in range(len(lists)):
if lists[0][i] != lists[j][i]:
is_diff = True
if len(lists) == 1 and not re.findall("[a-zA-Z]", lists[j][i]):
is_diff = True
pfix = prefix1[0]
if len(prefix1) != 1:
pfix = prefix1[n]
if is_diff:
if n == 0:
tee_line(pfix + lists[n][i], f, n=len(lists) + len(rates))
else:
tee_line(pfix + str_ignore(lists[n][i], True), f)
if not is_diff and n == 0:
if '|' in lists[n][i]:
tee_line(prefix0 + lists[n][i], f)
elif "Category:" in lists[n][i]:
if category != 0 and prefix3:
if len(allpvalues[category - 1]) > 0:
tee_line(prefix3 + str_ignore(
allpvalues[category - 1][0]), f)
tee("</TBODY></TABLE>", f)
tee("<br>", f)
tee("<TABLE BORDER=1 CELLSPACING=1 CELLPADDING=1 "
"width=10%><TBODY>", f)
category += 1
tee("<TH colspan=3 >%s</TH>" % lists[n][i], f)
else:
tee("<TH colspan=3 >%s</TH>" % lists[n][i], f)
for n in range(len(rates)):
if lists[0][i] != rates[n][i] and (not re.findall("[a-zA-Z]",
rates[n][i]) or "nan" in rates[n][i]):
tee_line(prefix2[n] + str_ignore(rates[n][i], True), f)
if prefix3 and len(allpvalues[-1]) > 0:
tee_line(prefix3 + str_ignore(allpvalues[category - 1][0]), f)
tee("</TBODY></TABLE>", f)
def analyze(test, sample_type, arg1, arg2, configfile):
""" Compute averages/p-vales of two samples, print results nicely """
config = ConfigParser.ConfigParser()
config.read(configfile)
ignore_col = int(config.get(test, "ignore_col"))
avg_update = config.get(test, "avg_update")
desc = config.get(test, "desc")
def get_list(directory):
result_file_pattern = config.get(test, "result_file_pattern")
cmd = 'find %s|grep "%s.*/%s"' % (directory, test, result_file_pattern)
print cmd
return commands.getoutput(cmd)
if sample_type == 'filepath':
arg1 = get_list(arg1)
arg2 = get_list(arg2)
commands.getoutput("rm -f %s.*html" % test)
s1 = Sample(sample_type, arg1)
avg1 = s1.getAvg(avg_update=avg_update)
sd1 = s1.getSD()
s2 = Sample(sample_type, arg2)
avg2 = s2.getAvg(avg_update=avg_update)
sd2 = s2.getSD()
sd1 = s1.getSDRate([avg1, sd1])
sd2 = s1.getSDRate([avg2, sd2])
avgs_rate = s1.getAvgPercent([avg1, avg2])
navg1 = []
navg2 = []
allpvalues = []
tmp1 = []
tmp2 = []
for i in range(len(avg1)):
if not re.findall("[a-zA-Z]", avg1[i]):
tmp1.append([avg1[i]])
tmp2.append([avg2[i]])
elif 'Category' in avg1[i] and i != 0:
navg1.append(tmp1)
navg2.append(tmp2)
tmp1 = []
tmp2 = []
navg1.append(tmp1)
navg2.append(tmp2)
for i in range(len(navg1)):
allpvalues.append(s1.getTtestPvalue(navg1[i], navg2[i], True, True))
pvalues = s1.getTtestPvalue(s1.files_dict, s2.files_dict, False)
rlist = [avgs_rate]
if pvalues:
# p-value list isn't null
rlist.append(pvalues)
desc = desc % s1.len
tee("<pre>####1. Description of setup#1\n%s\n test data: %s</pre>"
% (s1.version, s1.testdata), "%s.html" % test)
tee("<pre>####2. Description of setup#2\n%s\n test data: %s</pre>"
% (s2.version, s2.testdata), "%s.html" % test)
tee("<pre>" + '\n'.join(desc.split('\\n')) + "</pre>", test + ".html")
tee("<pre>" + s1.desc + "</pre>", test + ".html")
display([avg1, sd1, avg2, sd2], rlist, allpvalues, test + ".html",
ignore_col, o_sum="Regression Testing: %s" % test, prefix0="#|Tile|",
prefix1=["1|Avg|", " |%SD|", "2|Avg|", " |%SD|"],
prefix2=["-|%Diff between Avg|", "-|Significance|"],
prefix3="-|Total Significance|")
display(s1.files_dict, [avg1], [], test + ".avg.html", ignore_col,
o_sum="Raw data of sample 1", prefix0="#|Tile|",
prefix1=[" | |"],
prefix2=["-|Avg |"], prefix3="")
display(s2.files_dict, [avg2], [], test + ".avg.html", ignore_col,
o_sum="Raw data of sample 2", prefix0="#|Tile|",
prefix1=[" | |"],
prefix2=["-|Avg |"], prefix3="")
def is_int(n):
try:
int(n)
return True
except ValueError:
return False
def is_float(n):
try:
float(n)
return True
except ValueError:
return False
def tee(content, filepath):
""" Write content to standard output and filepath """
fd = open(filepath, "a")
fd.write(content + "\n")
fd.close()
print content
if __name__ == "__main__":
if len(sys.argv) != 5:
this = os.path.basename(sys.argv[0])
print 'Usage: %s <testname> filepath <dir1> <dir2>' % this
print ' or %s <testname> db <jobid1> <jobid2>' % this
sys.exit(1)
analyze(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], 'perf.conf')
| will-Do/avocado-vt | scripts/regression.py | Python | gpl-2.0 | 20,876 |
import unittest, tempfile, sys, os.path
datadir = os.environ.get('APPORT_DATA_DIR', '/usr/share/apport')
sys.path.insert(0, os.path.join(datadir, 'general-hooks'))
import parse_segv
# Default global registers, maps, and disassembly for testing
regs = '''eax 0xffffffff -1
ecx 0xbfc6af40 -1077498048
edx 0x1 1
ebx 0x26eff4 2551796
esp 0xbfc6af24 0xbfc6af24
ebp 0xbfc6af28 0xbfc6af28
esi 0x826bb60 136756064
edi 0x8083480 134755456
eip 0x808354e 0x808354e <main+14>
eflags 0x200286 [ PF SF IF ID ]
cs 0x73 115
ss 0x7b 123
ds 0x7b 123
es 0x7b 123
fs 0x4 4
gs 0x33 51
'''
regs64 = '''rax 0xffffffffffffffff -1
rbx 0x26eff4 2551796
rcx 0xffffffffffffffff -1
rdx 0xffffffffff600180 -10485376
rsi 0x0 0
rdi 0x7fffffffe3b0 140737488348080
rbp 0x0 0x0
rsp 0x0000bfc6af24 0x0000bfc6af24
r8 0x0 0
r9 0x0 0
r10 0x7fffffffe140 140737488347456
r11 0x246 582
r12 0x7fffffffe400 140737488348160
r13 0x7fffffffe468 140737488348264
r14 0x1 1
r15 0x7fffffffe460 140737488348256
rip 0x7ffff790be10 0x7ffff790be10 <nanosleep+16>
eflags 0x246 [ PF ZF IF ]
cs 0x33 51
ss 0x2b 43
ds 0x0 0
es 0x0 0
fs 0x0 0
gs 0x0 0
fctrl 0x37f 895
fstat 0x0 0
ftag 0xffff 65535
fiseg 0x0 0
fioff 0x40303a 4206650
foseg 0x0 0
fooff 0x0 0
fop 0x5d8 1496
mxcsr 0x1f80 [ IM DM ZM OM UM PM ]
'''
maps = '''00110000-0026c000 r-xp 00000000 08:06 375131 /lib/tls/i686/cmov/libc-2.9.so
0026c000-0026d000 ---p 0015c000 08:06 375131 /lib/tls/i686/cmov/libc-2.9.so
0026d000-0026f000 r--p 0015c000 08:06 375131 /lib/tls/i686/cmov/libc-2.9.so
0026f000-00270000 rw-p 0015e000 08:06 375131 /lib/tls/i686/cmov/libc-2.9.so
00270000-00273000 rw-p 00000000 00:00 0
002c1000-002e5000 r-xp 00000000 08:06 375135 /lib/tls/i686/cmov/libm-2.9.so
002e5000-002e6000 r--p 00023000 08:06 375135 /lib/tls/i686/cmov/libm-2.9.so
002e6000-002e7000 rw-p 00024000 08:06 375135 /lib/tls/i686/cmov/libm-2.9.so
00318000-00334000 r-xp 00000000 08:06 977846 /lib/ld-2.9.so
00334000-00335000 r--p 0001b000 08:06 977846 /lib/ld-2.9.so
00335000-00336000 rw-p 0001c000 08:06 977846 /lib/ld-2.9.so
0056e000-005a1000 r-xp 00000000 08:06 65575 /lib/libncurses.so.5.7
005a1000-005a3000 r--p 00033000 08:06 65575 /lib/libncurses.so.5.7
005a3000-005a4000 rw-p 00035000 08:06 65575 /lib/libncurses.so.5.7
00b67000-00b68000 r-xp 00000000 00:00 0 [vdso]
00bb6000-00bcb000 r-xp 00000000 08:06 375202 /lib/tls/i686/cmov/libpthread-2.9.so
00bcb000-00bcc000 r--p 00014000 08:06 375202 /lib/tls/i686/cmov/libpthread-2.9.so
00bcc000-00bcd000 rw-p 00015000 08:06 375202 /lib/tls/i686/cmov/libpthread-2.9.so
00bcd000-00bcf000 rw-p 00000000 00:00 0
00beb000-00bed000 r-xp 00000000 08:06 375134 /lib/tls/i686/cmov/libdl-2.9.so
00bed000-00bee000 r--p 00001000 08:06 375134 /lib/tls/i686/cmov/libdl-2.9.so
00bee000-00bef000 rw-p 00002000 08:06 375134 /lib/tls/i686/cmov/libdl-2.9.so
00c56000-00c7a000 r-xp 00000000 08:06 1140420 /usr/lib/libexpat.so.1.5.2
00c7a000-00c7c000 r--p 00023000 08:06 1140420 /usr/lib/libexpat.so.1.5.2
00c7c000-00c7d000 rw-p 00025000 08:06 1140420 /usr/lib/libexpat.so.1.5.2
00dce000-00dfa000 r-xp 00000000 08:06 65612 /lib/libreadline.so.5.2
00dfa000-00dfb000 ---p 0002c000 08:06 65612 /lib/libreadline.so.5.2
00dfb000-00dfc000 r--p 0002c000 08:06 65612 /lib/libreadline.so.5.2
00dfc000-00dff000 rw-p 0002d000 08:06 65612 /lib/libreadline.so.5.2
00dff000-00e00000 rw-p 00000000 00:00 0
08048000-0831c000 r-xp 00000000 08:06 1140349 /usr/bin/gdb
0831c000-0831d000 r--p 002d3000 08:06 1140349 /usr/bin/gdb
0831d000-08325000 rw-p 002d4000 08:06 1140349 /usr/bin/gdb
08325000-0833f000 rw-p 00000000 00:00 0
b8077000-b807a000 rw-p 00000000 00:00 0
b8096000-b8098000 rw-p 00000000 00:00 0
bfc57000-bfc6c000 rw-p 00000000 00:00 0 [stack]
'''
disasm = '''0x08083540 <main+0>: lea 0x4(%esp),%ecx
0x08083544 <main+4>: and $0xfffffff0,%esp
0x08083547 <main+7>: pushl -0x4(%ecx)
0x0808354a <main+10>: push %ebp
0x0808354b <main+11>: mov %esp,%ebp
0x0808354d <main+13>: push %ecx
0x0808354e <main+14>: sub $0x14,%esp
0x08083551 <main+17>: mov (%ecx),%eax
0x08083553 <main+19>: mov 0x4(%ecx),%edx
0x08083556 <main+22>: lea -0x14(%ebp),%ecx
0x08083559 <main+25>: movl $0x0,-0xc(%ebp)
0x08083560 <main+32>: movl $0x826bc68,-0x8(%ebp)
0x08083567 <main+39>: mov %eax,-0x14(%ebp)
0x0808356a <main+42>: mov %edx,-0x10(%ebp)
0x0808356d <main+45>: mov %ecx,(%esp)
0x08083570 <main+48>: call 0x8083580 <gdb_main>
0x08083575 <main+53>: add $0x14,%esp
0x08083578 <main+56>: pop %ecx
0x08083579 <main+57>: pop %ebp
0x0808357a <main+58>: lea -0x4(%ecx),%esp
0x0808357d <main+61>: ret
'''
class T(unittest.TestCase):
'''Test Segfault Parser'''
def test_invalid_00_registers(self):
'''Require valid registers'''
regs = 'a 0x10\nb !!!\n'
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, '', '')
try:
segv = parse_segv.ParseSegv(regs, '', '')
except ValueError as e:
self.assertTrue('invalid literal for int()' in str(e), str(e))
regs = 'a 0x10'
disasm = '0x08083540 <main+0>: lea 0x4(%esp),%ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.regs['a'], 0x10, segv)
segv.regs = None
self.assertRaises(ValueError, segv.parse_disassembly, '')
def test_invalid_01_disassembly(self):
'''Require valid disassembly'''
regs = 'a 0x10'
disasm = ''
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, disasm, '')
disasm = 'Dump ...'
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, disasm, '')
disasm = 'Dump ...\nmonkey'
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, disasm, '')
disasm = 'monkey'
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, disasm, '')
disasm = '0x1111111111: Cannot access memory at address 0x1111111111\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x1111111111, segv.pc)
self.assertEqual(segv.insn, None, segv.insn)
self.assertEqual(segv.src, None, segv.src)
self.assertEqual(segv.dest, None, segv.dest)
disasm = '0x2111111111: \n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x2111111111, segv.pc)
self.assertEqual(segv.insn, None, segv.insn)
self.assertEqual(segv.src, None, segv.src)
self.assertEqual(segv.dest, None, segv.dest)
disasm = '0x8069ff0 <fopen@plt+132220>: cmpb $0x0,(%eax,%ebx,1)\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x8069ff0, segv.pc)
self.assertEqual(segv.insn, 'cmpb', segv.insn)
self.assertEqual(segv.src, '$0x0', segv.src)
self.assertEqual(segv.dest, '(%eax,%ebx,1)', segv.dest)
disasm = '0xb765bb48 <_XSend+440>: call *0x40(%edi)\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0xb765bb48, segv.pc)
self.assertEqual(segv.insn, 'call', segv.insn)
self.assertEqual(segv.src, '*0x40(%edi)', segv.src)
self.assertEqual(segv.dest, None, segv.dest)
disasm = '0xb7aae5a0: call 0xb7a805af <_Unwind_Find_FDE@plt+111>\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0xb7aae5a0, segv.pc)
self.assertEqual(segv.insn, 'call', segv.insn)
self.assertEqual(segv.src, '0xb7a805af', segv.src)
self.assertEqual(segv.dest, None, segv.dest)
disasm = '0x09083540: mov 0x4(%esp),%es:%ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x09083540, segv.pc)
self.assertEqual(segv.insn, 'mov', segv.insn)
self.assertEqual(segv.src, '0x4(%esp)', segv.src)
self.assertEqual(segv.dest, '%es:%ecx', segv.dest)
disasm = '0x08083540 <main+0>: lea 0x4(%esp),%ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x08083540, segv.pc)
self.assertEqual(segv.insn, 'lea', segv.insn)
self.assertEqual(segv.src, '0x4(%esp)', segv.src)
self.assertEqual(segv.dest, '%ecx', segv.dest)
disasm = '''0x404127 <exo_mount_hal_device_mount+167>:
repz cmpsb %es:(%rdi),%ds:(%rsi)\n'''
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x0404127, segv.pc)
self.assertEqual(segv.insn, 'repz cmpsb', segv.insn)
self.assertEqual(segv.src, '%es:(%rdi)', segv.src)
self.assertEqual(segv.dest, '%ds:(%rsi)', segv.dest)
disasm = '0xb031765a <hufftab16+570>: add 0x3430433,%eax'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0xb031765a, segv.pc)
self.assertEqual(segv.insn, 'add', segv.insn)
self.assertEqual(segv.src, '0x3430433', segv.src)
self.assertEqual(segv.dest, '%eax', segv.dest)
disasm = 'Dump ...\n0x08083540 <main+0>: lea 0x4(%esp),%ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x08083540, segv.pc)
self.assertEqual(segv.insn, 'lea', segv.insn)
self.assertEqual(segv.src, '0x4(%esp)', segv.src)
self.assertEqual(segv.dest, '%ecx', segv.dest)
disasm = '0x08083550 <main+0>: nop\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x08083550, segv.pc)
self.assertEqual(segv.insn, 'nop', segv.insn)
self.assertEqual(segv.src, None, segv.src)
self.assertEqual(segv.dest, None, segv.dest)
regs = 'esp 0x444'
disasm = '0x08083560 <main+0>: push %ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x08083560, segv.pc)
self.assertEqual(segv.insn, 'push', segv.insn)
self.assertEqual(segv.src, '%ecx', segv.src)
self.assertEqual(segv.dest, '(%esp)', segv.dest)
# GDB 7.1
regs = 'esp 0x444'
disasm = '=> 0x08083560 <main+0>: push %ecx\n'
segv = parse_segv.ParseSegv(regs, disasm, '')
self.assertEqual(segv.pc, 0x08083560, segv.pc)
self.assertEqual(segv.insn, 'push', segv.insn)
self.assertEqual(segv.src, '%ecx', segv.src)
self.assertEqual(segv.dest, '(%esp)', segv.dest)
def test_ioport_operation(self):
'''I/O port violations'''
regs = 'rax 0x3 3'
disasm = '''0x4087f1 <snd_pcm_hw_params_set_channels_near@plt+19345>:
out %al,$0xb3
'''
maps = '''00400000-00412000 r-xp 00000000 08:04 10371157 /usr/sbin/pommed
00611000-00614000 rw-p 00011000 08:04 10371157 /usr/sbin/pommed
00614000-00635000 rw-p 00614000 00:00 0 [heap]
'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
self.assertEqual(segv.pc, 0x4087f1, segv.pc)
self.assertEqual(segv.insn, 'out', segv.insn)
self.assertEqual(segv.src, '%al', segv.src)
self.assertEqual(segv.dest, '$0xb3', segv.dest)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('disallowed I/O port operation on port 3' in reason, reason)
def test_invalid_02_maps(self):
'''Require valid maps'''
regs = 'a 0x10'
disasm = 'Dump ...\n0x08083540 <main+0>: lea 0x4(%esp),%ecx\n'
maps = 'asdlkfjaadf'
self.assertRaises(ValueError, parse_segv.ParseSegv, regs, disasm, maps)
maps = '''005a3000-005a4000 rw-p 00035000 08:06 65575 /lib/libncurses.so.5.7
00b67000-00b68000 r-xp 00000000 00:00 0 [vdso]
00c67000-00c68000 r--p 00000000 00:00 0 '''
segv = parse_segv.ParseSegv(regs, disasm, maps)
self.assertEqual(segv.maps[0]['start'], 0x005a3000, segv)
self.assertEqual(segv.maps[0]['end'], 0x005a4000, segv)
self.assertEqual(segv.maps[0]['perms'], 'rw-p', segv)
self.assertEqual(segv.maps[0]['name'], '/lib/libncurses.so.5.7', segv)
self.assertEqual(segv.maps[1]['start'], 0x00b67000, segv)
self.assertEqual(segv.maps[1]['end'], 0x00b68000, segv)
self.assertEqual(segv.maps[1]['perms'], 'r-xp', segv)
self.assertEqual(segv.maps[1]['name'], '[vdso]', segv)
self.assertEqual(segv.maps[2]['start'], 0x00c67000, segv)
self.assertEqual(segv.maps[2]['end'], 0x00c68000, segv)
self.assertEqual(segv.maps[2]['perms'], 'r--p', segv)
self.assertEqual(segv.maps[2]['name'], None, segv)
def test_debug(self):
'''Debug mode works'''
regs = 'a 0x10'
disasm = 'Dump ...\n0x08083540 <main+0>: lea 0x4(%esp),%ecx\n'
maps = '''005a3000-005a4000 rw-p 00035000 08:06 65575 /lib/libncurses.so.5.7
00b67000-00b68000 r-xp 00000000 00:00 0 [vdso]
00c67000-00c68000 r--p 00000000 00:00 0 '''
sys.stderr = tempfile.NamedTemporaryFile(prefix='parse_segv-stderr-')
segv = parse_segv.ParseSegv(regs, disasm, maps, debug=True)
self.assertTrue(segv is not None, segv)
def test_register_values(self):
'''Sub-register parsing'''
disasm = '''0x08083540 <main+0>: mov $1,%ecx'''
segv = parse_segv.ParseSegv(regs64, disasm, '')
val = segv.register_value('%rdx')
self.assertEqual(val, 0xffffffffff600180, hex(val))
val = segv.register_value('%edx')
self.assertEqual(val, 0xff600180, hex(val))
val = segv.register_value('%dx')
self.assertEqual(val, 0x0180, hex(val))
val = segv.register_value('%dl')
self.assertEqual(val, 0x80, hex(val))
def test_segv_unknown(self):
'''Handles unknown segfaults'''
disasm = '''0x08083540 <main+0>: mov $1,%ecx'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertFalse(understood, details)
# Verify calculations
self.assertEqual(segv.calculate_arg('(%ecx)'), 0xbfc6af40, segv.regs['ecx'])
self.assertEqual(segv.calculate_arg('0x10(%ecx)'), 0xbfc6af50, segv.regs['ecx'])
self.assertEqual(segv.calculate_arg('-0x20(%ecx)'), 0xbfc6af20, segv.regs['ecx'])
self.assertEqual(segv.calculate_arg('%fs:(%ecx)'), 0xbfc6af44, segv.regs['ecx'])
self.assertEqual(segv.calculate_arg('0x3404403'), 0x3404403, '0x3404403')
self.assertEqual(segv.calculate_arg('*0x40(%edi)'), 0x80834c0, segv.regs['edi'])
self.assertEqual(segv.calculate_arg('(%edx,%ebx,1)'), 0x26eff5, segv.regs['ebx'])
self.assertEqual(segv.calculate_arg('(%eax,%ebx,1)'), 0x26eff3, segv.regs['ebx'])
self.assertEqual(segv.calculate_arg('0x10(,%ebx,1)'), 0x26f004, segv.regs['ebx'])
# Again, but 64bit
disasm = '''0x08083540 <main+0>: mov $1,%rcx'''
segv = parse_segv.ParseSegv(regs64, disasm, maps)
understood, reason, details = segv.report()
self.assertFalse(understood, details)
self.assertEqual(segv.calculate_arg('(%rax,%rbx,1)'), 0x26eff3, segv.regs['rbx'])
def test_segv_pc_missing(self):
'''Handles PC in missing VMA'''
disasm = '''0x00083540 <main+0>: lea 0x4(%esp),%ecx'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('PC (0x00083540) not located in a known VMA region' in details, details)
self.assertTrue('executing unknown VMA' in reason, reason)
disasm = '''0x00083544:'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('PC (0x00083544) not located in a known VMA region' in details, details)
self.assertTrue('executing unknown VMA' in reason, reason)
def test_segv_pc_null(self):
'''Handles PC in NULL VMA'''
disasm = '''0x00000540 <main+0>: lea 0x4(%esp),%ecx'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('PC (0x00000540) not located in a known VMA region' in details, details)
self.assertTrue('executing NULL VMA' in reason, reason)
def test_segv_pc_nx_writable(self):
'''Handles PC in writable NX VMA'''
disasm = '''0x005a3000 <main+0>: lea 0x4(%esp),%ecx'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('PC (0x005a3000) in non-executable VMA region:' in details, details)
self.assertTrue('executing writable VMA /lib/libncurses.so.5.7' in reason, reason)
def test_segv_pc_nx_unwritable(self):
'''Handles PC in non-writable NX VMA'''
disasm = '''0x00dfb000 <main+0>: lea 0x4(%esp),%ecx'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('PC (0x00dfb000) in non-executable VMA region:' in details, details)
self.assertTrue('executing non-writable VMA /lib/libreadline.so.5.2' in reason, reason)
def test_segv_src_missing(self):
'''Handles source in missing VMA'''
reg = regs + 'ecx 0x0006af24 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
# Valid crash
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('source "-0x4(%ecx)" (0x0006af20) not located in a known VMA region' in details, details)
self.assertTrue('reading unknown VMA' in reason, reason)
# Valid crash
disasm = '0x08083547 <main+7>: callq *%ecx'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('source "*%ecx" (0x0006af24) not located in a known VMA region' in details, details)
self.assertTrue('reading unknown VMA' in reason, reason)
def test_segv_src_null(self):
'''Handles source in NULL VMA'''
reg = regs + 'ecx 0x00000024 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('source "-0x4(%ecx)" (0x00000020) not located in a known VMA region' in details, details)
self.assertTrue('reading NULL VMA' in reason, reason)
def test_segv_src_not_readable(self):
'''Handles source not in readable VMA'''
reg = regs + 'ecx 0x0026c080 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('source "-0x4(%ecx)" (0x0026c07c) in non-readable VMA region:' in details, details)
self.assertTrue('reading VMA /lib/tls/i686/cmov/libc-2.9.so' in reason, reason)
self.assertFalse('Stack memory exhausted' in details, details)
self.assertFalse('Stack pointer not within stack segment' in details, details)
def test_segv_dest_missing(self):
        '''Handles destination in missing VMA'''
reg = regs + 'esp 0x0006af24 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('destination "(%esp)" (0x0006af24) not located in a known VMA region' in details, details)
self.assertTrue('writing unknown VMA' in reason, reason)
def test_segv_dest_null(self):
        '''Handles destination in NULL VMA'''
reg = regs + 'esp 0x00000024 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('destination "(%esp)" (0x00000024) not located in a known VMA region' in details, details)
self.assertTrue('writing NULL VMA' in reason, reason)
def test_segv_dest_not_writable(self):
'''Handles destination not in writable VMA'''
reg = regs + 'esp 0x08048080 0xbfc6af24'
disasm = '0x08083547 <main+7>: pushl -0x4(%ecx)'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('destination "(%esp)" (0x08048080) in non-writable VMA region:' in details, details)
self.assertTrue('writing VMA /usr/bin/gdb' in reason, reason)
def test_segv_crackful_disasm(self):
'''Rejects insane disassemblies'''
disasm = '0x08083547 <main+7>: pushl -0x4(blah)'
segv = parse_segv.ParseSegv(regs, disasm, maps)
self.assertRaises(ValueError, segv.report)
disasm = '0x08083547 <main+7>: pushl -04(%ecx)'
segv = parse_segv.ParseSegv(regs, disasm, maps)
self.assertRaises(ValueError, segv.report)
def test_segv_stack_failure(self):
'''Handles walking off the stack'''
# Triggered via "push"
reg = regs + 'esp 0xbfc56ff0 0xbfc56ff0'
disasm = '0x08083547 <main+7>: push %eax'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('destination "(%esp)" (0xbfc56ff0) not located in a known VMA region (needed writable region)!' in details, details)
# Triggered via "call"
reg = regs + 'esp 0xbfc56fff 0xbfc56fff'
disasm = '0x08083547 <main+7>: callq 0x08083540'
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('destination "(%esp)" (0xbfc56fff) not located in a known VMA region (needed writable region)!' in details, details)
self.assertTrue('Stack memory exhausted' in details, details)
# Triggered via unknown reason
reg = regs + 'esp 0xdfc56000 0xdfc56000'
disasm = '''0x08083540 <main+0>: mov $1,%rcx'''
segv = parse_segv.ParseSegv(reg, disasm, maps)
understood, reason, details = segv.report()
self.assertTrue(understood, details)
self.assertTrue('SP (0xdfc56000) not located in a known VMA region (needed readable region)!' in details, details)
self.assertTrue('Stack pointer not within stack segment' in details, details)
def test_segv_stack_kernel_segfault(self):
'''Handles unknown segfaults in kernel'''
# Crash in valid code path
disasm = '''0x0056e010: ret'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertFalse(understood, details)
self.assertTrue('Reason could not be automatically determined.' in details, details)
self.assertFalse('(Unhandled exception in kernel code?)' in details, details)
# Crash from kernel code path
disasm = '''0x00b67422 <__kernel_vsyscall+2>: ret'''
segv = parse_segv.ParseSegv(regs, disasm, maps)
understood, reason, details = segv.report()
self.assertFalse(understood, details)
self.assertTrue('Reason could not be automatically determined. (Unhandled exception in kernel code?)' in details, details)
unittest.main()
| rickysarraf/apport | test/test_parse_segv.py | Python | gpl-2.0 | 24,951 |
from boxbranding import getBoxType, getBrandOEM
from Components.About import about
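# The detected model and version are cached in class attributes, so the
# /proc probing in __init__ below only runs for the first HardwareInfo()
# instance created in a process.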
class HardwareInfo:
device_name = None
device_version = None
def __init__(self):
if HardwareInfo.device_name is not None:
# print "using cached result"
return
HardwareInfo.device_name = "unknown"
try:
file = open("/proc/stb/info/model", "r")
HardwareInfo.device_name = file.readline().strip()
file.close()
try:
file = open("/proc/stb/info/version", "r")
HardwareInfo.device_version = file.readline().strip()
file.close()
except:
pass
except:
print "----------------"
print "you should upgrade to new drivers for the hardware detection to work properly"
print "----------------"
print "fallback to detect hardware via /proc/cpuinfo!!"
try:
rd = open("/proc/cpuinfo", "r").read()
if "Brcm4380 V4.2" in rd:
HardwareInfo.device_name = "dm8000"
print "dm8000 detected!"
elif "Brcm7401 V0.0" in rd:
HardwareInfo.device_name = "dm800"
print "dm800 detected!"
elif "MIPS 4KEc V4.8" in rd:
HardwareInfo.device_name = "dm7025"
print "dm7025 detected!"
except:
pass
def get_device_name(self):
return HardwareInfo.device_name
def get_device_version(self):
return HardwareInfo.device_version
def has_hdmi(self):
return getBrandOEM() in ('xtrend', 'gigablue', 'dags', 'ixuss', 'odin', 'vuplus', 'ini', 'ebox', 'ceryon') or (getBoxType() in ('dm7020hd', 'dm800se', 'dm500hd', 'dm8000') and HardwareInfo.device_version is not None)
def has_deepstandby(self):
return getBoxType() != 'dm800'
def is_nextgen(self):
if about.getCPUString() in ('BCM7346B2', 'BCM7425B2', 'BCM7429B0'):
return True
		return False
| BlackHole/enigma2-1 | lib/python/Tools/HardwareInfo.py | Python | gpl-2.0 | 1,714 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import re
from pandas.compat import (zip, range, lrange, StringIO)
from pandas import (DataFrame, Series, Index, date_range, compat,
Timestamp)
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReplace(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_replace_inplace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
tsframe = self.tsframe.copy()
tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
self.assertRaises(TypeError, self.tsframe.replace, nan, inplace=True)
self.assertRaises(TypeError, self.tsframe.replace, nan)
# mixed type
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
result = self.mixed_frame.replace(np.nan, 0)
expected = self.mixed_frame.fillna(value=0)
assert_frame_equal(result, expected)
tsframe = self.tsframe.copy()
tsframe.replace([nan], [0], inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
def test_regex_replace_scalar(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfobj.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(value=values, regex=to_replace_res)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
# same as above with inplace=True
# lists of regexes and values
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(value=values, regex=to_replace_res, inplace=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self):
# mixed frame to make sure this doesn't break things
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}
dfmix2 = DataFrame(mix2)
res = dfmix2.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(regex=to_replace_res, value=values)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self):
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# the same inplace
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(regex=to_replace_res, value=values, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_dict_mixed(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
# dicts
# single dict {re1: v1}, search the whole frame
# need test for this...
        # list of dicts {re1: v1, re2: v2, ..., reN: vN}, search the whole
# frame
res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
# whole frame
res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
res2 = dfmix.copy()
res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# scalar -> dict
# to_replace regex, {value: value}
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace('a', {'b': nan}, regex=True, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace(regex='a', value={'b': nan}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self):
# nested dicts will not work until this is implemented for Series
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type': ['Q', 'T', 'Q', 'Q', 'T'], 'tmp': 2})
expected = DataFrame({'Type': [0, 1, 0, 0, 1], 'tmp': 2})
result = df.replace({'Type': {'Q': 0, 'T': 1}})
assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
'c': [nan, nan, nan, 'd']})
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self):
# what happens when you try to replace a numeric value with a regex?
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(r'\s*\.\s*', 0, regex=True)
res2 = df.copy()
res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
res2 = df.copy()
res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
res3 = df.copy()
res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
nan,
'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
s1 = Series({'b': r'\s*\.\s*'})
s2 = Series({'b': nan})
res = df.replace(s1, s2, regex=True)
res2 = df.copy()
res2.replace(s1, s2, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=s1, value=s2, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
res = df.replace(0, 'a')
assert_frame_equal(res, expec)
self.assertEqual(res.a.dtype, np.object_)
def test_replace_regex_metachar(self):
metachars = '[]', '()', '\d', '\w', '\s'
for metachar in metachars:
df = DataFrame({'a': [metachar, 'else']})
result = df.replace({'a': {metachar: 'paren'}})
expected = DataFrame({'a': ['paren', 'else']})
assert_frame_equal(result, expected)
def test_replace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
zero_filled = self.tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
self.tsframe['B'][:5] = -1e8
# empty
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
# GH 11698
# test for mixed data types.
df = pd.DataFrame([('-', pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
df1 = df.replace('-', np.nan)
expected_df = pd.DataFrame([(np.nan, pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
assert_frame_equal(df1, expected_df)
def test_replace_list(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r'.', r'e']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', nan, nan],
'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',
'l', 'o']})
assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r'.', r'f']
values = [r'..', r'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',
'h'],
'c': ['h', 'e', 'l', 'o']})
assert_frame_equal(res, expec)
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})
assert_frame_equal(result, expected)
result = df.replace(0, df.mean())
assert_frame_equal(result, expected)
# series to series/dict
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
s = Series({'zero': 0.0, 'one': 2.0})
result = df.replace(s, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})
assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
assert_frame_equal(result, expected)
def test_replace_convert(self):
# gh 3907
df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])
m = {'foo': 1, 'bar': 2, 'bah': 3}
rep = df.replace(m)
expec = Series([np.int64] * 3)
res = rep.dtypes
assert_series_equal(expec, res)
def test_replace_mixed(self):
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
result = self.mixed_frame.replace(np.nan, -18)
expected = self.mixed_frame.fillna(value=-18)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
result = self.mixed_frame.replace(np.nan, -1e8)
expected = self.mixed_frame.fillna(value=-1e8)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
# int block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
df.replace(0, 0.5, inplace=True)
assert_frame_equal(df, expected)
# int block splitting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64'),
'C': Series([1, 2], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64'),
'C': Series([1, 2], dtype='int64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
# to object block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1, 'foo'], dtype='object'),
'B': Series([0, 1], dtype='int64')})
result = df.replace(2, 'foo')
assert_frame_equal(result, expected)
expected = DataFrame({'A': Series(['foo', 'bar'], dtype='object'),
'B': Series([0, 'foo'], dtype='object')})
result = df.replace([1, 2], ['foo', 'bar'])
assert_frame_equal(result, expected)
# test case from
df = DataFrame({'A': Series([3, 0], dtype='int64'),
'B': Series([0, 3], dtype='int64')})
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype('float64')
m = df.mean()
expected.iloc[0, 0] = m[0]
expected.iloc[1, 1] = m[1]
assert_frame_equal(result, expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({'col': {1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({-1: '-', 1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
def test_replace_value_is_none(self):
self.assertRaises(TypeError, self.tsframe.replace, nan)
orig_value = self.tsframe.iloc[0, 0]
orig2 = self.tsframe.iloc[1, 0]
self.tsframe.iloc[0, 0] = nan
self.tsframe.iloc[1, 0] = 1
result = self.tsframe.replace(to_replace={nan: 0})
expected = self.tsframe.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
tsframe = self.tsframe.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
expected = tsframe
assert_frame_equal(expected, result)
self.tsframe.iloc[0, 0] = orig_value
self.tsframe.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
# dtypes
tsframe = self.tsframe.copy().astype(np.float32)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
zero_filled = tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
tsframe['B'][:5] = -1e8
b = tsframe['B']
b[b == -1e8] = nan
tsframe['B'] = b
result = tsframe.fillna(method='bfill')
assert_frame_equal(result, tsframe.fillna(method='bfill'))
def test_replace_dtypes(self):
# int
df = DataFrame({'ints': [1, 2, 3]})
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]})
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)
assert_frame_equal(result, expected)
# bools
df = DataFrame({'bools': [True, False, True]})
result = df.replace(False, True)
self.assertTrue(result.values.all())
# complex blocks
df = DataFrame({'complex': [1j, 2j, 3j]})
result = df.replace(1j, 0j)
expected = DataFrame({'complex': [0j, 2j, 3j]})
assert_frame_equal(result, expected)
# datetime blocks
prev = datetime.today()
now = datetime.today()
df = DataFrame({'datetime64': Index([prev, now, prev])})
result = df.replace(prev, now)
expected = DataFrame({'datetime64': Index([now] * 3)})
assert_frame_equal(result, expected)
def test_replace_input_formats(self):
# both dicts
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(to_rep, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], values[k])
assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],
'C': ['', 'asdf', 'fd']})
assert_frame_equal(result, expected)
# dict to scalar
filled = df.replace(to_rep, 0)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], 0)
assert_frame_equal(filled, DataFrame(expected))
self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(np.nan, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(np.nan, values[k])
assert_frame_equal(filled, DataFrame(expected))
# list to list
to_rep = [np.nan, 0, '']
values = [-2, -1, 'missing']
result = df.replace(to_rep, values)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], values[i], inplace=True)
assert_frame_equal(result, expected)
self.assertRaises(ValueError, df.replace, to_rep, values[1:])
# list to scalar
to_rep = [np.nan, 0, '']
result = df.replace(to_rep, -1)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], -1, inplace=True)
assert_frame_equal(result, expected)
def test_replace_limit(self):
pass
def test_replace_dict_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':
5, 'Strongly Disagree': 1}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,
'Strongly Agree': 5, 'Strongly Disagree': 1})
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
df = DataFrame(dict(A=[nan, 1]))
res1 = df.replace(to_replace={nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])
expected = DataFrame({'A': [0, -1e8]})
assert_frame_equal(res1, res2)
assert_frame_equal(res2, res3)
assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
raw = """fol T_opp T_Dir T_Enh
0 1 0 0 vo
1 2 vr 0 0
2 2 0 0 0
3 3 0 bt 0"""
df = pd.read_csv(StringIO(raw), sep=r'\s+')
res = df.replace({'\D': 1})
assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({'a': [True, False], 'b': list('ab')})
result = df.replace(True, 'a')
expected = DataFrame({'a': ['a', False], 'b': df.b})
assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace('asdf', 'fdsa')
assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
df = DataFrame({0: [True, False], 1: [False, True]})
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
df.replace({'asdf': 'asdb', True: 'yes'})
def test_replace_truthy(self):
df = DataFrame({'a': [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
assert_frame_equal(r, e)
def test_replace_int_to_int_chain(self):
df = DataFrame({'a': lrange(1, 5)})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})
def test_replace_str_to_str_chain(self):
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({'a': astr})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(astr, bstr))})
def test_replace_swapping_bug(self):
df = pd.DataFrame({'a': [True, False, True]})
res = df.replace({'a': {True: 'Y', False: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
df = pd.DataFrame({'a': [0, 1, 0]})
res = df.replace({'a': {0: 'Y', 1: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
def test_replace_period(self):
d = {
'fname': {
'out_augmented_AUG_2011.json':
pd.Period(year=2011, month=8, freq='M'),
'out_augmented_JAN_2011.json':
pd.Period(year=2011, month=1, freq='M'),
'out_augmented_MAY_2012.json':
pd.Period(year=2012, month=5, freq='M'),
'out_augmented_SUBSIDY_WEEK.json':
pd.Period(year=2011, month=4, freq='M'),
'out_augmented_AUG_2012.json':
pd.Period(year=2012, month=8, freq='M'),
'out_augmented_MAY_2011.json':
pd.Period(year=2011, month=5, freq='M'),
'out_augmented_SEP_2013.json':
pd.Period(year=2013, month=9, freq='M')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetime(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),
'out_augmented_JAN_2011.json': pd.Timestamp('2011-01'),
'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),
'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),
'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),
'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),
'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetimetz(self):
# GH 11326
# behaving poorly when presented with a datetime64[ns, tz]
df = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [0, np.nan, 2]})
result = df.replace(np.nan, 1)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': Series([0, 1, 2], dtype='float64')})
assert_frame_equal(result, expected)
result = df.fillna(1)
assert_frame_equal(result, expected)
result = df.replace(0, np.nan)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [np.nan, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.replace(Timestamp('20130102', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Eastern'))
assert_frame_equal(result, expected)
# coerce to object
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Pacific'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Pacific'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({'A': np.nan}, Timestamp('20130104'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
| pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/tests/frame/test_replace.py | Python | gpl-2.0 | 42,783 |
# PopGen 1.1 is A Synthetic Population Generator for Advanced
# Microsimulation Models of Travel Demand
# Copyright (C) 2009, Arizona State University
# See PopGen/License
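# Each list below is a sequence of SQL statements, executed in order, that
# recodes the raw PUMS/ACS sample tables and the Census summary-file counts
# into the categorical person, household and group-quarter variables (and the
# corresponding *_sample / *_marginals tables) used by the population synthesizer.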
DEFAULT_PERSON_PUMS2000_QUERIES = [ "alter table person_pums add column agep bigint",
"alter table person_pums add column gender bigint",
"alter table person_pums add column race bigint",
"alter table person_pums add column employment bigint",
"update person_pums set agep = 1 where age < 5",
"update person_pums set agep = 2 where age >= 5 and age < 15",
"update person_pums set agep = 3 where age >= 15 and age < 25",
"update person_pums set agep = 4 where age >= 25 and age < 35",
"update person_pums set agep = 5 where age >= 35 and age < 45",
"update person_pums set agep = 6 where age >= 45 and age < 55",
"update person_pums set agep = 7 where age >= 55 and age < 65",
"update person_pums set agep = 8 where age >= 65 and age < 75",
"update person_pums set agep = 9 where age >= 75 and age < 85",
"update person_pums set agep = 10 where age >= 85",
"update person_pums set gender = sex",
"update person_pums set race = 1 where race1 = 1",
"update person_pums set race = 2 where race1 = 2",
"update person_pums set race = 3 where race1 >=3 and race1 <= 5",
"update person_pums set race = 4 where race1 = 6",
"update person_pums set race = 5 where race1 = 7",
"update person_pums set race = 6 where race1 = 8",
"update person_pums set race = 7 where race1 = 9",
"update person_pums set employment = 1 where esr = 0",
"update person_pums set employment = 2 where esr = 1 or esr = 2 or esr = 4 or esr = 5",
"update person_pums set employment = 3 where esr = 3",
"update person_pums set employment = 4 where esr = 6",
"drop table person_sample",
"create table person_sample select state, pumano, hhid, serialno, pnum, agep, gender, race, employment, relate from person_pums",
"alter table person_sample add index(serialno, pnum)",
"drop table hhld_sample_temp",
"alter table hhld_sample drop column hhldrage",
"alter table hhld_sample rename to hhld_sample_temp",
"drop table hhld_sample",
"create table hhld_sample select hhld_sample_temp.*, agep as hhldrage from hhld_sample_temp left join person_sample using(serialno) where relate = 1",
"alter table hhld_sample add index(serialno)",
"update hhld_sample set hhldrage = 1 where hhldrage <=7 ",
"update hhld_sample set hhldrage = 2 where hhldrage >7"]
DEFAULT_PERSON_PUMSACS_QUERIES = ["alter table person_pums change agep age bigint",
"alter table person_pums change puma pumano bigint",
"alter table person_pums change rac1p race1 bigint",
"alter table person_pums change st state bigint",
"alter table person_pums change sporder pnum bigint",
"alter table person_pums change rel relate bigint",
"alter table person_pums add column agep bigint",
"alter table person_pums add column gender bigint",
"alter table person_pums add column race bigint",
"alter table person_pums add column employment bigint",
"update person_pums set agep = 1 where age < 5",
"update person_pums set agep = 2 where age >= 5 and age < 15",
"update person_pums set agep = 3 where age >= 15 and age < 25",
"update person_pums set agep = 4 where age >= 25 and age < 35",
"update person_pums set agep = 5 where age >= 35 and age < 45",
"update person_pums set agep = 6 where age >= 45 and age < 55",
"update person_pums set agep = 7 where age >= 55 and age < 65",
"update person_pums set agep = 8 where age >= 65 and age < 75",
"update person_pums set agep = 9 where age >= 75 and age < 85",
"update person_pums set agep = 10 where age >= 85",
"update person_pums set gender = sex",
"update person_pums set race = 1 where race1 = 1",
"update person_pums set race = 2 where race1 = 2",
"update person_pums set race = 3 where race1 >=3 and race1 <= 5",
"update person_pums set race = 4 where race1 = 6",
"update person_pums set race = 5 where race1 = 7",
"update person_pums set race = 6 where race1 = 8",
"update person_pums set race = 7 where race1 = 9",
"update person_pums set employment = 1 where esr = 0",
"update person_pums set employment = 2 where esr = 1 or esr = 2 or esr = 4 or esr = 5",
"update person_pums set employment = 3 where esr = 3",
"update person_pums set employment = 4 where esr = 6",
"alter table person_pums add index(serialno)",
"create table person_pums1 select person_pums.*, hhid from person_pums left join serialcorr using(serialno)",
"update person_pums1 set serialno = hhid",
"drop table person_sample",
"create table person_sample select state, pumano, hhid, serialno, pnum, agep, gender, race, employment, relate from person_pums1",
"alter table person_sample add index(serialno, pnum)",
"drop table hhld_sample_temp",
"alter table hhld_sample drop column hhldrage",
"alter table hhld_sample rename to hhld_sample_temp",
"drop table hhld_sample",
"create table hhld_sample select hhld_sample_temp.*, agep as hhldrage from hhld_sample_temp left join person_sample using(serialno) where relate = 0",
"alter table hhld_sample add index(serialno)",
"update hhld_sample set hhldrage = 1 where hhldrage <=7 ",
"update hhld_sample set hhldrage = 2 where hhldrage >7",
"drop table hhld_sample_temp",
"drop table person_pums1"]
DEFAULT_HOUSING_PUMS2000_QUERIES = ["alter table housing_pums add index(serialno)",
"alter table housing_pums add column hhtype bigint",
"alter table housing_pums add column hhldtype bigint",
"alter table housing_pums add column hhldinc bigint",
"alter table housing_pums add column hhldtenure bigint",
"alter table housing_pums add column hhldsize bigint",
"alter table housing_pums add column childpresence bigint",
"alter table housing_pums add column groupquarter bigint",
"alter table housing_pums add column hhldfam bigint",
"update housing_pums set hhtype = 1 where unittype = 0",
"update housing_pums set hhtype = 2 where unittype = 1 or unittype = 2",
"update housing_pums set hhldtype = 1 where hht = 1",
"update housing_pums set hhldtype = 2 where hht = 2",
"update housing_pums set hhldtype = 3 where hht = 3",
"update housing_pums set hhldtype = 4 where hht = 4 or hht = 5",
"update housing_pums set hhldtype = 5 where hht = 6 or hht = 7",
"update housing_pums set hhldtype = -99 where hht = 0",
"update housing_pums set hhldinc = 1 where hinc <15000",
"update housing_pums set hhldinc = 2 where hinc >= 15000 and hinc < 25000",
"update housing_pums set hhldinc = 3 where hinc >= 25000 and hinc < 35000",
"update housing_pums set hhldinc = 4 where hinc >= 35000 and hinc < 45000",
"update housing_pums set hhldinc = 5 where hinc >= 45000 and hinc < 60000",
"update housing_pums set hhldinc = 6 where hinc >= 60000 and hinc < 100000",
"update housing_pums set hhldinc = 7 where hinc >= 100000 and hinc < 150000",
"update housing_pums set hhldinc = 8 where hinc >= 150000",
"update housing_pums set hhldinc = -99 where hht = 0",
#"update housing_pums set hhldtenure = 1 where tenure = 1 or tenure = 2",
#"update housing_pums set hhldtenure = 2 where tenure = 3 or tenure = 4",
#"update housing_pums set hhldtenure = -99 where tenure = 0",
"update housing_pums set hhldsize = persons where persons < 7",
"update housing_pums set hhldsize = 7 where persons >= 7",
"update housing_pums set hhldsize = -99 where hht = 0",
"update housing_pums set childpresence = 1 where noc > 0",
"update housing_pums set childpresence = 2 where noc = 0",
"update housing_pums set childpresence = -99 where hht = 0",
"update housing_pums set groupquarter = unittype where unittype >0",
"update housing_pums set groupquarter = -99 where unittype =0",
"update housing_pums set hhldfam = 1 where hhldtype <=3",
"update housing_pums set hhldfam = 2 where hhldtype > 3",
"delete from housing_pums where persons = 0",
"drop table hhld_sample",
"drop table gq_sample",
"create table hhld_sample select state, pumano, hhid, serialno, hhtype, hhldtype, hhldinc, hhldsize, childpresence, hhldfam from housing_pums where hhtype = 1",
"create table gq_sample select state, pumano, hhid, serialno, hhtype, groupquarter from housing_pums where hhtype = 2",
"alter table hhld_sample add index(serialno)",
"alter table gq_sample add index(serialno)"]
DEFAULT_HOUSING_PUMSACS_QUERIES = ["alter table housing_pums add index(serialno)",
"alter table housing_pums change hincp hinc bigint",
"alter table housing_pums change np persons bigint",
"alter table housing_pums change hupaoc noc bigint",
"alter table housing_pums change type unittype bigint",
"alter table housing_pums change st state bigint",
"alter table housing_pums change puma pumano bigint",
"alter table housing_pums add column hhtype bigint",
"alter table housing_pums add column hhldtype bigint",
"alter table housing_pums add column hhldinc bigint",
"alter table housing_pums add column hhldtenure bigint",
"alter table housing_pums add column hhldsize bigint",
"alter table housing_pums add column childpresence bigint",
"alter table housing_pums add column groupquarter bigint",
"alter table housing_pums add column hhldfam bigint",
"update housing_pums set hhtype = 1 where unittype = 1",
"update housing_pums set hhtype = 2 where unittype = 2 or unittype = 3",
"update housing_pums set hhldtype = 1 where hht = 1",
"update housing_pums set hhldtype = 2 where hht = 2",
"update housing_pums set hhldtype = 3 where hht = 3",
"update housing_pums set hhldtype = 4 where hht = 4 or hht = 6",
"update housing_pums set hhldtype = 5 where hht = 5 or hht = 7",
"update housing_pums set hhldtype = -99 where hht = 0",
"update housing_pums set hhldinc = 1 where hinc <15000",
"update housing_pums set hhldinc = 2 where hinc >= 15000 and hinc < 25000",
"update housing_pums set hhldinc = 3 where hinc >= 25000 and hinc < 35000",
"update housing_pums set hhldinc = 4 where hinc >= 35000 and hinc < 45000",
"update housing_pums set hhldinc = 5 where hinc >= 45000 and hinc < 60000",
"update housing_pums set hhldinc = 6 where hinc >= 60000 and hinc < 100000",
"update housing_pums set hhldinc = 7 where hinc >= 100000 and hinc < 150000",
"update housing_pums set hhldinc = 8 where hinc >= 150000",
"update housing_pums set hhldinc = -99 where hht = 0",
#"update housing_pums set hhldtenure = 1 where tenure = 1 or tenure = 2",
#"update housing_pums set hhldtenure = 2 where tenure = 3 or tenure = 4",
#"update housing_pums set hhldtenure = -99 where tenure = 0",
"update housing_pums set hhldsize = persons where persons < 7",
"update housing_pums set hhldsize = 7 where persons >= 7",
"update housing_pums set hhldsize = -99 where hht = 0",
"update housing_pums set childpresence = 1 where noc =1 or noc = 2 or noc = 3",
"update housing_pums set childpresence = 2 where noc = 4",
"update housing_pums set childpresence = -99 where hht = 0",
"update housing_pums set groupquarter = 1 where unittype >1",
"update housing_pums set groupquarter = -99 where unittype =1",
"update housing_pums set hhldfam = 1 where hhldtype <=3",
"update housing_pums set hhldfam = 2 where hhldtype > 3",
"delete from housing_pums where persons = 0",
"drop table serialcorr",
"create table serialcorr select state, pumano, serialno from housing_pums group by serialno",
"alter table serialcorr add column hhid bigint primary key auto_increment not null",
"alter table serialcorr add index(serialno)",
"drop table hhld_sample",
"drop table gq_sample",
"alter table housing_pums add index(serialno)",
"create table housing_pums1 select housing_pums.*, hhid from housing_pums left join serialcorr using(serialno)",
"update housing_pums1 set serialno = hhid",
"create table hhld_sample select state, pumano, hhid, serialno, hhtype, hhldtype, hhldinc, hhldsize, childpresence, hhldfam from housing_pums1 where hhtype = 1",
"create table gq_sample select state, pumano, hhid, serialno, hhtype, groupquarter from housing_pums1 where hhtype = 2",
"alter table hhld_sample add index(serialno)",
"alter table gq_sample add index(serialno)",
"drop table housing_pums1"]
DEFAULT_SF2000_QUERIES = ["alter table %s add column agep1 bigint",
"alter table %s add column agep2 bigint",
"alter table %s add column agep3 bigint",
"alter table %s add column agep4 bigint",
"alter table %s add column agep5 bigint",
"alter table %s add column agep6 bigint",
"alter table %s add column agep7 bigint",
"alter table %s add column agep8 bigint",
"alter table %s add column agep9 bigint",
"alter table %s add column agep10 bigint",
"alter table %s add column gender1 bigint",
"alter table %s add column gender2 bigint",
"alter table %s add column race1 bigint",
"alter table %s add column race2 bigint",
"alter table %s add column race3 bigint",
"alter table %s add column race4 bigint",
"alter table %s add column race5 bigint",
"alter table %s add column race6 bigint",
"alter table %s add column race7 bigint",
"alter table %s add column employment1 bigint",
"alter table %s add column employment2 bigint",
"alter table %s add column employment3 bigint",
"alter table %s add column employment4 bigint",
"alter table %s add column childpresence1 bigint",
"alter table %s add column childpresence2 bigint",
"alter table %s add column groupquarter1 bigint",
"alter table %s add column groupquarter2 bigint",
"alter table %s add column hhldinc1 bigint",
"alter table %s add column hhldinc2 bigint",
"alter table %s add column hhldinc3 bigint",
"alter table %s add column hhldinc4 bigint",
"alter table %s add column hhldinc5 bigint",
"alter table %s add column hhldinc6 bigint",
"alter table %s add column hhldinc7 bigint",
"alter table %s add column hhldinc8 bigint",
"alter table %s add column hhldsize1 bigint",
"alter table %s add column hhldsize2 bigint",
"alter table %s add column hhldsize3 bigint",
"alter table %s add column hhldsize4 bigint",
"alter table %s add column hhldsize5 bigint",
"alter table %s add column hhldsize6 bigint",
"alter table %s add column hhldsize7 bigint",
"alter table %s add column hhldtype1 bigint",
"alter table %s add column hhldtype2 bigint",
"alter table %s add column hhldtype3 bigint",
"alter table %s add column hhldtype4 bigint",
"alter table %s add column hhldtype5 bigint",
"alter table %s add column hhldrage1 bigint",
"alter table %s add column hhldrage2 bigint",
"alter table %s add column hhldfam1 bigint",
"alter table %s add column hhldfam2 bigint",
"update %s set agep1 = (P008003+P008004+P008005+P008006+P008007) + (P008042+P008043+P008044+P008045+P008046)",
"update %s set agep2 = (P008008+P008009+P008010+P008011+P008012+P008013+P008014+P008015+P008016+P008017 ) + (P008047+P008048+P008049+P008050+P008051+P008052+P008053+P008054+P008055+P008056)",
"update %s set agep3 = (P008018+P008019+P008020+P008021+P008022+P008023+P008024+P008025 ) + (P008057+P008058+P008059+P008060+P008061+P008062+P008063+P008064)",
"update %s set agep4 = (P008026+P008027) + (P008065+P008066)",
"update %s set agep5 = (P008028+P008029) + (P008067+P008068)",
"update %s set agep6 = (P008030+P008031) + (P008069+P008070)",
"update %s set agep7 = (P008032+P008033+P008034) + (P008071+P008072+P008073)",
"update %s set agep8 = (P008035+P008036+P008037) + (P008074+P008075+P008076)",
"update %s set agep9 = (P008038+P008039) + (P008077+P008078)",
"update %s set agep10 = (P008040) + (P008079)",
"update %s set gender1 = P008002",
"update %s set gender2 = P008041",
"update %s set race1 = P006002",
"update %s set race2 = P006003",
"update %s set race3 = P006004",
"update %s set race4 = P006005",
"update %s set race5 = P006006",
"update %s set race6 = P006007",
"update %s set race7 = P006008",
"update %s set employment1 = agep1+agep2+P008018+P008057",
"update %s set employment2 = P043004+P043006+P043011+P043013",
"update %s set employment3 = P043007+P043014",
"update %s set employment4 = P043008+P043015",
"update %s set childpresence1 = P010008 + P010012 + P010015",
"update %s set childpresence2 = P010009 + P010013 + P010016 + P010017 + P010002",
"update %s set groupquarter1 = P009026",
"update %s set groupquarter2 = P009027",
"update %s set hhldinc1 = P052002 + P052003",
"update %s set hhldinc2 = P052004 + P052005",
"update %s set hhldinc3 = P052006 + P052007",
"update %s set hhldinc4 = P052008 + P052009",
"update %s set hhldinc5 = P052010 + P052011",
"update %s set hhldinc6 = P052012 + P052013",
"update %s set hhldinc7 = P052014 + P052015",
"update %s set hhldinc8 = P052016 + P052017",
"update %s set hhldsize1 = P014010 ",
"update %s set hhldsize2 = P014003+P014011 ",
"update %s set hhldsize3 = P014004+P014012 ",
"update %s set hhldsize4 = P014005+P014013 ",
"update %s set hhldsize5 = P014006+P014014 ",
"update %s set hhldsize6 = P014007+P014015 ",
"update %s set hhldsize7 = P014008+P014016 ",
"update %s set hhldtype1 = P010007",
"update %s set hhldtype2 = P010011 ",
"update %s set hhldtype3 = P010014",
"update %s set hhldtype4 = P010002",
"update %s set hhldtype5 = P010017",
"update %s set hhldrage1 = P012002",
"update %s set hhldrage2 = P012017",
"update %s set hhldfam1 = hhldtype1 + hhldtype2 + hhldtype3",
"update %s set hhldfam2 = hhldtype4 + hhldtype5",
"drop table hhld_marginals",
"drop table gq_marginals",
"drop table person_marginals",
"""create table hhld_marginals select state, county, tract, bg, hhldinc1, hhldinc2, hhldinc3, hhldinc4, hhldinc5, hhldinc6, hhldinc7, hhldinc8,"""
"""hhldsize1, hhldsize2, hhldsize3, hhldsize4, hhldsize5, hhldsize6, hhldsize7, hhldtype1, hhldtype2, hhldtype3, hhldtype4, hhldtype5,"""
"""childpresence1, childpresence2, hhldrage1, hhldrage2, hhldfam1, hhldfam2 from %s""",
"create table gq_marginals select state, county, tract, bg, groupquarter1, groupquarter2 from %s",
"""create table person_marginals select state, county, tract, bg, agep1, agep2, agep3, agep4, agep5, agep6, agep7, agep8, agep9, agep10,"""
"""gender1, gender2, race1, race2, race3, race4, race5, race6, race7, employment1, employment2, employment3, employment4 from"""
""" %s"""]
DEFAULT_SFACS_QUERIES = ["alter table %s add column agep1 bigint",
"alter table %s add column agep2 bigint",
"alter table %s add column agep3 bigint",
"alter table %s add column agep4 bigint",
"alter table %s add column agep5 bigint",
"alter table %s add column agep6 bigint",
"alter table %s add column agep7 bigint",
"alter table %s add column agep8 bigint",
"alter table %s add column agep9 bigint",
"alter table %s add column agep10 bigint",
"alter table %s add column gender1 bigint",
"alter table %s add column gender2 bigint",
"alter table %s add column race1 bigint",
"alter table %s add column race2 bigint",
"alter table %s add column race3 bigint",
"alter table %s add column race4 bigint",
"alter table %s add column race5 bigint",
"alter table %s add column race6 bigint",
"alter table %s add column race7 bigint",
"alter table %s add column race11 bigint",
"alter table %s add column race12 bigint",
"alter table %s add column race13 bigint",
"alter table %s add column race14 bigint",
"alter table %s add column race15 bigint",
"alter table %s add column race16 bigint",
"alter table %s add column race17 bigint",
"alter table %s add column race21 bigint",
"alter table %s add column race22 bigint",
"alter table %s add column race23 bigint",
"alter table %s add column race24 bigint",
"alter table %s add column race25 bigint",
"alter table %s add column race26 bigint",
"alter table %s add column race27 bigint",
"alter table %s add column employment1 bigint",
"alter table %s add column employment2 bigint",
"alter table %s add column employment3 bigint",
"alter table %s add column employment4 bigint",
"alter table %s add column childpresence1 bigint",
"alter table %s add column childpresence2 bigint",
"alter table %s add column groupquarter1 bigint",
"alter table %s add column hhldinc1 bigint",
"alter table %s add column hhldinc2 bigint",
"alter table %s add column hhldinc3 bigint",
"alter table %s add column hhldinc4 bigint",
"alter table %s add column hhldinc5 bigint",
"alter table %s add column hhldinc6 bigint",
"alter table %s add column hhldinc7 bigint",
"alter table %s add column hhldinc8 bigint",
"alter table %s add column hhldsize1 bigint",
"alter table %s add column hhldsize2 bigint",
"alter table %s add column hhldsize3 bigint",
"alter table %s add column hhldsize4 bigint",
"alter table %s add column hhldsize5 bigint",
"alter table %s add column hhldsize6 bigint",
"alter table %s add column hhldsize7 bigint",
"alter table %s add column hhldtype1 bigint",
"alter table %s add column hhldtype2 bigint",
"alter table %s add column hhldtype3 bigint",
"alter table %s add column hhldtype4 bigint",
"alter table %s add column hhldtype5 bigint",
"alter table %s add column hhldrage1 bigint",
"alter table %s add column hhldrage2 bigint",
"alter table %s add column hhldfam1 bigint",
"alter table %s add column hhldfam2 bigint",
"alter table %s add column check_gender bigint",
"alter table %s add column check_age bigint",
"alter table %s add column check_race bigint",
"alter table %s add column check_race1 bigint",
"alter table %s add column check_race2 bigint",
"alter table %s add column check_employment bigint",
"alter table %s add column check_type bigint",
"alter table %s add column check_size bigint",
"alter table %s add column check_fam bigint",
"alter table %s add column check_hhldrage bigint",
"alter table %s add column check_inc bigint",
"alter table %s add column check_child bigint",
"update %s set agep1 = (B01001000003)+(B01001000027)",
"update %s set agep2 = (B01001000004+B01001000005) + (B01001000028+B01001000029)",
"update %s set agep3 = (B01001000006+B01001000007+B01001000008+B01001000009+B01001000010) + (B01001000030+B01001000031+B01001000032+B01001000033+B01001000034)",
"update %s set agep4 = (B01001000011+B01001000012) + (B01001000035+B01001000036)",
"update %s set agep5 = (B01001000013+B01001000014) + (B01001000037+B01001000038)",
"update %s set agep6 = (B01001000015+B01001000016) + (B01001000039+B01001000040)",
"update %s set agep7 = (B01001000017+B01001000018+B01001000019) + (B01001000041+B01001000042+B01001000043)",
"update %s set agep8 = (B01001000020+B01001000021+B01001000022) + (B01001000044+B01001000045+B01001000046)",
"update %s set agep9 = (B01001000023+B01001000024) + (B01001000047+B01001000048)",
"update %s set agep10 = (B01001000025) + (B01001000049)",
"update %s set gender1 = B01001000002",
"update %s set gender2 = B01001000026",
"update %s set race1 = B02001000002",
"update %s set race2 = B02001000003",
"update %s set race3 = B02001000004",
"update %s set race4 = B02001000005",
"update %s set race5 = B02001000006",
"update %s set race6 = B02001000007",
"update %s set race7 = B02001000009+B02001000010",
"update %s set race11 = C01001A00001",
"update %s set race12 = C01001B00001",
"update %s set race13 = C01001C00001",
"update %s set race14 = C01001D00001",
"update %s set race15 = C01001E00001",
"update %s set race16 = C01001F00001",
"update %s set race17 = C01001G00001",
"update %s set race21 = B01001A00001",
"update %s set race22 = B01001B00001",
"update %s set race23 = B01001C00001",
"update %s set race24 = B01001D00001",
"update %s set race25 = B01001E00001",
"update %s set race26 = B01001F00001",
"update %s set race27 = B01001G00001",
"""update %s set employment2 = (B23001000005 + B23001000007) + (B23001000012 + B23001000014) + """
"""(B23001000019 + B23001000021) + (B23001000026 + B23001000028) + (B23001000033 + B23001000035) + """
"""(B23001000040 + B23001000042) + (B23001000047 + B23001000049) + (B23001000054 + B23001000056) + """
"""(B23001000061 + B23001000063) + (B23001000068 + B23001000070) + (B23001000075 + B23001000080 + B23001000085) + """
"""(B23001000091 + B23001000093) + (B23001000098 + B23001000100) + """
"""(B23001000105 + B23001000107) + (B23001000112 + B23001000114) + (B23001000119 + B23001000121) + """
"""(B23001000126 + B23001000128) + (B23001000133 + B23001000135) + (B23001000140 + B23001000142) + """
"""(B23001000147 + B23001000149) + (B23001000154 + B23001000156) + (B23001000161 + B23001000166 + B23001000171)""",
"""update %s set employment3 = (B23001000008 + B23001000015 + B23001000022 + """
"""B23001000029 + B23001000036 + B23001000043 + B23001000050 + B23001000057 + B23001000064 +"""
"""B23001000071 + B23001000076 + B23001000081 + B23001000086 + B23001000094 + B23001000101 +"""
"""B23001000108 + B23001000115 + B23001000122 + B23001000129 + B23001000136 + B23001000143 +"""
"""B23001000150 + B23001000157 + B23001000162 + B23001000167 + B23001000172) """,
"""update %s set employment4 = (B23001000009 + B23001000016 + B23001000023 + """
"""B23001000030 + B23001000037 + B23001000044 + B23001000051 + B23001000058 + B23001000065 +"""
"""B23001000072 + B23001000077 + B23001000082 + B23001000087 + B23001000095 + B23001000102 +"""
"""B23001000109 + B23001000116 + B23001000123 + B23001000130 + B23001000137 + B23001000144 +"""
"""B23001000151 + B23001000158 + B23001000163 + B23001000168 + B23001000173) """,
"update %s set employment1 = gender1 + gender2 - employment2 - employment3 - employment4",
"update %s set groupquarter1 = B26001000001",
"update %s set hhldinc1 = B19001000002 + B19001000003",
"update %s set hhldinc2 = B19001000004 + B19001000005",
"update %s set hhldinc3 = B19001000006 + B19001000007",
"update %s set hhldinc4 = B19001000008 + B19001000009",
"update %s set hhldinc5 = B19001000010 + B19001000011",
"update %s set hhldinc6 = B19001000012 + B19001000013",
"update %s set hhldinc7 = B19001000014 + B19001000015",
"update %s set hhldinc8 = B19001000016 + B19001000017",
"update %s set hhldsize1 = B25009000003+B25009000011",
"update %s set hhldsize2 = B25009000004+B25009000012",
"update %s set hhldsize3 = B25009000005+B25009000013",
"update %s set hhldsize4 = B25009000006+B25009000014",
"update %s set hhldsize5 = B25009000007+B25009000015",
"update %s set hhldsize6 = B25009000008+B25009000016",
"update %s set hhldsize7 = B25009000009+B25009000017",
"update %s set hhldtype1 = B11001000003",
"update %s set hhldtype2 = B11001000005",
"update %s set hhldtype3 = B11001000006",
"update %s set hhldtype4 = B11001000008",
"update %s set hhldtype5 = B11001000009",
"""update %s set hhldrage1 = (B25007000003+B25007000004+B25007000005+B25007000006+B25007000007+B25007000008)+"""
"""(B25007000013+B25007000014+B25007000015+B25007000016+B25007000017+B25007000018)""",
"update %s set hhldrage2 = (B25007000009+ B25007000010+B25007000011)+(B25007000019+ B25007000020+B25007000021)",
"update %s set hhldfam1 = hhldtype1 + hhldtype2 + hhldtype3",
"update %s set hhldfam2 = hhldtype4 + hhldtype5",
"update %s set childpresence1 = C23007000002",
"update %s set childpresence2 = C23007000017 + hhldtype4 + hhldtype5",
"update %s set check_gender = gender1 + gender2",
"update %s set check_age = agep1+agep2+agep3+agep4+agep5+agep6+agep7+agep8+agep9+agep10",
"update %s set check_race = race1+race2+race3+race4+race5+race6+race7",
"update %s set check_race1 = race11+race12+race13+race14+race15+race16+race17",
"update %s set check_race2 = race21+race22+race23+race24+race25+race26+race27",
"update %s set check_employment = employment1 + employment2 + employment3 + employment4",
"update %s set check_type = hhldtype1+hhldtype2+hhldtype3+hhldtype4+hhldtype5",
"update %s set check_size = hhldsize1+hhldsize2+hhldsize3+hhldsize4+hhldsize5+hhldsize6+hhldsize7",
"update %s set check_hhldrage = hhldrage1+hhldrage2",
"update %s set check_inc = hhldinc1+hhldinc2+hhldinc3+hhldinc4+hhldinc5+hhldinc6+hhldinc7+hhldinc8",
"update %s set check_fam = hhldfam1+hhldfam2",
"update %s set check_child = childpresence1+childpresence2",
"drop table hhld_marginals",
"drop table gq_marginals",
"drop table person_marginals",
"""create table hhld_marginals select state, county, tract, bg, hhldinc1, hhldinc2, hhldinc3, hhldinc4, hhldinc5, hhldinc6, hhldinc7, hhldinc8,"""
"""hhldsize1, hhldsize2, hhldsize3, hhldsize4, hhldsize5, hhldsize6, hhldsize7, hhldtype1, hhldtype2, hhldtype3, hhldtype4, hhldtype5,"""
"""childpresence1, childpresence2, hhldrage1, hhldrage2, hhldfam1, hhldfam2 from %s""",
"create table gq_marginals select state, county, tract, bg, groupquarter1 from %s",
"""create table person_marginals select state, county, tract, bg, agep1, agep2, agep3, agep4, agep5, agep6, agep7, agep8, agep9, agep10,"""
"""gender1, gender2, race1, race2, race3, race4, race5, race6, race7 from %s"""]
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/synthesizer/gui/default_census_cat_transforms.py | Python | gpl-2.0 | 42,838 |
# -*- coding: utf-8 -*-
#
# test_errors.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for error handling
"""
import unittest
import nest
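# Each test below asserts that nest.kernel.NESTError is raised and that its message contains
# the expected error name (e.g. "StackUnderflow").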
@nest.ll_api.check_stack
class ErrorTestCase(unittest.TestCase):
    """Tests if errors are handled correctly"""

    def test_Raise(self):
        """Error raising"""

        def raise_custom_exception(exc, msg):
            raise exc(msg)

        message = "test"
        exception = nest.kernel.NESTError

        self.assertRaisesRegex(
            exception, message, raise_custom_exception, exception, message)

    def test_StackUnderFlow(self):
        """Stack underflow"""

        nest.ResetKernel()

        self.assertRaisesRegex(
            nest.kernel.NESTError, "StackUnderflow", nest.ll_api.sr, 'clear ;')

    def test_DivisionByZero(self):
        """Division by zero"""

        nest.ResetKernel()

        self.assertRaisesRegex(
            nest.kernel.NESTError, "DivisionByZero", nest.ll_api.sr, '1 0 div')

    def test_UnknownNode(self):
        """Unknown node"""

        nest.ResetKernel()

        self.assertRaisesRegex(
            nest.kernel.NESTError, "UnknownNode", nest.Connect, (99, ), (99, ))

    def test_UnknownModel(self):
        """Unknown model name"""

        nest.ResetKernel()

        self.assertRaisesRegex(
            nest.kernel.NESTError, "UnknownModelName", nest.Create, -1)


def suite():
    suite = unittest.makeSuite(ErrorTestCase, 'test')
    return suite


def run():
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())


if __name__ == "__main__":
    run()
| hakonsbm/nest-simulator | pynest/nest/tests/test_errors.py | Python | gpl-2.0 | 2,240 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington and Kai Nagel
# See opus_core/LICENSE
import os
import opus_matsim.sustain_city.tests as test_dir
from opus_core.tests import opus_unittest
from opus_core.store.csv_storage import csv_storage
from urbansim.datasets.travel_data_dataset import TravelDataDataset
from numpy import *
import numpy
from opus_core.logger import logger
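# The expected values asserted below (0 and 120) come from the pre-computed MATSim travel
# cost table stored under data/travel_cost in the test directory.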
class MatrixTest(opus_unittest.OpusTestCase):
    """Testing access to travel data values stored in numpy arrays.
    """

    def setUp(self):
        print "Entering setup"
        # get sensitivity test path
        self.test_dir_path = test_dir.__path__[0]
        # get location of the travel data table
        self.input_directory = os.path.join(self.test_dir_path, 'data', 'travel_cost')
        logger.log_status("input_directory: %s" % self.input_directory)
        # check that the source directory exists
        if not os.path.exists(self.input_directory):
            raise IOError('File not found! %s' % self.input_directory)
        print "Leaving setup"

    def test_run(self):
        print "Entering test run"
        # This test loads an existing travel data table as a TravelDataDataset (numpy array)
        # and accesses single (pre-known) values to validate the conversion process
        # (numpy array into standard python list).
        #
        # Here an example:
        # my_list = [[1,2,3],
        #            [4,5,6],
        #            [7,8,9]]
        #
        # my_list[0][1] should be = 2
        # my_list[2][2] should be = 9
        table_name = 'travel_data'
        travel_data_attribute = 'single_vehicle_to_work_travel_cost'
        # location of pre-calculated MATSim travel costs
        in_storage = csv_storage(storage_location=self.input_directory)
        # create travel data set (travel costs)
        travel_data_set = TravelDataDataset(in_storage=in_storage, in_table_name=table_name)
        travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=31)
        # converting from numpy array into a 2d list
        travel_list = numpy.atleast_2d(travel_data_attribute_mat).tolist()
        # get two values for validation
        value1 = int(travel_list[1][1])  # should be = 0
        value2 = int(travel_list[2][1])  # should be = 120
        logger.log_status('First validation value should be 0. Current value is %i' % value1)
        logger.log_status('Second validation value should be 120. Current value is %i' % value2)
        self.assertTrue(value1 == 0)
        self.assertTrue(value2 == 120)
        # self.dump_travel_list(travel_list)  # for debugging
        print "Leaving test run"

    def dump_travel_list(self, travel_list):
        '''Dumping travel_list for debugging reasons...
        '''
        dest = os.path.join(os.environ['OPUS_HOME'], 'opus_matsim', 'tmp')
        if not os.path.exists(dest):
            os.makedirs(dest)
        travel = os.path.join(dest, 'travelFile.txt')
        f = open(travel, "w")
        f.write(str(travel_list))
        f.close()


if __name__ == "__main__":
    # mt = MatrixTest()  # for debugging
    # mt.test_run()  # for debugging
    opus_unittest.main()
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/opus_matsim/sustain_city/tests/matsim_coupeling/matrix_test.py | Python | gpl-2.0 | 3,343