repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64 3.17-100) | line_max (int64 7-1k) | alpha_frac (float64 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
openstack/senlin | senlin/tests/unit/objects/requests/test_nodes.py | 1 | 8333 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_config import cfg
from senlin.objects.requests import nodes
from senlin.tests.unit.common import base as test_base
CONF = cfg.CONF
CONF.import_opt('default_action_timeout', 'senlin.conf')
class TestNodeCreate(test_base.SenlinTestCase):
body = {
'name': 'test-node',
'profile_id': 'test-profile',
}
def test_node_create_request_body(self):
sot = nodes.NodeCreateRequestBody(**self.body)
self.assertEqual('test-node', sot.name)
self.assertEqual('test-profile', sot.profile_id)
sot.obj_set_defaults()
self.assertEqual('', sot.cluster_id)
self.assertEqual('', sot.role)
self.assertEqual({}, sot.metadata)
def test_node_create_request_body_full(self):
body = copy.deepcopy(self.body)
body['role'] = 'master'
body['cluster_id'] = 'cluster-01'
body['metadata'] = {'foo': 'bar'}
sot = nodes.NodeCreateRequestBody(**body)
self.assertEqual('test-node', sot.name)
self.assertEqual('test-profile', sot.profile_id)
self.assertEqual('cluster-01', sot.cluster_id)
self.assertEqual('master', sot.role)
self.assertEqual({'foo': 'bar'}, sot.metadata)
def test_request_body_to_primitive(self):
sot = nodes.NodeCreateRequestBody(**self.body)
res = sot.obj_to_primitive()
self.assertEqual(
{
'name': u'test-node',
'profile_id': u'test-profile'
},
res['senlin_object.data']
)
self.assertEqual('NodeCreateRequestBody',
res['senlin_object.name'])
self.assertEqual('senlin', res['senlin_object.namespace'])
self.assertEqual('1.0', res['senlin_object.version'])
self.assertIn('profile_id', res['senlin_object.changes'])
self.assertIn('name', res['senlin_object.changes'])
def test_request_to_primitive(self):
body = nodes.NodeCreateRequestBody(**self.body)
request = {'node': body}
sot = nodes.NodeCreateRequest(**request)
self.assertIsInstance(sot.node, nodes.NodeCreateRequestBody)
self.assertEqual('test-node', sot.node.name)
self.assertEqual('test-profile', sot.node.profile_id)
res = sot.obj_to_primitive()
self.assertEqual(['node'], res['senlin_object.changes'])
self.assertEqual('NodeCreateRequest', res['senlin_object.name'])
self.assertEqual('senlin', res['senlin_object.namespace'])
self.assertEqual('1.0', res['senlin_object.version'])
data = res['senlin_object.data']['node']
self.assertIn('profile_id', data['senlin_object.changes'])
self.assertIn('name', data['senlin_object.changes'])
self.assertEqual('NodeCreateRequestBody',
data['senlin_object.name'])
self.assertEqual('senlin', data['senlin_object.namespace'])
self.assertEqual('1.0', data['senlin_object.version'])
self.assertEqual(
{'name': u'test-node', 'profile_id': u'test-profile'},
data['senlin_object.data']
)
class TestNodeList(test_base.SenlinTestCase):
def test_node_list_request_body_full(self):
params = {
'cluster_id': '8c3c9af7-d768-4c5a-a21e-5261b22d749d',
'name': ['node01'],
'status': ['ACTIVE'],
'limit': 3,
'marker': 'f1ed0d50-7651-4599-a8cb-c86e9c7123f5',
'sort': 'name:asc',
'project_safe': False,
}
sot = nodes.NodeListRequest(**params)
self.assertEqual('8c3c9af7-d768-4c5a-a21e-5261b22d749d',
sot.cluster_id)
self.assertEqual(['node01'], sot.name)
self.assertEqual(['ACTIVE'], sot.status)
self.assertEqual(3, sot.limit)
self.assertEqual('f1ed0d50-7651-4599-a8cb-c86e9c7123f5', sot.marker)
self.assertEqual('name:asc', sot.sort)
self.assertFalse(sot.project_safe)
def test_node_list_request_body_default(self):
sot = nodes.NodeListRequest()
sot.obj_set_defaults()
self.assertTrue(sot.project_safe)
class TestNodeGet(test_base.SenlinTestCase):
def test_node_get_request_full(self):
params = {
'identity': 'node-001',
'show_details': True,
}
sot = nodes.NodeGetRequest(**params)
self.assertEqual('node-001', sot.identity)
self.assertTrue(sot.show_details)
def test_node_get_request_default(self):
sot = nodes.NodeGetRequest()
sot.obj_set_defaults()
self.assertFalse(sot.show_details)
class TestNodeUpdate(test_base.SenlinTestCase):
body = {
'identity': 'test-node',
'name': 'test-node-newname',
'profile_id': 'test-profile',
'metadata': {'foo': 'bar'},
'role': 'master'
}
def test_node_update_request(self):
sot = nodes.NodeUpdateRequest(**self.body)
self.assertEqual('test-node', sot.identity)
self.assertEqual('test-node-newname', sot.name)
self.assertEqual('test-profile', sot.profile_id)
self.assertEqual('master', sot.role)
self.assertEqual({'foo': 'bar'}, sot.metadata)
class TestNodeDelete(test_base.SenlinTestCase):
body = {
'identity': 'test-node'
}
def test_node_delete_request(self):
sot = nodes.NodeDeleteRequest(**self.body)
self.assertEqual('test-node', sot.identity)
class TestNodeCheck(test_base.SenlinTestCase):
body = {
'identity': 'test-node',
'params': {'foo': 'bar'},
}
def test_node_check_request(self):
sot = nodes.NodeCheckRequest(**self.body)
self.assertEqual({'foo': 'bar'}, sot.params)
class TestNodeRecover(test_base.SenlinTestCase):
body = {
'identity': 'test-node',
'params': {'foo': 'bar'},
}
def test_node_recover_request(self):
sot = nodes.NodeRecoverRequest(**self.body)
self.assertEqual({'foo': 'bar'}, sot.params)
class TestNodeOperation(test_base.SenlinTestCase):
body = {
'identity': 'test-node',
'operation': 'dance',
'params': {'foo': 'bar'},
}
def test_node_operation_request(self):
sot = nodes.NodeOperationRequest(**self.body)
self.assertEqual('test-node', sot.identity)
self.assertEqual('dance', sot.operation)
self.assertEqual({'foo': 'bar'}, sot.params)
class TestNodeAdopt(test_base.SenlinTestCase):
body = {
'identity': 'test-node',
'type': 'test-type',
'name': 'test-name',
'cluster': 'test-cluster',
'role': 'test-role',
'metadata': {'key': 'value'},
'overrides': {'foo': 'bar'},
'snapshot': True
}
def test_node_adopt_request(self):
sot = nodes.NodeAdoptRequest(**self.body)
self.assertEqual('test-node', sot.identity)
self.assertEqual('test-type', sot.type)
self.assertEqual('test-name', sot.name)
self.assertEqual('test-cluster', sot.cluster)
self.assertEqual('test-role', sot.role)
self.assertEqual({'key': 'value'}, sot.metadata)
self.assertEqual({'foo': 'bar'}, sot.overrides)
self.assertTrue(sot.snapshot)
class TestNodeAdoptPreview(test_base.SenlinTestCase):
body = {
'identity': 'test-node',
'type': 'test-type',
'overrides': {'foo': 'bar'},
'snapshot': True
}
def test_node_adopt_request(self):
sot = nodes.NodeAdoptPreviewRequest(**self.body)
self.assertEqual('test-node', sot.identity)
self.assertEqual('test-type', sot.type)
self.assertEqual({'foo': 'bar'}, sot.overrides)
self.assertTrue(sot.snapshot)
| apache-2.0 | -119,337,664,729,296,670 | 32.332 | 76 | 0.610344 | false |
cbeighley/peregrine | peregrine/iqgen/bits/satellite_factory.py | 2 | 2905 | # Copyright (C) 2016 Swift Navigation Inc.
# Contact: Valeri Atamaniouk <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
The :mod:`peregrine.iqgen.bits.satellite_factory` module contains classes and
functions related to object factory for satellite objects.
"""
from peregrine.iqgen.bits.satellite_gps import GPSSatellite
from peregrine.iqgen.bits.amplitude_factory import factoryObject as amplitudeOF
from peregrine.iqgen.bits.doppler_factory import factoryObject as dopplerOF
from peregrine.iqgen.bits.message_factory import factoryObject as messageOF
class ObjectFactory(object):
'''
Object factory for satellite types.
'''
def __init__(self):
super(ObjectFactory, self).__init__()
def toMapForm(self, obj):
t = type(obj)
if t is GPSSatellite:
return self.__GPSSatellite_ToMap(obj)
else:
raise ValueError("Invalid object type")
def fromMapForm(self, data):
t = data['type']
if t == 'GPSSatellite':
return self.__MapTo_GPSSatellite(data)
else:
raise ValueError("Invalid object type")
def __GPSSatellite_ToMap(self, obj):
data = {'type': 'GPSSatellite',
'prn': obj.prn,
'amplitude': amplitudeOF.toMapForm(obj.getAmplitude()),
'l1caEnabled': obj.isL1CAEnabled(),
'l2cEnabled': obj.isL2CEnabled(),
'l1caMessage': messageOF.toMapForm(obj.getL1CAMessage()),
'l2cMessage': messageOF.toMapForm(obj.getL2CMessage()),
'doppler': dopplerOF.toMapForm(obj.getDoppler()),
'l2clCodeType': obj.getL2CLCodeType(),
'codeDopplerIgnored': obj.isCodeDopplerIgnored()
}
return data
def __MapTo_GPSSatellite(self, data):
prn = data['prn']
doppler = dopplerOF.fromMapForm(data['doppler'])
amplitude = amplitudeOF.fromMapForm(data['amplitude'])
l1caEnabled = data['l1caEnabled']
l2cEnabled = data['l2cEnabled']
l1caMessage = messageOF.fromMapForm(data['l1caMessage'])
l2cMessage = messageOF.fromMapForm(data['l2cMessage'])
clCodeType = data['l2clCodeType']
codeDopplerIgnored = data['codeDopplerIgnored']
satellite = GPSSatellite(prn)
satellite.setAmplitude(amplitude)
satellite.setDoppler(doppler)
satellite.setL1CAEnabled(l1caEnabled)
satellite.setL2CEnabled(l2cEnabled)
satellite.setL1CAMessage(l1caMessage)
satellite.setL2CMessage(l2cMessage)
satellite.setL2CLCodeType(clCodeType)
satellite.setCodeDopplerIgnored(codeDopplerIgnored)
return satellite
factoryObject = ObjectFactory()
| gpl-3.0 | -6,576,944,578,196,463,000 | 34.864198 | 79 | 0.713941 | false |
kaiweifan/vse-lbaas-plugin-poc | quantum/plugins/nec/drivers/__init__.py | 7 | 1479 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
from quantum.openstack.common import importutils
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
DRIVER_PATH = "quantum.plugins.nec.drivers.%s"
DRIVER_LIST = {
'trema': DRIVER_PATH % "trema.TremaPortBaseDriver",
'trema_port': DRIVER_PATH % "trema.TremaPortBaseDriver",
'trema_portmac': DRIVER_PATH % "trema.TremaPortMACBaseDriver",
'trema_mac': DRIVER_PATH % "trema.TremaMACBaseDriver",
'pfc': DRIVER_PATH % "pfc.PFCV4Driver",
'pfc_v3': DRIVER_PATH % "pfc.PFCV3Driver",
'pfc_v4': DRIVER_PATH % "pfc.PFCV4Driver"}
def get_driver(driver_name):
LOG.info(_("Loading OFC driver: %s"), driver_name)
driver_klass = DRIVER_LIST.get(driver_name) or driver_name
return importutils.import_class(driver_klass)
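# Illustrative sketch (added for clarity; not part of the original module):
# the loader accepts either one of the short aliases registered above or a
# full dotted class path. The alias chosen here is just one DRIVER_LIST key.
def _example_lookup_driver():
    # Resolves 'trema' to quantum.plugins.nec.drivers.trema.TremaPortBaseDriver
    # and returns the class itself (not an instance).
    return get_driver('trema')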
| apache-2.0 | 6,007,911,006,154,109,000 | 38.972973 | 78 | 0.718053 | false |
lokeshjindal15/pd-gem5 | configs/common/CacheConfig.py | 1 | 11310 |
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
# Configure the M5 cache hierarchy config in one place
#
import m5
from m5.objects import *
from Caches import *
from O3_ARM_v7a import *
def config_cache(options, system):
if options.cpu_type == "arm_detailed":
#try:
# from O3_ARM_v7a import *
#except:
# print "arm_detailed is unavailable. Did you compile the O3 model?"
# sys.exit(1)
dcache_class = O3_ARM_v7a_DCache
icache_class = O3_ARM_v7a_ICache
l2_cache_class = O3_ARM_v7aL2
l3_cache_class = O3_ARM_v7aL3
else:
dcache_class, icache_class, l2_cache_class = \
L1Cache, L1Cache, L2Cache
# Set the cache line size of the system
system.cache_line_size = options.cacheline_size
if ( options.l2cache or ( options.cpu_type == 'arm_detailed' ) ):
# Provide a clock for the L2 and the L1-to-L2 bus here as they
# are not connected using addTwoLevelCacheHierarchy. Use the
# same clock as the CPUs, and set the L1-to-L2 bus width to 32
# bytes (256 bits).
if options.cpu_type == "arm_detailed":
system.l3 = l3_cache_class(clk_domain=system.clk_domain_const)
system.tol3bus = CoherentXBar(clk_domain = system.cpu_clk_domain,
width = 32)
system.l3.cpu_side = system.tol3bus.master
system.l3.mem_side = system.membus.slave
else:
system.l2 = l2_cache_class(clk_domain=system.cpu_clk_domain,
size=options.l2_size,
assoc=options.l2_assoc)
system.tol2bus = CoherentXBar(clk_domain = system.cpu_clk_domain,
width = 32)
system.l2.cpu_side = system.tol2bus.master
system.l2.mem_side = system.membus.slave
if options.memchecker:
system.memchecker = MemChecker()
for i in xrange(options.num_cpus):
if options.caches:
if options.cpu_type == 'arm_detailed':
icache = icache_class()
dcache = dcache_class()
l2 = l2_cache_class()
else:
icache = icache_class(size=options.l1i_size,
assoc=options.l1i_assoc)
dcache = dcache_class(size=options.l1d_size,
assoc=options.l1d_assoc)
if options.memchecker:
dcache_mon = MemCheckerMonitor(warn_only=True)
dcache_real = dcache
# Do not pass the memchecker into the constructor of
# MemCheckerMonitor, as it would create a copy; we require
# exactly one MemChecker instance.
dcache_mon.memchecker = system.memchecker
# Connect monitor
dcache_mon.mem_side = dcache.cpu_side
# Let CPU connect to monitors
dcache = dcache_mon
# When connecting the caches, the clock is also inherited
# from the CPU in question
if buildEnv['TARGET_ISA'] == 'x86':
system.cpu[i].addPrivateSplitL1Caches(icache, dcache,
PageTableWalkerCache(),
PageTableWalkerCache())
elif options.cpu_type == 'arm_detailed':
system.cpu[i].addTwoLevelCacheHierarchy(icache,dcache,l2)
else:
system.cpu[i].addPrivateSplitL1Caches(icache, dcache)
if options.memchecker:
# The mem_side ports of the caches haven't been connected yet.
# Make sure connectAllPorts connects the right objects.
system.cpu[i].dcache = dcache_real
system.cpu[i].dcache_mon = dcache_mon
system.cpu[i].createInterruptController()
if options.cpu_type == 'arm_detailed':
system.cpu[i].connectAllPorts(system.tol3bus, system.membus)
elif options.l2cache:
system.cpu[i].connectAllPorts(system.tol2bus, system.membus)
else:
system.cpu[i].connectAllPorts(system.membus)
return system
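# Illustrative sketch (added for clarity; not part of the original file): a
# minimal stand-in for the gem5 Options namespace carrying just the attributes
# config_cache() reads. The sizes and associativities below are assumptions
# chosen only for this example; real scripts build options via Options.py.
def _example_cache_options():
    from argparse import Namespace
    return Namespace(cpu_type='arm_detailed', caches=True, l2cache=True,
                     cacheline_size=64, num_cpus=1, memchecker=False,
                     l1i_size='32kB', l1i_assoc=2, l1d_size='32kB',
                     l1d_assoc=2, l2_size='1MB', l2_assoc=8)
# Typical use inside a full-system config script, once 'system' exists:
#     system = config_cache(_example_cache_options(), system)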
#import m5
#from m5.objects import *
#from Caches import *
#def config_cache(options, system):
# if options.cpu_type == "arm_detailed":
# try:
# from O3_ARM_v7a import *
# except:
# print "arm_detailed is unavailable. Did you compile the O3 model?"
# sys.exit(1)
# dcache_class, icache_class, l2_cache_class = \
# O3_ARM_v7a_DCache, O3_ARM_v7a_ICache, O3_ARM_v7aL2
# elif options.cpu_type == 'piledriver':
# try:
# from PileDriver import *
# except:
# print "piledriver is unavailable. Did you compile the O3 model?"
# sys.exit(1)
# dcache_class, icache_class, l2_cache_class = \
# PileDriver_DCache, PileDriver_ICache, PileDriver_L2
#
# else:
# dcache_class, icache_class, l2_cache_class = \
# L1Cache, L1Cache, L2Cache
# # Set the cache line size of the system
# system.cache_line_size = options.cacheline_size
# if options.cpu_type == "piledriver":
# # For PileDriver we have specified l2 size in the configuration file
# # Also, I think bus width of "cpu to l2" is 64 bits but for now stick with 32, because I got an error and it #may be from this
# system.l2 = l2_cache_class(clk_domain = system.cpu_clk_domain)
# system.tol2bus = CoherentXBar(clk_domain = system.cpu_clk_domain,
# width = 32)
# system.l2.cpu_side = system.tol2bus.master
# system.l2.mem_side = system.membus.slave
# if options.l2cache:
# Provide a clock for the L2 and the L1-to-L2 bus here as they
# are not connected using addTwoLevelCacheHierarchy. Use the
# same clock as the CPUs, and set the L1-to-L2 bus width to 32
# bytes (256 bits).
# system.l2 = l2_cache_class(clk_domain=system.cpu_clk_domain,
# system.l2 = l2_cache_class(clk_domain=system.clk_domain_const, # lokeshjindal15
# size=options.l2_size,
# assoc=options.l2_assoc)
#system.tol2bus = CoherentXBar(clk_domain = system.cpu_clk_domain,
# system.tol2bus = CoherentXBar(clk_domain = system.clk_domain_const, # lokeshjindal15
# width = 32)
# system.l2.cpu_side = system.tol2bus.master
# system.l2.mem_side = system.membus.slave
# if options.memchecker:
# system.memchecker = MemChecker()
# for i in xrange(options.num_cpus):
# if options.cpu_type == "piledriver":
# icache = icache_class()
# dcache = dcache_class()
# if buildEnv['TARGET_ISA'] == 'x86':
# system.cpu[i].addPrivateSplitL1Caches(icache, dcache,
# PageTableWalkerCache(),
# PageTableWalkerCache())
# else:
# system.cpu[i].addPrivateSplitL1Caches(icache, dcache)
# if options.caches:
# icache = icache_class(size=options.l1i_size,
# assoc=options.l1i_assoc)
# dcache = dcache_class(size=options.l1d_size,
# assoc=options.l1d_assoc)
#
# if options.memchecker:
# dcache_mon = MemCheckerMonitor(warn_only=True)
# dcache_real = dcache
# Do not pass the memchecker into the constructor of
# MemCheckerMonitor, as it would create a copy; we require
# exactly one MemChecker instance.
# dcache_mon.memchecker = system.memchecker
# Connect monitor
# dcache_mon.mem_side = dcache.cpu_side
# Let CPU connect to monitors
# dcache = dcache_mon
# When connecting the caches, the clock is also inherited
# from the CPU in question
# if buildEnv['TARGET_ISA'] == 'x86':
# system.cpu[i].addPrivateSplitL1Caches(icache, dcache,
# PageTableWalkerCache(),
# PageTableWalkerCache())
# else:
# system.cpu[i].addPrivateSplitL1Caches(icache, dcache)
# if options.memchecker:
# The mem_side ports of the caches haven't been connected yet.
# Make sure connectAllPorts connects the right objects.
# system.cpu[i].dcache = dcache_real
# system.cpu[i].dcache_mon = dcache_mon
# system.cpu[i].createInterruptController()
# if options.l2cache or options.cpu_type == "piledriver":
# system.cpu[i].connectAllPorts(system.tol2bus, system.membus)
# else:
# system.cpu[i].connectAllPorts(system.membus)
# return system
| bsd-3-clause | -2,735,727,477,452,770,300 | 42.167939 | 135 | 0.601238 | false |
vlvkobal/netdata | collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py | 4 | 1492 | # -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
"""
backports.makefile
~~~~~~~~~~~~~~~~~~
Backports the Python 3 ``socket.makefile`` method for use with anything that
wants to create a "fake" socket object.
"""
import io
from socket import SocketIO
def backport_makefile(self, mode="r", buffering=None, encoding=None,
errors=None, newline=None):
"""
Backport of ``socket.makefile`` from Python 3.5.
"""
if not set(mode) <= set(["r", "w", "b"]):
raise ValueError(
"invalid mode %r (only r, w, b allowed)" % (mode,)
)
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._makefile_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
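# Usage note (added for clarity; not part of the original module): the backport
# is meant to be grafted onto a socket-like class that lacks the Python 3
# makefile() method, e.g. urllib3's pyopenssl WrappedSocket. The class name
# below is an assumption used only for illustration:
#     WrappedSocket.makefile = backport_makefile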
| gpl-3.0 | 1,888,776,068,204,492,800 | 26.62963 | 76 | 0.593164 | false |
birkbeckOLH/annotran | annotran/languages/schemas.py | 2 | 2276 | """
Copyright (c) 2013-2014 Hypothes.is Project and contributors
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# -*- coding: utf-8 -*-
import colander
import deform
import deform.widget
from annotran.languages.models import LANGUAGE_NAME_MAX_LENGTH
from annotran.languages.models import LANGUAGE_NAME_MIN_LENGTH
from h import i18n
from h.accounts.schemas import CSRFSchema
_ = i18n.TranslationString
class LanguageSchema(CSRFSchema):
"""The schema for the create-a-new-language form."""
name = colander.SchemaNode(
colander.String(),
title=_("What do you want to call the language?"),
validator=colander.Length(
min=LANGUAGE_NAME_MIN_LENGTH,
max=LANGUAGE_NAME_MAX_LENGTH),
widget=deform.widget.TextInputWidget(
autofocus=True,
css_class="language-form__name-input js-language-name-input",
disable_autocomplete=True,
label_css_class="language-form__name-label",
max_length=LANGUAGE_NAME_MAX_LENGTH,
placeholder=_("Language Name")))
| mit | 4,128,786,728,090,736,600 | 41.148148 | 79 | 0.75 | false |
kaldonis/ft-event-manager | src/lib/webob/exc.py | 3 | 36095 | """
HTTP Exception
--------------
This module processes Python exceptions that relate to HTTP exceptions
by defining a set of exceptions, all subclasses of HTTPException.
Each exception, in addition to being a Python exception that can be
raised and caught, is also a WSGI application and ``webob.Response``
object.
This module defines exceptions according to RFC 2068 [1]_ : codes with
100-300 are not really errors; 400's are client errors, and 500's are
server errors. According to the WSGI specification [2]_ , the application
can call ``start_response`` more than once only under two conditions:
(a) the response has not yet been sent, or (b) if the second and
subsequent invocations of ``start_response`` have a valid ``exc_info``
argument obtained from ``sys.exc_info()``. The WSGI specification then
requires the server or gateway to handle the case where content has been
sent and then an exception was encountered.
Exception
HTTPException
HTTPOk
* 200 - HTTPOk
* 201 - HTTPCreated
* 202 - HTTPAccepted
* 203 - HTTPNonAuthoritativeInformation
* 204 - HTTPNoContent
* 205 - HTTPResetContent
* 206 - HTTPPartialContent
HTTPRedirection
* 300 - HTTPMultipleChoices
* 301 - HTTPMovedPermanently
* 302 - HTTPFound
* 303 - HTTPSeeOther
* 304 - HTTPNotModified
* 305 - HTTPUseProxy
* 306 - Unused (not implemented, obviously)
* 307 - HTTPTemporaryRedirect
HTTPError
HTTPClientError
* 400 - HTTPBadRequest
* 401 - HTTPUnauthorized
* 402 - HTTPPaymentRequired
* 403 - HTTPForbidden
* 404 - HTTPNotFound
* 405 - HTTPMethodNotAllowed
* 406 - HTTPNotAcceptable
* 407 - HTTPProxyAuthenticationRequired
* 408 - HTTPRequestTimeout
* 409 - HTTPConflict
* 410 - HTTPGone
* 411 - HTTPLengthRequired
* 412 - HTTPPreconditionFailed
* 413 - HTTPRequestEntityTooLarge
* 414 - HTTPRequestURITooLong
* 415 - HTTPUnsupportedMediaType
* 416 - HTTPRequestRangeNotSatisfiable
* 417 - HTTPExpectationFailed
* 428 - HTTPPreconditionRequired
* 429 - HTTPTooManyRequests
* 431 - HTTPRequestHeaderFieldsTooLarge
HTTPServerError
* 500 - HTTPInternalServerError
* 501 - HTTPNotImplemented
* 502 - HTTPBadGateway
* 503 - HTTPServiceUnavailable
* 504 - HTTPGatewayTimeout
* 505 - HTTPVersionNotSupported
* 511 - HTTPNetworkAuthenticationRequired
Subclass usage notes:
---------------------
The HTTPException class is complicated by 4 factors:
1. The content given to the exception may either be plain-text or
as html-text.
2. The template may want to have string-substitutions taken from
the current ``environ`` or values from incoming headers. This
is especially troublesome due to case sensitivity.
3. The final output may either be text/plain or text/html
mime-type as requested by the client application.
4. Each exception has a default explanation, but those who
raise exceptions may want to provide additional detail.
Subclass attributes and call parameters are designed to provide an easier path
through the complications.
Attributes:
``code``
the HTTP status code for the exception
``title``
remainder of the status line (stuff after the code)
``explanation``
a plain-text explanation of the error message that is
not subject to environment or header substitutions;
    it is accessible in the template via ${explanation}
``detail``
a plain-text message customization that is not subject
to environment or header substitutions; accessible in
    the template via ${detail}
``body_template``
a content fragment (in HTML) used for environment and
header substitution; the default template includes both
the explanation and further detail provided in the
message
Parameters:
``detail``
a plain-text override of the default ``detail``
``headers``
a list of (k,v) header pairs
``comment``
a plain-text additional information which is
usually stripped/hidden for end-users
``body_template``
a string.Template object containing a content fragment in HTML
that frames the explanation and further detail
To override the template (which is HTML content) or the plain-text
explanation, one must subclass the given exception; or customize it
after it has been created. This particular breakdown of a message
into explanation, detail and template allows both the creation of
plain-text and html messages for various clients as well as
error-free substitution of environment variables and headers.
The subclasses of :class:`~_HTTPMove`
(:class:`~HTTPMultipleChoices`, :class:`~HTTPMovedPermanently`,
:class:`~HTTPFound`, :class:`~HTTPSeeOther`, :class:`~HTTPUseProxy` and
:class:`~HTTPTemporaryRedirect`) are redirections that require a ``Location``
field. Reflecting this, these subclasses have two additional keyword arguments:
``location`` and ``add_slash``.
Parameters:
``location``
to set the location immediately
``add_slash``
set to True to redirect to the same URL as the request, except with a
``/`` appended
Relative URLs in the location will be resolved to absolute.
References:
.. [1] http://www.python.org/peps/pep-0333.html#error-handling
.. [2] http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.5
"""
from string import Template
import re
import sys
from webob.compat import (
class_types,
text_,
text_type,
urlparse,
)
from webob.request import Request
from webob.response import Response
from webob.util import (
html_escape,
warn_deprecation,
)
tag_re = re.compile(r'<.*?>', re.S)
br_re = re.compile(r'<br.*?>', re.I|re.S)
comment_re = re.compile(r'<!--|-->')
def no_escape(value):
if value is None:
return ''
if not isinstance(value, text_type):
if hasattr(value, '__unicode__'):
value = value.__unicode__()
if isinstance(value, bytes):
value = text_(value, 'utf-8')
else:
value = text_type(value)
return value
def strip_tags(value):
value = value.replace('\n', ' ')
value = value.replace('\r', '')
value = br_re.sub('\n', value)
value = comment_re.sub('', value)
value = tag_re.sub('', value)
return value
class HTTPException(Exception):
def __init__(self, message, wsgi_response):
Exception.__init__(self, message)
self.wsgi_response = wsgi_response
def __call__(self, environ, start_response):
return self.wsgi_response(environ, start_response)
# TODO: remove in version 1.3
@property
def exception(self):
warn_deprecation(
"As of WebOb 1.2, raise the HTTPException instance directly "
"instead of raising the result of 'HTTPException.exception'",
'1.3', 2)
return self
class WSGIHTTPException(Response, HTTPException):
## You should set in subclasses:
# code = 200
# title = 'OK'
# explanation = 'why this happens'
# body_template_obj = Template('response template')
code = None
title = None
explanation = ''
body_template_obj = Template('''\
${explanation}<br /><br />
${detail}
${html_comment}
''')
plain_template_obj = Template('''\
${status}
${body}''')
html_template_obj = Template('''\
<html>
<head>
<title>${status}</title>
</head>
<body>
<h1>${status}</h1>
${body}
</body>
</html>''')
## Set this to True for responses that should have no request body
empty_body = False
def __init__(self, detail=None, headers=None, comment=None,
body_template=None, **kw):
Response.__init__(self,
status='%s %s' % (self.code, self.title),
**kw)
Exception.__init__(self, detail)
if headers:
self.headers.extend(headers)
self.detail = detail
self.comment = comment
if body_template is not None:
self.body_template = body_template
self.body_template_obj = Template(body_template)
if self.empty_body:
del self.content_type
del self.content_length
def __str__(self):
return self.detail or self.explanation
def _make_body(self, environ, escape):
args = {
'explanation': escape(self.explanation),
'detail': escape(self.detail or ''),
'comment': escape(self.comment or ''),
}
if self.comment:
args['html_comment'] = '<!-- %s -->' % escape(self.comment)
else:
args['html_comment'] = ''
if WSGIHTTPException.body_template_obj is not self.body_template_obj:
# Custom template; add headers to args
for k, v in environ.items():
args[k] = escape(v)
for k, v in self.headers.items():
args[k.lower()] = escape(v)
t_obj = self.body_template_obj
return t_obj.substitute(args)
def plain_body(self, environ):
body = self._make_body(environ, no_escape)
body = strip_tags(body)
return self.plain_template_obj.substitute(status=self.status,
title=self.title,
body=body)
def html_body(self, environ):
body = self._make_body(environ, html_escape)
return self.html_template_obj.substitute(status=self.status,
body=body)
def generate_response(self, environ, start_response):
if self.content_length is not None:
del self.content_length
headerlist = list(self.headerlist)
accept = environ.get('HTTP_ACCEPT', '')
if accept and 'html' in accept or '*/*' in accept:
content_type = 'text/html'
body = self.html_body(environ)
else:
content_type = 'text/plain'
body = self.plain_body(environ)
extra_kw = {}
if isinstance(body, text_type):
extra_kw.update(charset='utf-8')
resp = Response(body,
status=self.status,
headerlist=headerlist,
content_type=content_type,
**extra_kw
)
resp.content_type = content_type
return resp(environ, start_response)
def __call__(self, environ, start_response):
is_head = environ['REQUEST_METHOD'] == 'HEAD'
if self.body or self.empty_body or is_head:
app_iter = Response.__call__(self, environ, start_response)
else:
app_iter = self.generate_response(environ, start_response)
if is_head:
app_iter = []
return app_iter
@property
def wsgi_response(self):
return self
class HTTPError(WSGIHTTPException):
"""
base class for status codes in the 400's and 500's
This is an exception which indicates that an error has occurred,
    and that any work in progress should not be committed. These typically
    result in statuses in the 400's and 500's.
"""
class HTTPRedirection(WSGIHTTPException):
"""
base class for 300's status code (redirections)
This is an abstract base class for 3xx redirection. It indicates
that further action needs to be taken by the user agent in order
    to fulfill the request. It does not necessarily signal an error
condition.
"""
class HTTPOk(WSGIHTTPException):
"""
Base class for the 200's status code (successful responses)
code: 200, title: OK
"""
code = 200
title = 'OK'
############################################################
## 2xx success
############################################################
class HTTPCreated(HTTPOk):
"""
subclass of :class:`~HTTPOk`
This indicates that request has been fulfilled and resulted in a new
resource being created.
code: 201, title: Created
"""
code = 201
title = 'Created'
class HTTPAccepted(HTTPOk):
"""
subclass of :class:`~HTTPOk`
This indicates that the request has been accepted for processing, but the
processing has not been completed.
code: 202, title: Accepted
"""
code = 202
title = 'Accepted'
explanation = 'The request is accepted for processing.'
class HTTPNonAuthoritativeInformation(HTTPOk):
"""
subclass of :class:`~HTTPOk`
This indicates that the returned metainformation in the entity-header is
not the definitive set as available from the origin server, but is
gathered from a local or a third-party copy.
code: 203, title: Non-Authoritative Information
"""
code = 203
title = 'Non-Authoritative Information'
class HTTPNoContent(HTTPOk):
"""
subclass of :class:`~HTTPOk`
This indicates that the server has fulfilled the request but does
not need to return an entity-body, and might want to return updated
metainformation.
code: 204, title: No Content
"""
code = 204
title = 'No Content'
empty_body = True
class HTTPResetContent(HTTPOk):
"""
subclass of :class:`~HTTPOk`
    This indicates that the server has fulfilled the request and
the user agent SHOULD reset the document view which caused the
request to be sent.
code: 205, title: Reset Content
"""
code = 205
title = 'Reset Content'
empty_body = True
class HTTPPartialContent(HTTPOk):
"""
subclass of :class:`~HTTPOk`
This indicates that the server has fulfilled the partial GET
request for the resource.
code: 206, title: Partial Content
"""
code = 206
title = 'Partial Content'
############################################################
## 3xx redirection
############################################################
class _HTTPMove(HTTPRedirection):
"""
redirections which require a Location field
Since a 'Location' header is a required attribute of 301, 302, 303,
305 and 307 (but not 304), this base class provides the mechanics to
make this easy.
You can provide a location keyword argument to set the location
immediately. You may also give ``add_slash=True`` if you want to
redirect to the same URL as the request, except with a ``/`` added
to the end.
Relative URLs in the location will be resolved to absolute.
"""
explanation = 'The resource has been moved to'
body_template_obj = Template('''\
${explanation} <a href="${location}">${location}</a>;
you should be redirected automatically.
${detail}
${html_comment}''')
def __init__(self, detail=None, headers=None, comment=None,
body_template=None, location=None, add_slash=False):
super(_HTTPMove, self).__init__(
detail=detail, headers=headers, comment=comment,
body_template=body_template)
if location is not None:
self.location = location
if add_slash:
raise TypeError(
"You can only provide one of the arguments location "
"and add_slash")
self.add_slash = add_slash
def __call__(self, environ, start_response):
req = Request(environ)
if self.add_slash:
url = req.path_url
url += '/'
if req.environ.get('QUERY_STRING'):
url += '?' + req.environ['QUERY_STRING']
self.location = url
self.location = urlparse.urljoin(req.path_url, self.location)
return super(_HTTPMove, self).__call__(
environ, start_response)
class HTTPMultipleChoices(_HTTPMove):
"""
subclass of :class:`~_HTTPMove`
This indicates that the requested resource corresponds to any one
of a set of representations, each with its own specific location,
and agent-driven negotiation information is being provided so that
the user can select a preferred representation and redirect its
request to that location.
code: 300, title: Multiple Choices
"""
code = 300
title = 'Multiple Choices'
class HTTPMovedPermanently(_HTTPMove):
"""
subclass of :class:`~_HTTPMove`
This indicates that the requested resource has been assigned a new
permanent URI and any future references to this resource SHOULD use
one of the returned URIs.
code: 301, title: Moved Permanently
"""
code = 301
title = 'Moved Permanently'
class HTTPFound(_HTTPMove):
"""
subclass of :class:`~_HTTPMove`
This indicates that the requested resource resides temporarily under
a different URI.
code: 302, title: Found
"""
code = 302
title = 'Found'
explanation = 'The resource was found at'
# This one is safe after a POST (the redirected location will be
# retrieved with GET):
class HTTPSeeOther(_HTTPMove):
"""
subclass of :class:`~_HTTPMove`
This indicates that the response to the request can be found under
a different URI and SHOULD be retrieved using a GET method on that
resource.
code: 303, title: See Other
"""
code = 303
title = 'See Other'
class HTTPNotModified(HTTPRedirection):
"""
subclass of :class:`~HTTPRedirection`
This indicates that if the client has performed a conditional GET
request and access is allowed, but the document has not been
modified, the server SHOULD respond with this status code.
code: 304, title: Not Modified
"""
# TODO: this should include a date or etag header
code = 304
title = 'Not Modified'
empty_body = True
class HTTPUseProxy(_HTTPMove):
"""
subclass of :class:`~_HTTPMove`
This indicates that the requested resource MUST be accessed through
the proxy given by the Location field.
code: 305, title: Use Proxy
"""
# Not a move, but looks a little like one
code = 305
title = 'Use Proxy'
explanation = (
'The resource must be accessed through a proxy located at')
class HTTPTemporaryRedirect(_HTTPMove):
"""
subclass of :class:`~_HTTPMove`
This indicates that the requested resource resides temporarily
under a different URI.
code: 307, title: Temporary Redirect
"""
code = 307
title = 'Temporary Redirect'
############################################################
## 4xx client error
############################################################
class HTTPClientError(HTTPError):
"""
base class for the 400's, where the client is in error
This is an error condition in which the client is presumed to be
in-error. This is an expected problem, and thus is not considered
a bug. A server-side traceback is not warranted. Unless specialized,
this is a '400 Bad Request'
"""
code = 400
title = 'Bad Request'
explanation = ('The server could not comply with the request since\r\n'
'it is either malformed or otherwise incorrect.\r\n')
class HTTPBadRequest(HTTPClientError):
pass
class HTTPUnauthorized(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the request requires user authentication.
code: 401, title: Unauthorized
"""
code = 401
title = 'Unauthorized'
explanation = (
'This server could not verify that you are authorized to\r\n'
'access the document you requested. Either you supplied the\r\n'
'wrong credentials (e.g., bad password), or your browser\r\n'
'does not understand how to supply the credentials required.\r\n')
class HTTPPaymentRequired(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
code: 402, title: Payment Required
"""
code = 402
title = 'Payment Required'
explanation = ('Access was denied for financial reasons.')
class HTTPForbidden(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server understood the request, but is
refusing to fulfill it.
code: 403, title: Forbidden
"""
code = 403
title = 'Forbidden'
explanation = ('Access was denied to this resource.')
class HTTPNotFound(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server did not find anything matching the
Request-URI.
code: 404, title: Not Found
"""
code = 404
title = 'Not Found'
explanation = ('The resource could not be found.')
class HTTPMethodNotAllowed(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the method specified in the Request-Line is
not allowed for the resource identified by the Request-URI.
code: 405, title: Method Not Allowed
"""
code = 405
title = 'Method Not Allowed'
# override template since we need an environment variable
body_template_obj = Template('''\
The method ${REQUEST_METHOD} is not allowed for this resource. <br /><br />
${detail}''')
class HTTPNotAcceptable(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates the resource identified by the request is only
capable of generating response entities which have content
characteristics not acceptable according to the accept headers
sent in the request.
code: 406, title: Not Acceptable
"""
code = 406
title = 'Not Acceptable'
# override template since we need an environment variable
template = Template('''\
The resource could not be generated that was acceptable to your browser
(content of type ${HTTP_ACCEPT}. <br /><br />
${detail}''')
class HTTPProxyAuthenticationRequired(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This is similar to 401, but indicates that the client must first
authenticate itself with the proxy.
code: 407, title: Proxy Authentication Required
"""
code = 407
title = 'Proxy Authentication Required'
explanation = ('Authentication with a local proxy is needed.')
class HTTPRequestTimeout(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the client did not produce a request within
the time that the server was prepared to wait.
code: 408, title: Request Timeout
"""
code = 408
title = 'Request Timeout'
explanation = ('The server has waited too long for the request to '
'be sent by the client.')
class HTTPConflict(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the request could not be completed due to a
conflict with the current state of the resource.
code: 409, title: Conflict
"""
code = 409
title = 'Conflict'
explanation = ('There was a conflict when trying to complete '
'your request.')
class HTTPGone(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the requested resource is no longer available
at the server and no forwarding address is known.
code: 410, title: Gone
"""
code = 410
title = 'Gone'
explanation = ('This resource is no longer available. No forwarding '
'address is given.')
class HTTPLengthRequired(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
    This indicates that the server refuses to accept the request
without a defined Content-Length.
code: 411, title: Length Required
"""
code = 411
title = 'Length Required'
explanation = ('Content-Length header required.')
class HTTPPreconditionFailed(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the precondition given in one or more of the
request-header fields evaluated to false when it was tested on the
server.
code: 412, title: Precondition Failed
"""
code = 412
title = 'Precondition Failed'
explanation = ('Request precondition failed.')
class HTTPRequestEntityTooLarge(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server is refusing to process a request
because the request entity is larger than the server is willing or
able to process.
code: 413, title: Request Entity Too Large
"""
code = 413
title = 'Request Entity Too Large'
explanation = ('The body of your request was too large for this server.')
class HTTPRequestURITooLong(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server is refusing to service the request
because the Request-URI is longer than the server is willing to
interpret.
code: 414, title: Request-URI Too Long
"""
code = 414
title = 'Request-URI Too Long'
explanation = ('The request URI was too long for this server.')
class HTTPUnsupportedMediaType(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server is refusing to service the request
because the entity of the request is in a format not supported by
the requested resource for the requested method.
code: 415, title: Unsupported Media Type
"""
code = 415
title = 'Unsupported Media Type'
# override template since we need an environment variable
template_obj = Template('''\
The request media type ${CONTENT_TYPE} is not supported by this server.
<br /><br />
${detail}''')
class HTTPRequestRangeNotSatisfiable(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
The server SHOULD return a response with this status code if a
request included a Range request-header field, and none of the
range-specifier values in this field overlap the current extent
of the selected resource, and the request did not include an
If-Range request-header field.
code: 416, title: Request Range Not Satisfiable
"""
code = 416
title = 'Request Range Not Satisfiable'
explanation = ('The Range requested is not available.')
class HTTPExpectationFailed(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
    This indicates that the expectation given in an Expect
request-header field could not be met by this server.
code: 417, title: Expectation Failed
"""
code = 417
title = 'Expectation Failed'
explanation = ('Expectation failed.')
class HTTPUnprocessableEntity(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server is unable to process the contained
instructions. Only for WebDAV.
code: 422, title: Unprocessable Entity
"""
## Note: from WebDAV
code = 422
title = 'Unprocessable Entity'
explanation = 'Unable to process the contained instructions'
class HTTPLocked(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the resource is locked. Only for WebDAV
code: 423, title: Locked
"""
## Note: from WebDAV
code = 423
title = 'Locked'
explanation = ('The resource is locked')
class HTTPFailedDependency(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the method could not be performed because the
requested action depended on another action and that action failed.
Only for WebDAV.
code: 424, title: Failed Dependency
"""
## Note: from WebDAV
code = 424
title = 'Failed Dependency'
explanation = (
'The method could not be performed because the requested '
        'action depended on another action and that action failed')
class HTTPPreconditionRequired(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the origin server requires the request to be
conditional. From RFC 6585, "Additional HTTP Status Codes".
code: 428, title: Precondition Required
"""
code = 428
title = 'Precondition Required'
explanation = ('This request is required to be conditional')
class HTTPTooManyRequests(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the client has sent too many requests in a
given amount of time. Useful for rate limiting.
From RFC 6585, "Additional HTTP Status Codes".
code: 429, title: Too Many Requests
"""
code = 429
title = 'Too Many Requests'
explanation = (
'The client has sent too many requests in a given amount of time')
class HTTPRequestHeaderFieldsTooLarge(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server is unwilling to process the request
because its header fields are too large. The request may be resubmitted
after reducing the size of the request header fields.
From RFC 6585, "Additional HTTP Status Codes".
code: 431, title: Request Header Fields Too Large
"""
code = 431
title = 'Request Header Fields Too Large'
explanation = (
'The request header fields were too large')
class HTTPUnavailableForLegalReasons(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server is unable to process the request
because of legal reasons, e.g. censorship or government-mandated
blocked access.
From the draft "A New HTTP Status Code for Legally-restricted Resources"
by Tim Bray:
http://tools.ietf.org/html/draft-tbray-http-legally-restricted-status-00
code: 451, title: Unavailable For Legal Reasons
"""
code = 451
title = 'Unavailable For Legal Reasons'
explanation = ('The resource is not available due to legal reasons.')
############################################################
## 5xx Server Error
############################################################
# Response status codes beginning with the digit "5" indicate cases in
# which the server is aware that it has erred or is incapable of
# performing the request. Except when responding to a HEAD request, the
# server SHOULD include an entity containing an explanation of the error
# situation, and whether it is a temporary or permanent condition. User
# agents SHOULD display any included entity to the user. These response
# codes are applicable to any request method.
class HTTPServerError(HTTPError):
"""
base class for the 500's, where the server is in-error
This is an error condition in which the server is presumed to be
in-error. This is usually unexpected, and thus requires a traceback;
ideally, opening a support ticket for the customer. Unless specialized,
this is a '500 Internal Server Error'
"""
code = 500
title = 'Internal Server Error'
explanation = (
'The server has either erred or is incapable of performing\r\n'
'the requested operation.\r\n')
class HTTPInternalServerError(HTTPServerError):
pass
class HTTPNotImplemented(HTTPServerError):
"""
subclass of :class:`~HTTPServerError`
This indicates that the server does not support the functionality
required to fulfill the request.
code: 501, title: Not Implemented
"""
code = 501
title = 'Not Implemented'
template = Template('''
The request method ${REQUEST_METHOD} is not implemented for this server. <br /><br />
${detail}''')
class HTTPBadGateway(HTTPServerError):
"""
subclass of :class:`~HTTPServerError`
This indicates that the server, while acting as a gateway or proxy,
received an invalid response from the upstream server it accessed
in attempting to fulfill the request.
code: 502, title: Bad Gateway
"""
code = 502
title = 'Bad Gateway'
explanation = ('Bad gateway.')
class HTTPServiceUnavailable(HTTPServerError):
"""
subclass of :class:`~HTTPServerError`
This indicates that the server is currently unable to handle the
request due to a temporary overloading or maintenance of the server.
code: 503, title: Service Unavailable
"""
code = 503
title = 'Service Unavailable'
explanation = ('The server is currently unavailable. '
'Please try again at a later time.')
class HTTPGatewayTimeout(HTTPServerError):
"""
subclass of :class:`~HTTPServerError`
This indicates that the server, while acting as a gateway or proxy,
did not receive a timely response from the upstream server specified
by the URI (e.g. HTTP, FTP, LDAP) or some other auxiliary server
(e.g. DNS) it needed to access in attempting to complete the request.
code: 504, title: Gateway Timeout
"""
code = 504
title = 'Gateway Timeout'
explanation = ('The gateway has timed out.')
class HTTPVersionNotSupported(HTTPServerError):
"""
subclass of :class:`~HTTPServerError`
This indicates that the server does not support, or refuses to
support, the HTTP protocol version that was used in the request
message.
code: 505, title: HTTP Version Not Supported
"""
code = 505
title = 'HTTP Version Not Supported'
explanation = ('The HTTP version is not supported.')
class HTTPInsufficientStorage(HTTPServerError):
"""
subclass of :class:`~HTTPServerError`
This indicates that the server does not have enough space to save
the resource.
code: 507, title: Insufficient Storage
"""
code = 507
title = 'Insufficient Storage'
explanation = ('There was not enough space to save the resource')
class HTTPNetworkAuthenticationRequired(HTTPServerError):
"""
subclass of :class:`~HTTPServerError`
This indicates that the client needs to authenticate to gain
network access. From RFC 6585, "Additional HTTP Status Codes".
code: 511, title: Network Authentication Required
"""
code = 511
title = 'Network Authentication Required'
explanation = ('Network authentication is required')
class HTTPExceptionMiddleware(object):
"""
Middleware that catches exceptions in the sub-application. This
does not catch exceptions in the app_iter; only during the initial
calling of the application.
This should be put *very close* to applications that might raise
these exceptions. This should not be applied globally; letting
*expected* exceptions raise through the WSGI stack is dangerous.
"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
try:
return self.application(environ, start_response)
except HTTPException:
parent_exc_info = sys.exc_info()
def repl_start_response(status, headers, exc_info=None):
if exc_info is None:
exc_info = parent_exc_info
return start_response(status, headers, exc_info)
return parent_exc_info[1](environ, repl_start_response)
try:
from paste import httpexceptions
except ImportError: # pragma: no cover
# Without Paste we don't need to do this fixup
pass
else: # pragma: no cover
for name in dir(httpexceptions):
obj = globals().get(name)
if (obj and isinstance(obj, type) and issubclass(obj, HTTPException)
and obj is not HTTPException
and obj is not WSGIHTTPException):
obj.__bases__ = obj.__bases__ + (getattr(httpexceptions, name),)
del name, obj, httpexceptions
__all__ = ['HTTPExceptionMiddleware', 'status_map']
status_map={}
for name, value in list(globals().items()):
if (isinstance(value, (type, class_types)) and
issubclass(value, HTTPException)
and not name.startswith('_')):
__all__.append(name)
if getattr(value, 'code', None):
status_map[value.code]=value
if hasattr(value, 'explanation'):
value.explanation = ' '.join(value.explanation.strip().split())
del name, value
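# --- Illustrative usage sketch (added for clarity; not part of the original
# module). An HTTPException subclass is simultaneously an Exception, a
# webob.Response and a WSGI application, so an application can either return
# one directly or raise it behind HTTPExceptionMiddleware. The name
# `_demo_app` and the detail strings below are assumptions for this example.
def _demo_app(environ, start_response):
    # Serve '/' with a 200 and everything else with a rendered 404.
    if environ.get('PATH_INFO', '/') != '/':
        raise HTTPNotFound(detail='Only the root path is served here.')
    return HTTPOk(detail='Welcome')(environ, start_response)
# Wrapping with the middleware converts raised HTTPExceptions into responses:
#     demo_wsgi_app = HTTPExceptionMiddleware(_demo_app)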
| gpl-2.0 | -6,672,500,066,994,011,000 | 30.170121 | 85 | 0.655548 | false |
aboutsajjad/Bridge | app_packages/youtube_dl/extractor/njpwworld.py | 8 | 3103 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
extract_attributes,
get_element_by_class,
urlencode_postdata,
)
class NJPWWorldIE(InfoExtractor):
_VALID_URL = r'https?://njpwworld\.com/p/(?P<id>[a-z0-9_]+)'
IE_DESC = '新日本プロレスワールド'
_NETRC_MACHINE = 'njpwworld'
_TEST = {
'url': 'http://njpwworld.com/p/s_series_00155_1_9/',
'info_dict': {
'id': 's_series_00155_1_9',
'ext': 'mp4',
'title': '第9試合 ランディ・サベージ vs リック・スタイナー',
'tags': list,
},
'params': {
'skip_download': True, # AES-encrypted m3u8
},
'skip': 'Requires login',
}
def _real_initialize(self):
self._login()
def _login(self):
username, password = self._get_login_info()
# No authentication to be performed
if not username:
return True
webpage, urlh = self._download_webpage_handle(
'https://njpwworld.com/auth/login', None,
note='Logging in', errnote='Unable to login',
data=urlencode_postdata({'login_id': username, 'pw': password}),
headers={'Referer': 'https://njpwworld.com/auth'})
# /auth/login will return 302 for successful logins
if urlh.geturl() == 'https://njpwworld.com/auth/login':
self.report_warning('unable to login')
return False
return True
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
formats = []
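        # Every /player link on the page is a low- or high-quality HLS stream;
        # download each player page and collect the formats it exposes.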
for mobj in re.finditer(r'<a[^>]+\bhref=(["\'])/player.+?[^>]*>', webpage):
player = extract_attributes(mobj.group(0))
player_path = player.get('href')
if not player_path:
continue
kind = self._search_regex(
r'(low|high)$', player.get('class') or '', 'kind',
default='low')
player_url = compat_urlparse.urljoin(url, player_path)
player_page = self._download_webpage(
player_url, video_id, note='Downloading player page')
entries = self._parse_html5_media_entries(
player_url, player_page, video_id, m3u8_id='hls-%s' % kind,
m3u8_entry_protocol='m3u8_native')
kind_formats = entries[0]['formats']
for f in kind_formats:
f['quality'] = 2 if kind == 'high' else 1
formats.extend(kind_formats)
self._sort_formats(formats)
post_content = get_element_by_class('post-content', webpage)
tags = re.findall(
r'<li[^>]+class="tag-[^"]+"><a[^>]*>([^<]+)</a></li>', post_content
) if post_content else None
return {
'id': video_id,
'title': self._og_search_title(webpage),
'formats': formats,
'tags': tags,
}
| mit | 7,352,635,283,471,513,000 | 31.967391 | 83 | 0.536433 | false |
PetePriority/home-assistant | homeassistant/components/sensor/london_air.py | 4 | 6395 | """
Sensor for checking the status of London air.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.london_air/
"""
from datetime import timedelta
import logging
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_LOCATIONS = 'locations'
SCAN_INTERVAL = timedelta(minutes=30)
AUTHORITIES = [
'Barking and Dagenham',
'Bexley',
'Brent',
'Camden',
'City of London',
'Croydon',
'Ealing',
'Enfield',
'Greenwich',
'Hackney',
'Haringey',
'Harrow',
'Havering',
'Hillingdon',
'Islington',
'Kensington and Chelsea',
'Kingston',
'Lambeth',
'Lewisham',
'Merton',
'Redbridge',
'Richmond',
'Southwark',
'Sutton',
'Tower Hamlets',
'Wandsworth',
'Westminster']
URL = ('http://api.erg.kcl.ac.uk/AirQuality/Hourly/'
'MonitoringIndex/GroupName=London/Json')
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_LOCATIONS, default=AUTHORITIES):
vol.All(cv.ensure_list, [vol.In(AUTHORITIES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the London Air sensor."""
data = APIData()
data.update()
sensors = []
for name in config.get(CONF_LOCATIONS):
sensors.append(AirSensor(name, data))
add_entities(sensors, True)
class APIData:
"""Get the latest data for all authorities."""
def __init__(self):
"""Initialize the AirData object."""
self.data = None
# Update only once in scan interval.
@Throttle(SCAN_INTERVAL)
def update(self):
"""Get the latest data from TFL."""
response = requests.get(URL, timeout=10)
if response.status_code != 200:
_LOGGER.warning("Invalid response from API")
else:
self.data = parse_api_response(response.json())
class AirSensor(Entity):
"""Single authority air sensor."""
ICON = 'mdi:cloud-outline'
def __init__(self, name, APIdata):
"""Initialize the sensor."""
self._name = name
self._api_data = APIdata
self._site_data = None
self._state = None
self._updated = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def site_data(self):
"""Return the dict of sites data."""
return self._site_data
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self.ICON
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
attrs = {}
attrs['updated'] = self._updated
attrs['sites'] = len(self._site_data)
attrs['data'] = self._site_data
return attrs
def update(self):
"""Update the sensor."""
self._api_data.update()
self._site_data = self._api_data.data[self._name]
self._updated = self._site_data[0]['updated']
sites_status = []
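        # The sensor state is the most common pollutant band among the sites
        # that reported species data.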
for site in self._site_data:
if site['pollutants_status'] != 'no_species_data':
sites_status.append(site['pollutants_status'])
if sites_status:
self._state = max(set(sites_status), key=sites_status.count)
else:
self._state = None
def parse_species(species_data):
"""Iterate over list of species at each site."""
parsed_species_data = []
quality_list = []
for species in species_data:
if species['@AirQualityBand'] != 'No data':
species_dict = {}
species_dict['description'] = species['@SpeciesDescription']
species_dict['code'] = species['@SpeciesCode']
species_dict['quality'] = species['@AirQualityBand']
species_dict['index'] = species['@AirQualityIndex']
species_dict['summary'] = (species_dict['code'] + ' is '
+ species_dict['quality'])
parsed_species_data.append(species_dict)
quality_list.append(species_dict['quality'])
return parsed_species_data, quality_list
def parse_site(entry_sites_data):
"""Iterate over all sites at an authority."""
authority_data = []
for site in entry_sites_data:
site_data = {}
species_data = []
site_data['updated'] = site['@BulletinDate']
site_data['latitude'] = site['@Latitude']
site_data['longitude'] = site['@Longitude']
site_data['site_code'] = site['@SiteCode']
site_data['site_name'] = site['@SiteName'].split("-")[-1].lstrip()
site_data['site_type'] = site['@SiteType']
if isinstance(site['Species'], dict):
species_data = [site['Species']]
else:
species_data = site['Species']
parsed_species_data, quality_list = parse_species(species_data)
if not parsed_species_data:
parsed_species_data.append('no_species_data')
site_data['pollutants'] = parsed_species_data
if quality_list:
site_data['pollutants_status'] = max(set(quality_list),
key=quality_list.count)
site_data['number_of_pollutants'] = len(quality_list)
else:
site_data['pollutants_status'] = 'no_species_data'
site_data['number_of_pollutants'] = 0
authority_data.append(site_data)
return authority_data
def parse_api_response(response):
"""Parse return dict or list of data from API."""
data = dict.fromkeys(AUTHORITIES)
for authority in AUTHORITIES:
for entry in response['HourlyAirQualityIndex']['LocalAuthority']:
if entry['@LocalAuthorityName'] == authority:
if isinstance(entry['Site'], dict):
entry_sites_data = [entry['Site']]
else:
entry_sites_data = entry['Site']
data[authority] = parse_site(entry_sites_data)
return data
| apache-2.0 | -3,667,603,604,450,012,000 | 28.470046 | 74 | 0.593276 | false |
FedoraScientific/salome-smesh | src/SMESH_SWIG/ex14_cyl1holed.py | 1 | 3773 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D, OPEN CASCADE
#
# Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
# =======================================
#
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(salome.myStudy)
# Geometry
# ========
# Create a cylinder with an off-centre cylindrical hole, partition it into hexahedra and mesh it.
# Data
# ----
# unit: millimetre
g_ox = 0
g_oy = 0
g_oz = 0
g_cyl_rayon = 1000
g_cyl_demiHauteur = 3000
g_trou_rayon = 5
g_trou_centre = 300
g_trim = 15000
# Build the cylinder
# ------------------
c_point = geompy.MakeVertex(g_ox, g_oy, g_oz-g_cyl_demiHauteur)
c_dir = geompy.MakeVectorDXDYDZ(0, 0, 1)
c_hauteur = 2*g_cyl_demiHauteur
c_cylindre = geompy.MakeCylinder(c_point, c_dir, g_cyl_rayon, c_hauteur)
# Drill the cylinder with a tiny off-centre cylinder
# ---------------------------------------------------
t_hauteur = g_cyl_demiHauteur
t_point = geompy.MakeVertex(g_ox-g_trou_centre, g_oy, g_oz-t_hauteur)
t_trou = geompy.MakeCylinder(t_point, c_dir, g_trou_rayon, 2*t_hauteur)
t_piece = geompy.MakeCut(c_cylindre, t_trou)
# Hexahedral geometry
# ===================
# Partition
# ---------
h_outils = []
h_outils.append(geompy.MakePlane(t_point, geompy.MakeVectorDXDYDZ(1, 0, 0), g_trim))
h_outils.append(geompy.MakePlane(t_point, geompy.MakeVectorDXDYDZ(0, 1, 0), g_trim))
h_piece = geompy.MakePartition([t_piece], h_outils, [], [], geompy.ShapeType["SOLID"])
# Partition for the local conditions
# ----------------------------------
l_outils = []
l_i = 1
l_n = 12
l_hauteur = c_hauteur/l_n
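# Build l_n-1 evenly spaced horizontal cutting planes along the cylinder
# height; they split the solid into slabs used by the local hypotheses below.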
while l_i<l_n:
l_outils.append(geompy.MakePlane(geompy.MakeVertex(g_ox, g_oy, g_oz-g_cyl_demiHauteur+l_i*l_hauteur), c_dir, g_trim))
l_i = l_i+1
piece = geompy.MakePartition([h_piece], l_outils, [], [], geompy.ShapeType["SOLID"])
# Add the piece to the study
# --------------------------
piece_id = geompy.addToStudy(piece, "ex14_cyl1holed")
# Meshing
# =======
# Create a hexahedral mesh
# ------------------------
hexa = smesh.Mesh(piece, "ex14_cyl1holed:hexa")
algo = hexa.Segment()
algo.NumberOfSegments(4)
hexa.Quadrangle()
hexa.Hexahedron()
# Set the local hypotheses
# ------------------------
m_i = 0
m_n = 12
m_h = c_hauteur/m_n
m_d = [4, 6, 8, 10, 10, 9, 8, 7, 6, 5, 4, 3]
m_x = g_ox+g_cyl_rayon
m_y = g_oy
m_z = g_oz-g_cyl_demiHauteur+m_h/2
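# For each slab, pick the edge closest to a point on the outer radius at the
# slab's mid-height and impose that slab's segment count, with propagation.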
while m_i<m_n:
m_p = geompy.MakeVertex(m_x, m_y, m_z + m_i*m_h)
m_e = geompy.GetEdgeNearPoint(piece, m_p)
m_a = hexa.Segment(m_e)
m_a.NumberOfSegments(m_d[m_i])
m_a.Propagation()
m_i = m_i + 1
# Compute the mesh
# ----------------
hexa.Compute()
| lgpl-2.1 | -4,865,860,661,526,086,000 | 25.201389 | 121 | 0.636629 | false |
badlogicmanpreet/nupic | examples/opf/experiments/classification/category_TP_0/description.py | 6 | 1653 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/category_TP_0.csv'),
'modelParams': { 'clParams': { 'verbosity': 0},
'sensorParams': { 'encoders': { }, 'verbosity': 0},
'spParams': { },
'tpEnable': True,
'tpParams': { }}}
mod = importBaseDescription('../base_category/description.py', config)
locals().update(mod.__dict__)
| agpl-3.0 | 1,107,099,642,865,129,000 | 41.384615 | 76 | 0.61827 | false |
dighl/triples | triplesx.py | 1 | 9601 | #!/usr/bin/python2.6
import cgitb
cgitb.enable()
import cgi
import sqlite3
import datetime
print "Content-type: text/plain; charset=utf-8"
# get the args of the url, convert nasty field storage to plain dictionary,
# there is probably a better solution, but this works for the moment
tmp_args = cgi.FieldStorage()
args = {}
for arg in tmp_args:
args[arg] = tmp_args[arg].value
def get_max_id(args, cursor):
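    # Return the next free row ID: one larger than the highest ID found in
    # either the live table or its backup entries.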
cursor.execute('select DISTINCT ID from '+args['file']+';')
linesA = [x[0] for x in cursor.fetchall()]
cursor.execute(
'select DISTINCT ID from backup where FILE = "'+args['file']+'";'
)
linesB = [x[0] for x in cursor.fetchall()]
try:
maxA = max(linesA)
except ValueError:
maxA = 0
try:
maxB = max(linesB)
except ValueError:
maxB = 0
if maxA >= maxB:
return maxA + 1
else:
return maxB + 1
# check for dbase arg and switch the database in case an argument is provided
if 'remote_dbase' in args:
dbpath = args['remote_dbase']+'.sqlite3' if not \
args['remote_dbase'].endswith('sqlite3') else \
args['remote_dbase']
else:
dbpath = 'triples.sqlite3'
# connect to the sqlite database
db = sqlite3.connect(dbpath)
cursor = db.cursor()
# load the table if this is specified
if 'tables' in args:
print
for line in cursor.execute(
'select name from sqlite_master where name != "backup";'):
print line[0]
elif 'summary' in args and 'file' in args:
taxa = [line[0] for line in cursor.execute(
'select distinct VAL from '+args['file']+' where COL="DOCULECT";'
)]
concepts = [line[0] for line in cursor.execute(
'select distinct VAL from '+args['file'] + ' where COL="CONCEPT";'
)]
columns = [line[0] for line in cursor.execute(
'select distinct COL from '+args['file']+';')]
tstring = ''
for t in sorted(taxa):
tstring += '<option value="'+t.encode('utf-8')+'">'+t.encode('utf-8')+'</option>'
cstrings = []
for t in sorted(concepts):
cstrings += ['<option value="'+t.encode('utf-8')+'">'+t.encode('utf-8')+'</option>']
colstring = ''
for t in sorted(columns):
colstring += '<option value="'+t.encode('utf-8')+'">'+t.encode('utf-8')+'</option>'
from template import html1,html2,script
out1 = html1.format(
DOCULECTS = tstring
)
out2 = html2.format(
COLUMNS = colstring,
SCRIPT = script,
DBASE = args['file']
)
out = out1 + '\n'.join(cstrings) + out2
print 'Content-Type: text/html'
print
print out1
for i,t in enumerate(sorted(concepts)):
print '<option value="'+t.encode('utf-8')+'">' +t.encode('utf-8')+'</option>'
print
print out2
# return most recent edits in the data
elif 'date' in args:
print
cursor.execute(
'select ID,COL from backup where FILE="'+args['file']+'"'+\
' and datetime(DATE) > datetime('+args['date']+')'+\
' group by ID,COL limit 100;')
lines = cursor.fetchall()
data = dict([((a,b),c) for a,b,c in cursor.execute(
'select * from '+args['file']+';'
)])
for line in lines:
try:
val = data[line[0],line[1]].encode('utf-8')
print '{0}\t{1}\t{2}'.format(line[0], line[1], val)
except KeyError:
pass
elif 'new_id' in args:
print
if args['new_id'] in ['new_id','newid','true']:
cursor.execute('select DISTINCT ID from '+args['file']+';')
linesA = [x[0] for x in cursor.fetchall()]
cursor.execute(
'select DISTINCT ID from backup where FILE = "'+args['file']+'";'
)
linesB = [x[0] for x in cursor.fetchall()]
try:
maxA = max(linesA)
except ValueError:
maxA = 0
try:
maxB = max(linesB)
except ValueError:
maxB = 0
if maxA >= maxB:
print str(maxA + 1)
else:
print str(maxB + 1)
else:
lines = [x[0] for x in cursor.execute('select DISTINCT VAL from '+args['file']+\
' where COL="'+args['new_id']+'";')]
# dammit but, it doesn't really seem to work without explicit
# type-checking
cogids = []
for l in lines:
try: cogids += [int(l)]
except: pass
print str(max(cogids)+1)
elif 'file' in args and not 'unique' in args:
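    # Dump the requested table as TSV: one row per ID and one column per COL,
    # optionally restricted to the given concepts and/or doculects.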
print 'Content-Disposition: attachment; filename="triples.tsv"'
print
# get unique columns
if not 'columns' in args:
cols = [line[0] for line in cursor.execute(
'select distinct COL from '+args['file']+';')]
else:
cols = args['columns'].split('|')
print 'ID\t'+'\t'.join(cols)
# if neither concepts or doculects are passed from the args, all ids are
# selected from the database
if not 'concepts' in args and not 'doculects' in args:
idxs = [line[0] for line in cursor.execute(
'select distinct ID from '+args['file']+';')]
else:
# we evaluate the concept string
cstring = 'COL = "CONCEPT" and VAL in ("'+'","'.join(args['concepts'].split('|'))+'")' if \
'concepts' in args else ''
dstring = 'COL = "DOCULECT" and VAL in ("'+'","'.join(args['doculects'].split('|'))+'")' if \
'doculects' in args else ''
cidxs = [line[0] for line in cursor.execute(
'select distinct ID from '+args['file'] + ' where '+cstring)] if \
cstring else []
didxs = [line[0] for line in cursor.execute(
'select distinct ID from '+args['file'] + ' where '+dstring)] if \
dstring else []
if cidxs and didxs:
idxs = [idx for idx in cidxs if idx in didxs]
else:
idxs = cidxs or didxs
# make the dictionary
D = {}
for a,b,c in cursor.execute('select * from '+args['file']+';'):
if c not in ['-','']:
try:
D[a][b] = c.encode('utf-8')
except KeyError:
D[a] = {b:c.encode('utf-8')}
# check for concepts and "template"
if 'concepts' in args and "template" in args and 'doculects' in args:
maxidx = get_max_id(args, cursor)
for doculect in args['doculects'].split('|'):
conceptsIs = [D[idx]['CONCEPT'] for idx in D if 'CONCEPT' in D[idx] and 'DOCULECT' in D[idx] and D[idx]['DOCULECT'] == doculect]
conceptsMiss = [c for c in args['concepts'].split('|') if c not in conceptsIs]
for concept in conceptsMiss:
D[maxidx] = {"CONCEPT":concept, "DOCULECT":doculect, "IPA": '?'}
idxs += [maxidx]
maxidx += 1
print len(D)
# make object
for idx in idxs:
txt = str(idx)
for col in cols:
try:
txt += '\t'+D[idx][col]
except:
txt += '\t'
print txt
# XXX note that the following formula will bring much faster results:
# select * from table where ID||":"||COL in ("X:Y");
# with this formula we can limit the search space drastically, I think
# addon: it seems this is much faster, but also the fact that:
# * we print only once, XXX addon: this doesn't seem to be the case
# * we use python for sampling, and
# * we don't make many exact sqlite3 statements, this should be kept in mind,
# since it may speed up all the processes!
elif 'history' in args:
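    # List the most recent edits recorded in the backup table together with
    # the value each affected cell currently holds in its live table.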
print
if 'limit' in args:
limit = ' limit ' + args['limit']
else:
limit = ''
cursor.execute(
'select * from backup order by DATE DESC'+limit+';')
backup = cursor.fetchall()
# get ID restriction
idres = '("'+'","'.join([str(line[1])+':'+line[2] for line in backup])+'")'
# get the rest
tables = []
for line in cursor.execute(
'select name from sqlite_master where name != "backup";'):
tables += [line[0]]
data = {}
for table in tables:
for a,b,c in cursor.execute(
'select * from '+table+' where ID||":"||COL in '+idres+';'):
try:
data[table][a,b] = c
except KeyError:
data[table] = {(a,b):c}
txt = ''
for line in backup:
try:
new_val = data[line[0]][line[1],line[2]]
except KeyError:
new_val = '???'
print '{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}'.format(
line[0],
line[1],
line[2],
line[3].encode('utf-8'),
new_val.encode('utf-8'),
str(line[4]),
line[5])
# if we are requested to submit unique values, we output one value (like
# concepts and the like) and only take the distinct values from the DB)
elif 'unique' in args:
if not 'content' in args:
args['content'] = 'tsv'
if args['content'] == 'tsv':
print 'Content-Disposition: attachment; filename="triples.tsv"'
print
else:
print
# set backup as default if nothing is passed
if not 'file' in args:
args['file'] = 'backup'
query = 'select distinct val from '+args['file']+' where col = "'+args['unique']+'" order by val;'
print 'NUMBER\t'+args['unique'];
for i,line in enumerate(cursor.execute(query)):
print str(i+1)+'\t'+''.join(line).encode('utf-8')
| gpl-2.0 | -5,061,434,732,639,982,000 | 31.326599 | 140 | 0.540465 | false |
ajmorss/automatedAFManalysis | machine_learning/feature_creation.py | 1 | 6369 | # -*- coding: utf-8 -*-
from automatedAFManalysis.afm_sql_code.afmsqlcode import SQLConnection, SQLData
import numpy as np
import multiprocessing
import csv
class SQLDataForFeatures(SQLData):
'''
The SQLDataForFeatures class is a modified version of the SQLData class
that facilitates the creation and saving of a classified feature set from
AFM force time series for the purposes of supervised learning.
Methods:
Public:
build_write_all_features: Builds classification features for all
cycles in the given raw data file then
                                      writes them to bondsamples.txt
Private:
_build_classification_features: Build the classification features
for the current cycle
'''
def build_write_all_features(self):
'''
Builds classification features for all cycles in the given raw data
        file then writes them to bondsamples.txt
'''
while self.get_next_cycle() != None:
#Get the features for the current cycle
features = self._build_classification_features()
#Get the ground truth classification
conn = SQLConnection()
s = ('SELECT tethered FROM cell_data '
'WHERE file_id=%s '
'AND cycle=%s;')
tethered = []
for rows in conn.execute((s % (self.fileid, self.cyc))):
tethered.append(rows[0])
if len(tethered) == 0:
classification = 0
elif tethered[0] == 'Y':
classification = 1
elif tethered[0] == 'N':
classification = 2
#Tack the classification and the unique id of the cycle (so we can
#retrieve it later if we want)
features = np.hstack((features, classification))
s = ('select id from dat '
'WHERE file_id=%s '
'AND cycle=%s '
'AND piezo_state=3;')
datid = []
for rows in conn.execute((s % (self.fileid, self.cyc))):
datid.append(int(rows[0]))
features = np.hstack((features, datid[0]))
conn.close()
#Write the cycles features to file
features = features.tolist()
with open('bondsamples.txt','ab') as datout:
datout=csv.writer(datout)
datout.writerow(features)
def _build_classification_features(self):
'''
The _build_classification_features method creates the features that are
passed to the classifier.
Output:
features: 1d numpy array of features for the current cycle
'''
#Preprocesses data (shortens it, bins it so the length is 512)
peaks = self.fin_peaks(4.5, 201, 5000, 50)
if peaks[0].size > 0:
finbreak = peaks[1][peaks[1].size - 1]
endp = int(finbreak*1.25)
else:
endp = self._datares[self.cyc].get()[1, :].size - 1
samples_dat = self._bindat(self._datares[self.cyc].get()[1, 0:endp], 512)
#Get FFT, square it, take first half
ffts = (np.fft.fft(samples_dat[:]))*np.conj((np.fft.fft(samples_dat[:])))
ffts = ffts[0:ffts.size/2]
ffts = np.real(ffts)
#Bin the data a little more
samples_dat = self._bindat(samples_dat, samples_dat.size/2)
ffts = self._bindat(ffts, ffts.size/2)
#First two features: largest discontinuity (drop/negative slope) and
#largest positive slope
grad_y = np.gradient(samples_dat)
largestdrop = np.min(grad_y)
slopes = np.max(grad_y)
#Next feature, total discontinuities
disconts = peaks[0].size
#TO DO: Rerun and retrain with actual integral values, currently all 0
#dxp=self._datares[self.cyc].get()[0, 1]-self._datares[self.cyc].get()[0, 0]
#integrals = np.trapz(self._datares[self.cyc].get()[1, 0:endp], dx=dxp)
integrals = 0
#Next features, the mean force of the data and the max of the data
means = np.mean(samples_dat)
maxs = np.max(samples_dat)
#Next feature, the number of peaks in the power spectrum
fft_peak_no = ffts[ffts>2.5*np.std(ffts)].size
#The last features are the the data itself and the power spectrum,
#binned to a final length of 16 points each
binned = self._bindat(samples_dat, 16)
ffts = self._bindat(ffts, 16)
#Build and return features
features = [disconts, means, maxs, slopes, largestdrop, integrals, fft_peak_no]
features = np.transpose(np.array(features))
features = np.hstack((features, binned))
features = np.hstack((features, ffts))
return features
def write_all_features(idnum):
'''
Worker task, writes all the features for a given raw data file
'''
dataobj = SQLDataForFeatures(idnum, 0)
dataobj.build_write_all_features()
dataobj.pool.terminate()
del(dataobj)
return True
if __name__ == '__main__':
#Get the list of all raw files that training/testing data will be pulled from
conn = SQLConnection()
s = ('SELECT id FROM afm_data.filelist '
'WHERE id>374 '
'AND id<614 '
'AND analyzed=\'Y\' '
'AND pipette_obj!=\'ECADBEAD (SNAP)\';')
listofids=[]
for rows in conn.execute(s):
listofids.append(int(rows[0]))
conn.close()
print len(listofids)
#Break the complete list of file ids up into chunks
chunksize = float(5)
chunkedids = []
last = 0.0
while last < len(listofids):
chunkedids.append(listofids[int(last):int(last + chunksize)])
last += chunksize
#Go through each entry in chunkeds and send its elements out to worker
#processes, each process take a single raw data file, finds its features
#and writes them
prog=0
for i in chunkedids:
jobs = []
for j in i:
p = multiprocessing.Process(target=write_all_features, args=(j,))
jobs.append(p)
p.start()
for p in jobs:
p.join()
prog = prog+1
print prog | cc0-1.0 | -2,257,061,216,518,495,200 | 36.470588 | 87 | 0.574344 | false |
Noirello/bonsai | tests/test_ldapvaluelist.py | 2 | 3299 | import pytest
from bonsai.ldapvaluelist import LDAPValueList
def test_append():
""" Test LDAPValueList's append method. """
lvl = LDAPValueList()
lvl.append("test")
assert "test" in lvl
with pytest.raises(ValueError):
lvl.append("Test")
def test_insert():
""" Test LDAPValueList's insert method. """
lvl = LDAPValueList(("test1",))
lvl.insert(0, "test2")
assert lvl == ["test2", "test1"]
with pytest.raises(ValueError):
lvl.insert(2, "test2")
def test_remove():
""" Test LDAPValueList's remove method. """
lvl = LDAPValueList(("test1", "test2"))
lvl.remove("Test1")
assert lvl == ["test2"]
with pytest.raises(ValueError):
lvl.remove("test1")
def test_set():
""" Test LDAPValueList's __setitem__ method. """
lvl = LDAPValueList()
lvl[0:2] = ("test1", "test2", "test3")
lvl[1] = "test4"
assert lvl == ["test1", "test4", "test3"]
with pytest.raises(ValueError):
lvl[1] = "test3"
with pytest.raises(ValueError):
lvl[1:3] = ["test5", "test1"]
del lvl[0:2]
assert lvl == ["test3"]
del lvl[0]
assert lvl == []
lvl = LDAPValueList([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
del lvl[slice(1, 10, 2)]
assert lvl == [1, 3, 5, 7, 9, 11, 12]
lvl[slice(2, 6, 2)] = (13, 14)
assert lvl == [1, 3, 13, 7, 14, 11, 12]
def test_extend():
""" Test LDAPValueList's extend method. """
lvl = LDAPValueList(("test1",))
lvl.extend(("test2", "test3"))
assert lvl == ["test1", "test2", "test3"]
with pytest.raises(ValueError):
lvl.extend(("test4", "test1"))
def test_pop():
""" Test LDAPValueList's pop method. """
lvl = LDAPValueList(("test1", "test2"))
assert lvl.pop(0) == "test1"
assert lvl == ["test2"]
lvl.pop()
assert lvl == []
with pytest.raises(IndexError):
lvl.pop()
def test_copy():
""" Test LDAPValueList's copy method. """
lvl1 = LDAPValueList(("test1", "test2"))
lvl2 = lvl1.copy()
assert lvl1 == lvl2
assert lvl1.status == lvl2.status
def test_add():
""" Test adding list to an LDAPValueList. """
lvl = LDAPValueList((1, 2, 3))
assert lvl + [4, 5] == [1, 2, 3, 4, 5]
with pytest.raises(TypeError):
_ = lvl + 3
with pytest.raises(TypeError):
lvl += "x"
lvl += [4, 5]
assert lvl == [1, 2, 3, 4, 5]
def test_mul():
""" Test multiplying an LDAPValueList. """
lvl = LDAPValueList((1, 2, 3))
with pytest.raises(TypeError):
_ = lvl * 3
def test_set_status():
""" Test setting LDAPValueList's status. """
lvl = LDAPValueList()
with pytest.raises(TypeError):
lvl.status = "a"
with pytest.raises(ValueError):
lvl.status = -1
lvl.status = 2
assert lvl.status == 2
def test_clear():
""" Test setting LDAPValueList's clear method. """
lvl = LDAPValueList((1, 2, 3))
lvl.append(4)
lvl.clear()
assert lvl == []
def test_readonly_attrs():
""" Test modifying read-only attributes. """
lvl = LDAPValueList((1, 2, 3))
with pytest.raises(ValueError):
lvl.added = [1, 2, 3]
with pytest.raises(ValueError):
lvl.deleted = [1, 2, 3]
with pytest.raises(TypeError):
lvl._status_dict = {"status": 2} | mit | -5,303,534,331,157,775,000 | 24.78125 | 64 | 0.56896 | false |
JackDanger/sentry | tests/sentry/models/test_organizationaccessrequest.py | 9 | 1952 | from __future__ import absolute_import
from django.core import mail
from sentry.models import (
OrganizationAccessRequest, OrganizationMember, OrganizationMemberTeam
)
from sentry.testutils import TestCase
class SendRequestEmailTest(TestCase):
def test_sends_email_to_everyone(self):
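        # Only the people able to approve the request, i.e. the owner (who is
        # on the team) and the team admin, should be emailed; the admin who is
        # not on the team and the plain member should not.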
owner = self.create_user('[email protected]')
team_admin = self.create_user('[email protected]')
non_team_admin = self.create_user('[email protected]')
random_member = self.create_user('[email protected]')
requesting_user = self.create_user('[email protected]')
org = self.create_organization(owner=owner)
team = self.create_team(organization=org)
OrganizationMemberTeam.objects.create(
organizationmember=OrganizationMember.objects.get(
organization=org,
user=owner,
),
team=team,
)
self.create_member(
organization=org,
user=team_admin,
role='admin',
teams=[team],
)
self.create_member(
organization=org,
user=non_team_admin,
role='admin',
teams=[],
)
self.create_member(
organization=org,
user=random_member,
role='member',
teams=[team],
)
requesting_member = self.create_member(
organization=org,
user=requesting_user,
role='member',
teams=[],
)
request = OrganizationAccessRequest.objects.create(
member=requesting_member,
team=team,
)
with self.tasks():
request.send_request_email()
assert len(mail.outbox) == 2, [m.subject for m in mail.outbox]
assert sorted([m.to[0] for m in mail.outbox]) == \
sorted([owner.email, team_admin.email])
| bsd-3-clause | 8,699,312,299,698,234,000 | 27.705882 | 73 | 0.573258 | false |
chengduoZH/Paddle | python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py | 2 | 3026 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from simple_nets import simple_fc_net, fc_with_batchnorm, init_data
from parallel_executor_test_base import TestParallelExecutorBase
import paddle.fluid as fluid
import paddle.fluid.core as core
import unittest
import os
class TestMNIST(TestParallelExecutorBase):
@classmethod
def setUpClass(cls):
os.environ['CPU_NUM'] = str(4)
def _compare_fuse_elewise_add_act_ops(self, model, use_cuda):
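        # Train the same model twice, without and with the elementwise-add +
        # activation fusion pass, and check that first and last losses match.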
if use_cuda and not core.is_compiled_with_cuda():
return
img, label = init_data()
def _optimizer(learning_rate=1e-6):
optimizer = fluid.optimizer.SGD(
learning_rate=learning_rate,
regularization=fluid.regularizer.L2Decay(1e-6))
return optimizer
# NOTE(dzh):
# need to make it compatible with elewise fuse act
# FIXME (liuwei12)
# the new memory optimize strategy will crash this unittest
# add enable_inplace=False here to force pass the unittest
not_fuse_op_first_loss, not_fuse_op_last_loss = self.check_network_convergence(
model,
feed_dict={"image": img,
"label": label},
use_cuda=use_cuda,
fuse_elewise_add_act_ops=False,
use_ir_memory_optimize=False,
enable_inplace=False,
optimizer=_optimizer)
fuse_op_first_loss, fuse_op_last_loss = self.check_network_convergence(
model,
feed_dict={"image": img,
"label": label},
use_cuda=use_cuda,
fuse_elewise_add_act_ops=True,
use_ir_memory_optimize=False,
enable_inplace=False,
optimizer=_optimizer)
for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
for loss in zip(not_fuse_op_last_loss, fuse_op_last_loss):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
def test_simple_fc_with_fuse_op(self):
self._compare_fuse_elewise_add_act_ops(simple_fc_net, True)
self._compare_fuse_elewise_add_act_ops(simple_fc_net, False)
def test_batchnorm_fc_with_fuse_op(self):
self._compare_fuse_elewise_add_act_ops(fc_with_batchnorm, True)
self._compare_fuse_elewise_add_act_ops(fc_with_batchnorm, False)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,038,469,454,949,967,400 | 37.794872 | 87 | 0.644415 | false |
CptLemming/libsaas | libsaas/services/flurry/resource.py | 4 | 11177 | from libsaas.services import base
from libsaas import http, parsers
def to_camelcase(val):
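    # e.g. 'version_name' -> 'versionName', matching the Flurry API parameter
    # naming.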
words = val.split('_')
words = [words[0].lower()] + [x.capitalize() for x in words[1:]]
return ''.join(words)
class Metrics(base.HierarchicalResource):
path = 'appMetrics'
def get_url(self):
grandparent = self.parent.parent
return '{0}/{1}'.format(grandparent.get_url(), self.path)
def _get(self, metric_name, start_date, end_date,
country=None, version_name=None, group_by=None):
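        # Build the query string from the keyword arguments (translated to
        # camelCase), then move the metric name out of the parameters and
        # into the URL path.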
params = base.get_params(None, locals(),
translate_param=to_camelcase)
params.pop('metricName')
url = '{0}/{1}'.format(self.get_url(), metric_name)
request = http.Request('GET', url, params)
return request, parsers.parse_json
@base.apimethod
def active_users(self, *args, **kwargs):
"""
Returns the total number of unique users who accessed
the application per day.
:var start_date: the first date to look metrics for.
:vartype start_date: str
:var end_date: the last date to look metrics for.
:vartype end_date: str
:var country: optional parameter indicating user's country.
:vartype country: str
:var version_name: optional parameter indicating application's version.
:vartype version_name: str
:var group_by: group data by DAYS, WEEKS or MONTHS.
By default, it will group data by days.
:vartype group_by: str
"""
return self._get('ActiveUsers', *args, **kwargs)
@base.apimethod
def active_users_by_week(self, *args, **kwargs):
"""
Returns the total number of unique users who accessed
the application per week
:var start_date: the first date to look metrics for.
:vartype start_date: str
:var end_date: the last date to look metrics for.
:vartype end_date: str
:var country: optional parameter indicating user's country.
:vartype country: str
:var version_name: optional parameter indicating application's version.
:vartype version_name: str
:var group_by: group data by DAYS, WEEKS or MONTHS.
By default, it will group data by days.
:vartype group_by: str
"""
return self._get('ActiveUsersByWeek', *args, **kwargs)
@base.apimethod
def active_users_by_month(self, *args, **kwargs):
"""
Returns the total number of unique users who accessed
the application per month.
:var start_date: the first date to look metrics for.
:vartype start_date: str
:var end_date: the last date to look metrics for.
:vartype end_date: str
:var country: optional parameter indicating user's country.
:vartype country: str
:var version_name: optional parameter indicating application's version.
:vartype version_name: str
:var group_by: group data by DAYS, WEEKS or MONTHS.
By default, it will group data by days.
:vartype group_by: str
"""
return self._get('ActiveUsersByMonth', *args, **kwargs)
@base.apimethod
def new_users(self, *args, **kwargs):
"""
Returns the total number of unique users who used the
application for the first time per day.
:var start_date: the first date to look metrics for.
:vartype start_date: str
:var end_date: the last date to look metrics for.
:vartype end_date: str
:var country: optional parameter indicating user's country.
:vartype country: str
:var version_name: optional parameter indicating application's version.
:vartype version_name: str
:var group_by: group data by DAYS, WEEKS or MONTHS.
By default, it will group data by days.
:vartype group_by: str
"""
return self._get('NewUsers', *args, **kwargs)
@base.apimethod
def median_session_length(self, *args, **kwargs):
"""
Returns the median length of a user session per day.
:var start_date: the first date to look metrics for.
:vartype start_date: str
:var end_date: the last date to look metrics for.
:vartype end_date: str
:var country: optional parameter indicating user's country.
:vartype country: str
:var version_name: optional parameter indicating application's version.
:vartype version_name: str
:var group_by: group data by DAYS, WEEKS or MONTHS.
By default, it will group data by days.
:vartype group_by: str
"""
return self._get('MedianSessionLength', *args, **kwargs)
@base.apimethod
def avg_session_length(self, *args, **kwargs):
"""
Returns the average length of a user session per day.
:var start_date: the first date to look metrics for.
:vartype start_date: str
:var end_date: the last date to look metrics for.
:vartype end_date: str
:var country: optional parameter indicating user's country.
:vartype country: str
:var version_name: optional parameter indicating application's version.
:vartype version_name: str
:var group_by: group data by DAYS, WEEKS or MONTHS.
By default, it will group data by days.
:vartype group_by: str
"""
return self._get('AvgSessionLength', *args, **kwargs)
@base.apimethod
def sessions(self, *args, **kwargs):
"""
Returns the total number of times users accessed
the application per day.
:var start_date: the first date to look metrics for.
:vartype start_date: str
:var end_date: the last date to look metrics for.
:vartype end_date: str
:var country: optional parameter indicating user's country.
:vartype country: str
:var version_name: optional parameter indicating application's version.
:vartype version_name: str
:var group_by: group data by DAYS, WEEKS or MONTHS.
By default, it will group data by days.
:vartype group_by: str
"""
return self._get('Sessions', *args, **kwargs)
@base.apimethod
def retained_users(self, *args, **kwargs):
"""
Returns the total number of users who remain active users of
the application per day.
:var start_date: the first date to look metrics for.
:vartype start_date: str
:var end_date: the last date to look metrics for.
:vartype end_date: str
:var country: optional parameter indicating user's country.
:vartype country: str
:var version_name: optional parameter indicating application's version.
:vartype version_name: str
:var group_by: group data by DAYS, WEEKS or MONTHS.
By default, it will group data by days.
:vartype group_by: str
"""
return self._get('RetainedUsers', *args, **kwargs)
@base.apimethod
def page_views(self, *args, **kwargs):
"""
Returns the total number of page views per day.
:var start_date: the first date to look metrics for.
:vartype start_date: str
:var end_date: the last date to look metrics for.
:vartype end_date: str
:var country: optional parameter indicating user's country.
:vartype country: str
:var version_name: optional parameter indicating application's version.
:vartype version_name: str
:var group_by: group data by DAYS, WEEKS or MONTHS.
By default, it will group data by days.
:vartype group_by: str
"""
return self._get('PageViews', *args, **kwargs)
@base.apimethod
def avg_page_views_per_session(self, *args, **kwargs):
"""
Returns the average page views per session for each day.
:var start_date: the first date to look metrics for.
:vartype start_date: str
:var end_date: the last date to look metrics for.
:vartype end_date: str
:var country: optional parameter indicating user's country.
:vartype country: str
:var version_name: optional parameter indicating application's version.
:vartype version_name: str
:var group_by: group data by DAYS, WEEKS or MONTHS.
By default, it will group data by days.
:vartype group_by: str
"""
return self._get('AvgPageViewsPerSession', *args, **kwargs)
class EventResource(base.HierarchicalResource):
def __init__(self, parent, object_id=None):
self.parent = parent
self.object_id = object_id
def get_url(self):
grandparent = self.parent.parent
return '{0}/eventMetrics/{1}'.format(grandparent.get_url(), self.path)
@base.apimethod
def get(self, start_date, end_date, version_name=None):
"""
For single-object resources, fetch the object's data. For collections,
fetch all of the objects.
:var start_date: the first date to look metrics for.
:vartype start_date: str
:var end_date: the last date to look metrics for.
:vartype end_date: str
:var version_name: optional parameter indicating application's version.
:vartype version_name: str
"""
params = base.get_params(None, locals(),
translate_param=to_camelcase)
if self.object_id:
params['eventName'] = self.object_id
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
class Events(EventResource):
path = 'Summary'
class Event(EventResource):
path = 'Event'
class ApplicationResource(base.HierarchicalResource):
@base.apimethod
def get(self):
"""
For single-object resources, fetch the object's data. For collections,
fetch all of the objects.
"""
request = http.Request('GET', self.get_url())
return request, parsers.parse_json
class Application(ApplicationResource):
path = 'appInfo/getApplication'
def __init__(self, parent, application_api_key):
super(Application, self).__init__(parent)
self.application_api_key = application_api_key
self.add_filter(self.add_authorization)
def add_authorization(self, request):
request.params['apiKey'] = self.application_api_key
@base.resource(Metrics)
def metrics(self):
"""
Returns the resource corresponding to all metrics.
"""
return Metrics(self)
@base.resource(Events)
def events(self):
"""
Return the resource corresponding to all events.
"""
return Events(self)
@base.resource(Event)
def event(self, event_name):
"""
Returns the resource corresponding to a single event.
"""
return Event(self, event_name)
class Applications(ApplicationResource):
path = 'appInfo/getAllApplications'
| mit | -7,088,859,782,821,526,000 | 30.396067 | 79 | 0.619039 | false |
msrconsulting/atm-py | atmPy/radiation/solar.py | 6 | 4605 | __author__ = 'mrichardson, Hagen Telg'
from math import fmod, sin, cos, asin
import ephem
import numpy as np
import pandas as pd
from atmPy.atmos.constants import a2r, r2a
__julian = {"day": 0., "cent": 0.}
class solar(object):
def __init__(self, ltime):
        julian = self.juliandates(ltime)
self.__jday = julian["day"]
self.__jcent = julian["cent"]
self.lon = 0
self.lat = 0
    # staticmethod so self.sinrad(x) / self.cosrad(x) take just the angle in degrees
    sinrad = staticmethod(lambda x: sin(a2r(x)))
    cosrad = staticmethod(lambda x: cos(a2r(x)))
def juliandates(self, ltime):
"""
Calculate a Julian date for a given local time
Parameters
----------
ltime: float
Local time calculated as seconds since Jan 1 1904
Returns
--------
dictionary
Returns a dictionary of two floats containing julian day and century.
"""
# Julian day is the continuous count of days since the beginning of the Julian period.
self.__jday = ltime/(3600*24)+1462+2415018.5
self.__jcent = (self.__jday-2451545)/36525
        return {"day": self.__jday, "cent": self.__jcent}
def __oblelip(self):
return ((21.448-self.__jcent*(self.__jcent*
(0.00059-(self.__jcent*0.001813))+46.815))/60+26)/60+23
def __gemeanlon(self):
return fmod((self.__jcent*0.0003032+36000.76983)*self.__jcent+280.46646, 360)
def __meananom(self):
return self.__jcent*(self.__jcent*0.0001537-35999.05029)+357.52911
def __eartheccen(self):
return self.__jcent*(self.__jcent*1.267e-7+4.2037e-5)-0.016708634
def __centsun(self):
f = lambda x: sin(a2r(x))
a = f(3)*0.000289
b = f(2)*(0.019993-self.__jcent*0.000101)
c = f(1)*(self.__jcent*(self.__jcent**1.45e-5+0.004817)-1.914602)
return a+b+c
def __oblcorr(self):
return self.cosrad(self.__jcent*1934.136-125.04)*0.00256+self.__oblelip()
def __truelon(self):
return self.__gemeanlon() + self.__centsun()
def __app(self):
a = self.__truelon()-0.00569
a -= self.sinrad(self.__jcent*1934.136-125.04)*0.00478
return a
def __declang(self):
return r2a(asin(self.sinrad(self.__oblcorr())*self.sinrad(self.__app())))
def __eq_time(self):
return None
def get_sun_position(lat, lon, datetime_UTC, elevation=0):
"""returns elevation and azimuth angle of the sun
Arguments:
----------
lat, lon: float
latitude and longitude of the observer (e.g. Denver, lat = 39.7392, lon = -104.9903)
    datetime_UTC: datetime instance or string ('2015/7/6 19:00:00')
        time of interest in UTC
elevation: float, optional.
elevation of observer.
Returns
-------
tuple of two floats
elevation and azimuth angle in radians.
"""
obs = ephem.Observer()
obs.lat = lat
obs.long = lon
obs.elevation = elevation
# obs.date = '2015/7/6 19:00:00'
obs.date = datetime_UTC # datetime.datetime.now() + datetime.timedelta(hours = 6)
# print(obs)
sun = ephem.Sun()
sun.compute(obs)
return sun.alt, sun.az
def get_sun_position_TS(timeseries):
"""Returns the position, polar and azimuth angle, of the sun in the sky for a given time and location.
Arguments
---------
timeseries: pandas.DataFrame instance with the index being of type datetime (e.g. atmPy.timeseries).
This is typically a housekeeping/telemetry timeseries. It must contain the columns
        Lat, Lon, and Altitude
Returns
-------
    pandas.DataFrame with two columns for the elevation and azimuth angle
    Furthermore, the timeseries gets two new columns with the two angles
"""
lat = timeseries.data.Lat.values.astype(str)
lon = timeseries.data.Lon.values.astype(str)
alti = timeseries.data.Altitude.values
t = timeseries.data.Lat.index
sunpos = np.zeros((lat.shape[0], 2))
# sunpos = np.zeros((2,2))
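    # Compute the sun's elevation and azimuth for every sample, using the
    # latitude, longitude and altitude recorded at that instant.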
for e, i in enumerate(lat):
if 0 == 1:
break
sunpos[e] = get_sun_position(lat[e], lon[e], t[e], elevation=alti[e])
# return sunpos
timeseries.data['Solar_position_elevation'] = pd.Series(sunpos[:, 0], index=timeseries.data.index)
timeseries.data['Solar_position_azimuth'] = pd.Series(sunpos[:, 1], index=timeseries.data.index)
# return pd.DataFrame(sunpos, columns=['elevation', 'azimuth'], index=timeseries.data.index)
return timeseries
| mit | 2,767,169,806,084,939,000 | 29.979167 | 106 | 0.586102 | false |
dushmis/osquery | tools/tests/utils.py | 24 | 2940 | #!/usr/bin/env python
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import sys
def red(msg):
return "\033[41m\033[1;30m %s \033[0m" % str(msg)
def lightred(msg):
return "\033[1;31m%s\033[0m" % str(msg)
def yellow(msg):
return "\033[43m\033[1;30m %s \033[0m" % str(msg)
def green(msg):
return "\033[42m\033[1;30m %s \033[0m" % str(msg)
def blue(msg):
return "\033[46m\033[1;30m %s \033[0m" % str(msg)
def read_config(path):
with open(path, "r") as fh:
return json.loads(fh.read())
def write_config(data={}, path=None):
if path is None:
path = data["options"]["config_path"]
with open(path, "w") as fh:
fh.write(json.dumps(data))
def platform():
platform = sys.platform
if platform.find("linux") == 0:
platform = "linux"
if platform.find("freebsd") == 0:
platform = "freebsd"
return platform
def queries_from_config(config_path):
config = {}
try:
with open(config_path, "r") as fh:
config = json.loads(fh.read())
except Exception as e:
print ("Cannot open/parse config: %s" % str(e))
exit(1)
queries = {}
if "scheduledQueries" in config:
for query in config["scheduledQueries"]:
queries[query["name"]] = query["query"]
if "schedule" in config:
for name, details in config["schedule"].iteritems():
queries[name] = details["query"]
if len(queries) == 0:
print ("Could not find a schedule/queries in config: %s" % config_path)
exit(0)
return queries
def queries_from_tables(path, restrict):
"""Construct select all queries from all tables."""
# Let the caller limit the tables
restrict_tables = [t.strip() for t in restrict.split(",")]
spec_platform = platform()
tables = []
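    # Walk the spec tree: every "<name>.table" spec found under a platform
    # directory becomes a "<platform>.<table>" entry, keeping only generic
    # specs and those matching the current platform.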
for base, _, files in os.walk(path):
for spec in files:
if spec[0] == '.' or spec in ["blacklist"]:
continue
spec_platform = os.path.basename(base)
table_name = spec.split(".table", 1)[0]
if spec_platform not in ["specs", platform()]:
continue
# Generate all tables to select from, with abandon.
tables.append("%s.%s" % (spec_platform, table_name))
if len(restrict) > 0:
tables = [t for t in tables if t.split(".")[1] in restrict_tables]
queries = {}
for table in tables:
queries[table] = "SELECT * FROM %s;" % table.split(".", 1)[1]
return queries
| bsd-3-clause | -7,516,679,022,976,035,000 | 27.269231 | 79 | 0.602041 | false |
zonca/boinc | test/test_sanity.py | 4 | 3318 | #!/usr/bin/env python
## $Id: test_sanity.py 2246 2003-09-04 05:07:17Z quarl $
from testbase import *
import urllib, random
# Test makes sure that testing framework is sane:
#
# - executables compiled
# - cgi server works
# - test proxy works
# - mysql permissions and command-line client works
def read_url(url, quiet=False):
'''return 1 line from url'''
verbose_echo(2, " reading url: "+url)
err = ''
try:
return urllib.URLopener().open(url).readline().strip()
except IOError, e:
err = e
except AttributeError:
# Python urllib is buggy if connection is closed (by our proxy
        # intentionally) right after it is opened
pass
if not quiet:
error("couldn't access url: %s %s" % (url, err))
else:
verbose_echo(2, "couldn't access url: %s %s" % (url, err))
return ''
if __name__ == '__main__':
test_msg("framework sanity")
# verbose_echo(1, "Checking executables")
# check_core_client_executable()
# check_app_executable("upper_case")
# check_app_executable("concat")
# check_app_executable("1sec")
verbose_echo(1, "Checking directories")
for d in ['projects_dir',
'cgi_dir', 'html_dir', 'hosts_dir']:
dir = options.__dict__[d]
if not os.path.isdir(dir):
error("%s doesn't exist: %s" % (d, dir))
magic = "Foo %x Bar" % random.randint(0,2**16)
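    # Write the random marker under the web root and make sure it comes back
    # unchanged, both directly and through the test proxy.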
html_path = os.path.join(options.html_dir, 'test_sanity.txt')
html_url = os.path.join(options.html_url, 'test_sanity.txt')
html_proxy_url = proxerize(html_url)
cgi_path = os.path.join(options.cgi_dir, 'test_sanity_cgi')
cgi_url = os.path.join(options.cgi_url, 'test_sanity_cgi')
verbose_echo(1, "Checking webserver setup: non-cgi")
print >>open(html_path,'w'), magic
if read_url(html_url) != magic:
error("couldn't access a file I just wrote: "+html_path+"\n using url: "+html_url)
verbose_echo(1, "Checking proxy setup")
if read_url(html_proxy_url, quiet=True):
error("Another proxy already running")
else:
proxy = Proxy('')
if read_url(html_proxy_url) != magic:
error("couldn't access file using proxy url: "+html_proxy_url)
else:
proxy.stop()
proxy = Proxy('close_connection if $nconnections < 2')
if read_url(html_proxy_url, quiet=True):
error("Proxy should have closed connection #1")
if read_url(html_proxy_url) != magic:
error("Proxy should have allowed connection #2")
proxy.stop()
os.unlink(html_path)
verbose_echo(1, "Checking webserver setup: cgi")
print >>open(cgi_path,'w'), '''#!/bin/sh
echo "Content-Type: text/plain"
echo ""
echo "%s"
''' % magic
os.chmod(cgi_path, 0755)
if read_url(cgi_url) != magic:
error("couldn't access a cgi file I just wrote: "+cgi_path+"\n using url: "+cgi_url)
os.unlink(cgi_path)
database_name = 'boinc_test_sanity_mysql_%s_%d'%(
os.environ['USER'], random.randint(0,2**16))
# create and drop a database
verbose_echo(1, "Checking mysql commandline and permissions")
shell_call('echo "create database %s" | mysql' % database_name)
shell_call('echo "drop database %s" | mysql' % database_name)
| gpl-3.0 | 1,932,945,434,980,375,800 | 31.851485 | 93 | 0.606088 | false |
DigFarmer/aircraft | bsp/stm32f0x/rtconfig.py | 17 | 3443 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m0'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'C:/Program Files/CodeSourcery/Sourcery_CodeBench_Lite_for_ARM_EABI/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil'
elif CROSS_TOOL == 'iar':
print '================ERROR============================'
    print 'IAR is not supported yet!'
print '================================================='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
STM32_TYPE = 'STM32F0XX'
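# Toolchain-specific settings: compiler, assembler and linker flags for GCC,
# Keil MDK (armcc) and IAR are configured in the branches below.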
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m0 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-stm32.map,-cref,-u,Reset_Handler -T stm32_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMSTM'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-stm32.map --scatter stm32_rom.sct'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = ' -D USE_STDPERIPH_DRIVER' + ' -D STM32F10X_HD'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --debug'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M0'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + IAR_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' -Ol'
CFLAGS += ' --use_c++_inline'
AFLAGS = ''
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M0'
AFLAGS += ' --fpu None'
LFLAGS = ' --config stm32f0xx_flash.icf'
LFLAGS += ' --redirect _Printf=_PrintfTiny'
LFLAGS += ' --redirect _Scanf=_ScanfSmall'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = IAR_PATH + '/arm/bin/'
POST_ACTION = ''
| gpl-2.0 | -8,977,840,933,347,943,000 | 26.325397 | 130 | 0.542841 | false |
srkukarni/heron | heron/examples/src/python/spout/stateful_word_spout.py | 1 | 2069 | # Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""module for example spout: WordSpout"""
from itertools import cycle
from collections import Counter
from heron.api.src.python.spout.spout import Spout
from heron.api.src.python.state.stateful_component import StatefulComponent
class StatefulWordSpout(Spout, StatefulComponent):
"""StatefulWordSpout: emits a set of words repeatedly"""
# output field declarer
outputs = ['word']
# pylint: disable=attribute-defined-outside-init
def initState(self, stateful_state):
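        # Called on (re)start with the key/value state recovered from the most
        # recent checkpoint.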
self.recovered_state = stateful_state
self.logger.info("Checkpoint Snapshot recovered : %s" % str(self.recovered_state))
def preSave(self, checkpoint_id):
# Purely for debugging purposes
for (k, v) in self.counter.items():
self.recovered_state.put(k, v)
self.logger.info("Checkpoint Snapshot %s : %s" % (checkpoint_id, str(self.recovered_state)))
# pylint: disable=unused-argument
def initialize(self, config, context):
self.logger.info("In initialize() of WordSpout")
self.words = cycle(["hello", "bye", "good", "bad", "heron", "storm"])
self.counter = Counter()
self.emit_count = 0
self.ack_count = 0
self.fail_count = 0
self.logger.info("Component-specific config: \n%s" % str(config))
def next_tuple(self):
word = next(self.words)
self.emit([word], tup_id='message id')
self.counter[word] += 1
self.emit_count += 1
if self.emit_count % 100000 == 0:
self.logger.info("Emitted " + str(self.emit_count))
| apache-2.0 | -8,585,869,515,021,551,000 | 36.618182 | 96 | 0.713388 | false |
antonpuz/phpbuildpack | cf_spec/fixtures/php_app_with_custom_extension/.extensions/phpmyadmin/extension.py | 5 | 1498 | """PHPMyAdmin Extension
Downloads, installs and configures PHPMyAdmin
"""
import os
import os.path
import logging
from build_pack_utils import utils
_log = logging.getLogger('phpmyadmin')
DEFAULTS = utils.FormattedDict({
'PHPMYADMIN_VERSION': '4.3.12',
'PHPMYADMIN_PACKAGE': 'phpMyAdmin-{PHPMYADMIN_VERSION}-english.tar.gz',
'PHPMYADMIN_HASH': '707064d1efc37acc8e6fd7ddcc9030616ae32562',
'PHPMYADMIN_URL': 'http://sourceforge.net/projects/phpmyadmin/'
'files/phpMyAdmin/{PHPMYADMIN_VERSION}/'
'{PHPMYADMIN_PACKAGE}/download#'
})
# Extension Methods
def preprocess_commands(ctx):
return ()
def service_commands(ctx):
return {}
def service_environment(ctx):
return {}
def compile(install):
print 'Installing PHPMyAdmin %s' % DEFAULTS['PHPMYADMIN_VERSION']
ctx = install.builder._ctx
inst = install._installer
workDir = os.path.join(ctx['TMPDIR'], 'phpmyadmin')
inst.install_binary_direct(
DEFAULTS['PHPMYADMIN_URL'],
DEFAULTS['PHPMYADMIN_HASH'],
workDir,
fileName=DEFAULTS['PHPMYADMIN_PACKAGE'],
strip=True)
(install.builder
.move()
.everything()
.under('{BUILD_DIR}/htdocs')
.into(workDir)
.done())
(install.builder
.move()
.everything()
.under(workDir)
.where_name_does_not_match('^%s/setup/.*$' % workDir)
.into('{BUILD_DIR}/htdocs')
.done())
return 0
| apache-2.0 | -3,218,559,545,929,199,000 | 23.557377 | 75 | 0.627503 | false |
atrosinenko/lecture-notes-compiler | lnc/plugins/djvu.py | 1 | 2155 | from __future__ import unicode_literals
import os.path
import glob
import errno
from lnc.plugins.base_plugin import BasePlugin
from lnc.lib.process import cmd_try_run, cmd_run, _COMMAND_NOT_FOUND_MSG
from lnc.lib.io import mkdir_p, filter_regexp, needs_update
from lnc.lib.exceptions import ProgramError
def handler(info):
try:
os.remove(info["output"])
except OSError as err:
if err.errno != errno.ENOENT:
raise
cmd_run(["c44", info["input"], info["output"]])
class Plugin(BasePlugin):
def test(self):
self._check_target_options(["in-cache-dir",
"out-cache-dir",
"djvu-file"])
cmd_try_run("c44", fail_msg=_COMMAND_NOT_FOUND_MSG.format(
command="c44",
package="DjVuLibre"))
cmd_try_run("djvm", fail_msg=_COMMAND_NOT_FOUND_MSG.format(
command="djvm",
package="DjVuLibre"))
def before_tasks(self):
out_cache_dir = self._get_option("out-cache-dir")
djvu_file = self._get_option("djvu-file")
mkdir_p(out_cache_dir)
mkdir_p(os.path.dirname(djvu_file))
def get_tasks(self):
in_cache_dir = self._get_option("in-cache-dir")
out_cache_dir = self._get_option("out-cache-dir")
imgs = filter_regexp(in_cache_dir, r"^[0-9]+[.].*$")
res = []
for img in imgs:
num = int(img[:img.index(".")])
x = {
"__handler__": handler,
"input": os.path.join(in_cache_dir, img),
"output": os.path.join(out_cache_dir, "%04d.djvu" % num)
}
if needs_update(x["input"], x["output"]):
res.append(x)
return res
def after_tasks(self):
out_cache_dir = self._get_option("out-cache-dir")
djvu_file = self._get_option("djvu-file")
input_files = sorted(glob.glob(os.path.join(out_cache_dir, "*.djvu")))
if len(input_files) == 0:
raise ProgramError(_("No input files."))
cmd_run(["djvm", "-create", djvu_file] + input_files)
| mit | 5,267,668,773,337,236,000 | 31.651515 | 78 | 0.5471 | false |
amisrs/angular-flask | angular_flask/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/pypostgresql.py | 18 | 2169 | # postgresql/pypostgresql.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+pypostgresql
:name: py-postgresql
:dbapi: pypostgresql
:connectstring: postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]
:url: http://python.projects.pgfoundry.org/
"""
from ... import util
from ... import types as sqltypes
from .base import PGDialect, PGExecutionContext
from ... import processors
class PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return processors.to_str
def result_processor(self, dialect, coltype):
if self.asdecimal:
return None
else:
return processors.to_float
class PGExecutionContext_pypostgresql(PGExecutionContext):
pass
class PGDialect_pypostgresql(PGDialect):
driver = 'pypostgresql'
supports_unicode_statements = True
supports_unicode_binds = True
description_encoding = None
default_paramstyle = 'pyformat'
# requires trunk version to support sane rowcounts
# TODO: use dbapi version information to set this flag appropriately
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_pypostgresql
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: PGNumeric,
# prevents PGNumeric from being used
sqltypes.Float: sqltypes.Float,
}
)
@classmethod
def dbapi(cls):
from postgresql.driver import dbapi20
return dbapi20
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if 'port' in opts:
opts['port'] = int(opts['port'])
else:
opts['port'] = 5432
opts.update(url.query)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
return "connection is closed" in str(e)
dialect = PGDialect_pypostgresql
| mit | 4,926,370,069,080,030,000 | 26.807692 | 101 | 0.672199 | false |
sigma-random/pyew | vtrace/platforms/base.py | 16 | 28413 | """
Tracer Platform Base
"""
# Copyright (C) 2007 Invisigoth - See LICENSE file for details
import os
import struct
import vtrace
import traceback
import platform
from Queue import Queue
from threading import Thread,currentThread,Lock
import envi
import envi.memory as e_mem
import envi.threads as e_threads
import envi.resolver as e_resolv
import vstruct.builder as vs_builder
class TracerBase(vtrace.Notifier):
"""
The basis for a tracer's internals. All platformFoo/archFoo
functions are defaulted, and internal state is initialized.
Additionally, a number of internal utilities are housed here.
"""
def __init__(self):
"""
The routine to initialize a tracer's initial internal state. This
is used by the initial creation routines, AND on attaches/executes
        to refresh the state of the tracer.
WARNING: This will erase all metadata/symbols (modes/notifiers are kept)
"""
vtrace.Notifier.__init__(self)
self.pid = 0 # Attached pid (also used to know if attached)
self.exited = False
self.breakpoints = {}
self.newbreaks = []
self.bpbyid = {}
self.bpid = 0
self.curbp = None
self.bplock = Lock()
self.deferred = []
self.running = False
self.runagain = False
self.attached = False
# A cache for memory maps and fd listings
self.mapcache = None
self.thread = None # our proxy thread...
self.threadcache = None
self.fds = None
self.signal_ignores = []
self.localvars = {}
# Set if we are RunForever until a thread exit...
self._join_thread = None
self.vsbuilder = vs_builder.VStructBuilder()
self.psize = self.getPointerSize() # From the envi arch mod...
# Track which libraries are parsed, and their
# normame to full path mappings
self.libloaded = {} # True if the library has been loaded already
self.libpaths = {} # normname->filename and filename->normname lookup
# Set up some globally expected metadata
self.setMeta('PendingSignal', None)
self.setMeta('SignalInfo', None)
self.setMeta("IgnoredSignals",[])
self.setMeta("LibraryBases", {}) # name -> base address mappings for binaries
self.setMeta("LibraryPaths", {}) # base -> path mappings for binaries
self.setMeta("ThreadId", 0) # If you *can* have a thread id put it here
plat = platform.system()
rel = platform.release()
self.setMeta("Platform", plat)
self.setMeta("Release", rel)
# Use this if we are *expecting* a break
# which is caused by us (so we remove the
# SIGBREAK from pending_signal
self.setMeta("ShouldBreak", False)
def nextBpId(self):
self.bplock.acquire()
x = self.bpid
self.bpid += 1
self.bplock.release()
return x
def justAttached(self, pid):
"""
platformAttach() function should call this
immediately after a successful attach. This does
any necessary initialization for a tracer to be
back in a clean state.
"""
self.pid = pid
self.attached = True
self.breakpoints = {}
self.bpbyid = {}
self.setMeta("PendingSignal", None)
self.setMeta("ExitCode", 0)
self.exited = False
def getResolverForFile(self, filename):
res = self.resbynorm.get(filename, None)
if res: return res
res = self.resbyfile.get(filename, None)
if res: return res
return None
def steploop(self):
"""
Continue stepi'ing in a loop until shouldRunAgain()
returns false (like RunForever mode or something)
"""
if self.getMode("NonBlocking", False):
e_threads.firethread(self.doStepLoop)()
else:
self.doStepLoop()
def doStepLoop(self):
go = True
while go:
self.stepi()
go = self.shouldRunAgain()
def _doRun(self):
# Exists to avoid recursion from loop in doWait
self.requireAttached()
self.requireNotRunning()
self.requireNotExited()
fastbreak = False
if self.curbp:
fastbreak = self.curbp.fastbreak
# If we are on a breakpoint, and it's a fastbreak
# we don't want to fire a "continue" event.
if not fastbreak:
self.fireNotifiers(vtrace.NOTIFY_CONTINUE)
# Step past a breakpoint if we are on one.
self._checkForBreak()
# Throw down and activate breakpoints...
if not fastbreak:
self._throwdownBreaks()
self.running = True
self.runagain = False
self._syncRegs() # Must be basically last...
self.platformContinue()
self.setMeta("PendingSignal", None)
def wait(self):
"""
Wait for the trace target to have
something happen... If the trace is in
NonBlocking mode, this will fire a thread
to wait for you and return control immediately.
"""
if self.getMode("NonBlocking"):
e_threads.firethread(self._doWait)()
else:
self._doWait()
def _doWait(self):
doit = True
while doit:
# A wrapper method for wait() and the wait thread to use
self.setMeta('SignalInfo', None)
self.setMeta('PendingSignal', None)
event = self.platformWait()
self.running = False
self.platformProcessEvent(event)
doit = self.shouldRunAgain()
if doit:
self._doRun()
def _fireSignal(self, signo, siginfo=None):
self.setMeta('PendingSignal', signo)
self.setMeta('SignalInfo', siginfo)
self.fireNotifiers(vtrace.NOTIFY_SIGNAL)
def _fireExit(self, ecode):
self.setMeta('ExitCode', ecode)
self.fireNotifiers(vtrace.NOTIFY_EXIT)
def _fireExitThread(self, threadid, ecode):
self.setMeta('ExitThread', threadid)
self.setMeta('ExitCode', ecode)
self.fireNotifiers(vtrace.NOTIFY_EXIT_THREAD)
def _activateBreak(self, bp):
# NOTE: This is special cased by hardware debuggers etc...
if bp.isEnabled():
try:
bp.activate(self)
except Exception, e:
traceback.print_exc()
print "WARNING: bpid %d activate failed (deferring): %s" % (bp.id, e)
self.deferred.append(bp)
def _throwdownBreaks(self):
"""
Run through the breakpoints and setup
the ones that are enabled.
NOTE: This should *not* get called when continuing
from a fastbreak...
"""
# Resolve deferred breaks
for bp in self.deferred:
addr = bp.resolveAddress(self)
if addr != None:
self.deferred.remove(bp)
self.breakpoints[addr] = bp
for bp in self.breakpoints.values():
self._activateBreak(bp)
def _syncRegs(self):
"""
Sync the reg-cache into the target process
"""
if self.regcache != None:
for tid, ctx in self.regcache.items():
if ctx.isDirty():
self.platformSetRegCtx(tid, ctx)
self.regcache = None
def _cacheRegs(self, threadid):
"""
Make sure the reg-cache is populated
"""
if self.regcache == None:
self.regcache = {}
ret = self.regcache.get(threadid)
if ret == None:
ret = self.platformGetRegCtx(threadid)
ret.setIsDirty(False)
self.regcache[threadid] = ret
return ret
def _checkForBreak(self):
"""
Check to see if we've landed on a breakpoint, and if so
deactivate and step us past it.
        WARNING: Unfortunately, because this is used immediately before
        a call to run/wait, we must block briefly even for the GUI
"""
# Steal a reference because the step should
# clear curbp...
bp = self.curbp
if bp != None and bp.isEnabled():
if bp.active:
bp.deactivate(self)
orig = self.getMode("FastStep")
self.setMode("FastStep", True)
self.stepi()
self.setMode("FastStep", orig)
bp.activate(self)
self.curbp = None
def shouldRunAgain(self):
"""
        A unified place for the test as to whether this trace
should be told to run again after reaching some stopping
condition.
"""
if not self.attached:
return False
if self.exited:
return False
if self.getMode("RunForever"):
return True
if self.runagain:
return True
return False
def __repr__(self):
run = "stopped"
exe = "None"
if self.isRunning():
run = "running"
elif self.exited:
run = "exited"
exe = self.getMeta("ExeName")
return "[%d]\t- %s <%s>" % (self.pid, exe, run)
def initMode(self, name, value, descr):
"""
        Initialize a mode; this should ONLY be called
        during setup routines for the trace! It determines
        the available mode settings.
"""
self.modes[name] = bool(value)
self.modedocs[name] = descr
def release(self):
"""
        Do cleanup when we're done. This is mostly necessary
because of the thread proxy holding a reference to this
tracer... We need to let him die off and try to get
garbage collected.
"""
if self.thread:
self.thread.go = False
def _cleanupResources(self):
self._tellThreadExit()
def _tellThreadExit(self):
if self.thread != None:
self.thread.queue.put(None)
self.thread.join(timeout=2)
self.thread = None
def __del__(self):
if not self._released:
print 'Warning! tracer del w/o release()!'
def fireTracerThread(self):
# Fire the threadwrap proxy thread for this tracer
        # (if it hasn't been fired...)
if self.thread == None:
self.thread = TracerThread()
def fireNotifiers(self, event):
"""
Fire the registered notifiers for the NOTIFY_* event.
"""
if event == vtrace.NOTIFY_SIGNAL:
signo = self.getCurrentSignal()
if signo in self.getMeta("IgnoredSignals", []):
if vtrace.verbose: print "Ignoring",signo
self.runAgain()
return
alllist = self.getNotifiers(vtrace.NOTIFY_ALL)
nlist = self.getNotifiers(event)
trace = self
# if the trace has a proxy it's notifiers
# need that, cause we can't be pickled ;)
if self.proxy:
trace = self.proxy
# First we notify ourself....
self.handleEvent(event, self)
# The "NOTIFY_ALL" guys get priority
for notifier in alllist:
try:
notifier.handleEvent(event,trace)
except:
print "WARNING: Notifier exception for",repr(notifier)
traceback.print_exc()
for notifier in nlist:
try:
notifier.handleEvent(event,trace)
except:
print "WARNING: Notifier exception for",repr(notifier)
traceback.print_exc()
def _cleanupBreakpoints(self):
'''
        Clean up all breakpoints (if the current bp is a "fastbreak" this
        routine will not be called).
'''
for bp in self.breakpoints.itervalues():
bp.deactivate(self)
def _fireStep(self):
if self.getMode('FastStep', False):
return
self.fireNotifiers(vtrace.NOTIFY_STEP)
def _fireBreakpoint(self, bp):
self.curbp = bp
# A breakpoint should be inactive when fired
# (even fastbreaks, we'll need to deactivate for stepi anyway)
bp.deactivate(self)
try:
bp.notify(vtrace.NOTIFY_BREAK, self)
except Exception, msg:
print "Breakpoint Exception 0x%.8x : %s" % (bp.address,msg)
if not bp.fastbreak:
self.fireNotifiers(vtrace.NOTIFY_BREAK)
else:
# fastbreak's are basically always "run again"
self.runagain = True
def checkPageWatchpoints(self):
"""
Check if the given memory fault was part of a valid
MapWatchpoint.
"""
faultaddr,faultperm = self.platformGetMemFault()
#FIXME this is some AWESOME but intel specific nonsense
if faultaddr == None: return False
faultpage = faultaddr & 0xfffff000
wp = self.breakpoints.get(faultpage, None)
if wp == None:
return False
self._fireBreakpoint(wp)
return True
def checkWatchpoints(self):
# Check for hardware watchpoints
waddr = self.archCheckWatchpoints()
if waddr != None:
wp = self.breakpoints.get(waddr, None)
self._fireBreakpoint(wp)
return True
def checkBreakpoints(self):
"""
This is mostly for systems (like linux) where you can't tell
the difference between some SIGSTOP/SIGBREAK conditions and
an actual breakpoint instruction.
This method will return true if either the breakpoint
subsystem or the sendBreak (via ShouldBreak meta) is true
(and it will have handled firing events for the bp)
"""
pc = self.getProgramCounter()
bi = self.archGetBreakInstr()
bl = pc - len(bi)
bp = self.breakpoints.get(bl, None)
if bp:
addr = bp.getAddress()
            # Step back one instruction to account for the breakpoint
self.setProgramCounter(addr)
self._fireBreakpoint(bp)
return True
if self.getMeta("ShouldBreak"):
self.setMeta("ShouldBreak", False)
self.fireNotifiers(vtrace.NOTIFY_BREAK)
return True
return False
def notify(self, event, trace):
"""
We are frequently a notifier for ourselves, so we can do things
like handle events on attach and on break in a unified fashion.
"""
self.threadcache = None
self.mapcache = None
self.fds = None
self.running = False
if event in self.auto_continue:
self.runAgain()
        # For thread exits, make sure the tid
        # is no longer tracked in the suspended threads dict
if event == vtrace.NOTIFY_EXIT_THREAD:
tid = self.getMeta("ThreadId")
self.sus_threads.pop(tid, None)
# Check if this is a thread we were waiting on.
if tid == self._join_thread:
self._join_thread = None
# Turn off the RunForever in joinThread()
self.setMode('RunForever', False)
# Either way, we don't want to run again...
self.runAgain(False)
# Do the stuff we do for detach/exit or
# cleanup breaks etc...
if event == vtrace.NOTIFY_ATTACH:
pass
elif event == vtrace.NOTIFY_DETACH:
for tid in self.sus_threads.keys():
self.resumeThread(tid)
self._cleanupBreakpoints()
elif event == vtrace.NOTIFY_EXIT:
self.setMode("RunForever", False)
self.exited = True
self.attached = False
elif event == vtrace.NOTIFY_CONTINUE:
self.runagain = False
else:
self._cleanupBreakpoints()
def delLibraryBase(self, baseaddr):
libname = self.getMeta("LibraryPaths").get(baseaddr, "unknown")
normname = self.normFileName(libname)
sym = self.getSymByName(normname)
self.setMeta("LatestLibrary", libname)
self.setMeta("LatestLibraryNorm", normname)
self.fireNotifiers(vtrace.NOTIFY_UNLOAD_LIBRARY)
self.getMeta("LibraryBases").pop(normname, None)
self.getMeta("LibraryPaths").pop(baseaddr, None)
if sym != None:
self.delSymbol(sym)
def addLibraryBase(self, libname, address, always=False):
"""
This should be used *at load time* to setup the library
event metadata.
This *must* be called from a context where it's safe to
fire notifiers, because it will fire a notifier to alert
about a LOAD_LIBRARY. (This means *not* from inside another
        notifier)
"""
self.setMeta("LatestLibrary", None)
self.setMeta("LatestLibraryNorm", None)
normname = self.normFileName(libname)
if self.getSymByName(normname) != None:
normname = "%s_%.8x" % (normname,address)
# Only actually do library work with a file or force
if os.path.exists(libname) or always:
self.getMeta("LibraryPaths")[address] = libname
self.getMeta("LibraryBases")[normname] = address
self.setMeta("LatestLibrary", libname)
self.setMeta("LatestLibraryNorm", normname)
width = self.arch.getPointerSize()
sym = e_resolv.FileSymbol(normname, address, 0, width=width)
sym.casesens = self.casesens
self.addSymbol(sym)
self.libpaths[normname] = libname
self.fireNotifiers(vtrace.NOTIFY_LOAD_LIBRARY)
def normFileName(self, libname):
basename = os.path.basename(libname)
return basename.split(".")[0].split("-")[0].lower()
def _loadBinaryNorm(self, normname):
if not self.libloaded.get(normname, False):
fname = self.libpaths.get(normname)
if fname != None:
self._loadBinary(fname)
return True
return False
def _loadBinary(self, filename):
"""
Check if a filename has yet to be parsed. If it has NOT
been parsed, parse it and return True, otherwise, return False
"""
normname = self.normFileName(filename)
if not self.libloaded.get(normname, False):
address = self.getMeta("LibraryBases").get(normname)
if address != None:
self.platformParseBinary(filename, address, normname)
self.libloaded[normname] = True
return True
return False
#######################################################################
#
# NOTE: all platform/arch defaults are populated here.
#
def platformGetThreads(self):
"""
Return a dictionary of <threadid>:<tinfo> pairs where tinfo is either
the stack top, or the teb for win32
"""
raise Exception("Platform must implement platformGetThreads()")
def platformSelectThread(self, thrid):
"""
Platform implementers are encouraged to use the metadata field "ThreadId"
as the identifier (int) for which thread has "focus". Additionally, the
field "StoppedThreadId" should be used in instances (like win32) where you
must specify the ORIGINALLY STOPPED thread-id in the continue.
"""
self.setMeta("ThreadId",thrid)
def platformSuspendThread(self, thrid):
raise Exception("Platform must implement platformSuspendThread()")
def platformResumeThread(self, thrid):
raise Exception("Platform must implement platformResumeThread()")
def platformInjectThread(self, pc, arg=0):
raise Exception("Platform must implement platformInjectThread()")
def platformKill(self):
raise Exception("Platform must implement platformKill()")
def platformExec(self, cmdline):
"""
Platform exec will execute the process specified in cmdline
and return the PID
"""
raise Exception("Platmform must implement platformExec")
def platformInjectSo(self, filename):
raise Exception("Platform must implement injectso()")
def platformGetFds(self):
"""
Return what getFds() wants for this particular platform
"""
raise Exception("Platform must implement platformGetFds()")
def platformGetSignal(self):
'''
Return the currently posted exception/signal....
'''
# Default to the thing they all should do...
return self.getMeta('PendingSignal', None)
def platformSetSignal(self, sig=None):
'''
Set the current signal to deliver to the process on cont.
(Use None for no signal delivery.
'''
self.setMeta('PendingSignal', sig)
def platformGetMaps(self):
"""
Return a list of the memory maps where each element has
the following structure:
(address, length, perms, file="")
NOTE: By Default this list is available as Trace.maps
because the default implementation attempts to populate
them on every break/stop/etc...
"""
raise Exception("Platform must implement GetMaps")
def platformPs(self):
"""
Actually return a list of tuples in the format
(pid, name) for this platform
"""
raise Exception("Platform must implement Ps")
def archGetStackTrace(self):
raise Exception("Architecure must implement argGetStackTrace()!")
def archAddWatchpoint(self, address, size=4, perms="rw"):
"""
Add a watchpoint for the given address. Raise if the platform
        doesn't support watchpoints, or too many are active...
"""
raise Exception("Architecture doesn't implement watchpoints!")
def archRemWatchpoint(self, address):
raise Exception("Architecture doesn't implement watchpoints!")
def archCheckWatchpoints(self):
"""
If the current register state indicates that a watchpoint was hit,
return the address of the watchpoint and clear the event. Otherwise
return None
"""
pass
def archGetRegCtx(self):
"""
Return a new empty envi.registers.RegisterContext object for this
trace.
"""
raise Exception("Platform must implement archGetRegCtx()")
def getStackTrace(self):
"""
Return a list of the stack frames for this process
(currently Intel/ebp based only). Each element of the
"frames list" consists of another list which is (eip,ebp)
"""
raise Exception("Platform must implement getStackTrace()")
def getExe(self):
"""
Get the full path to the main executable for this
*attached* Trace
"""
return self.getMeta("ExeName","Unknown")
def platformAttach(self, pid):
"""
Actually carry out attaching to a target process. Like
platformStepi this is expected to be ATOMIC and not return
until a complete attach.
"""
raise Exception("Platform must implement platformAttach()")
def platformContinue(self):
raise Exception("Platform must implement platformContinue()")
def platformDetach(self):
"""
Actually perform the detach for this type
"""
raise Exception("Platform must implement platformDetach()")
def platformStepi(self):
"""
PlatformStepi should be ATOMIC, meaning it gets called, and
by the time it returns, you're one step further. This is completely
regardless of blocking/nonblocking/whatever.
"""
raise Exception("Platform must implement platformStepi!")
def platformCall(self, address, args, convention=None):
"""
Platform call takes an address, and an array of args
(string types will be mapped and located for you)
        platformCall is expected to return a dictionary of the
current register values at the point where the call
has returned...
"""
raise Exception("Platform must implement platformCall")
def platformGetRegCtx(self, threadid):
raise Exception("Platform must implement platformGetRegCtx!")
def platformSetRegCtx(self, threadid, ctx):
raise Exception("Platform must implement platformSetRegCtx!")
def platformProtectMemory(self, va, size, perms):
raise Exception("Plaform does not implement protect memory")
def platformAllocateMemory(self, size, perms=e_mem.MM_RWX, suggestaddr=0):
raise Exception("Plaform does not implement allocate memory")
def platformReadMemory(self, address, size):
raise Exception("Platform must implement platformReadMemory!")
def platformWriteMemory(self, address, bytes):
raise Exception("Platform must implement platformWriteMemory!")
def platformGetMemFault(self):
"""
Return the addr of the current memory fault
or None
"""
#NOTE: This is used by the PageWatchpoint subsystem
# (and is still considered experimental)
return None,None
def platformWait(self):
"""
Wait for something interesting to occur and return a
*platform specific* representation of what happened.
This will then be passed to the platformProcessEvent()
method which will be responsible for doing things like
firing notifiers. Because the platformWait() method needs
to be commonly @threadwrap and you can't fire notifiers
from within a threadwrapped function...
"""
raise Exception("Platform must implement platformWait!")
def platformProcessEvent(self, event):
"""
This method processes the event data provided by platformWait()
This method is responsible for firing ALL notifiers *except*:
vtrace.NOTIFY_CONTINUE - This is handled by the run api (and isn't the result of an event)
"""
raise Exception("Platform must implement platformProcessEvent")
def platformParseBinary(self, filename, baseaddr, normname):
"""
Platforms must parse the given binary file and load any symbols
into the internal SymbolResolver using self.addSymbol()
"""
raise Exception("Platform must implement platformParseBinary")
import threading
def threadwrap(func):
def trfunc(self, *args, **kwargs):
if threading.currentThread().__class__ == TracerThread:
return func(self, *args, **kwargs)
# Proxy the call through a single thread
q = Queue()
# FIXME change calling convention!
args = (self, ) + args
self.thread.queue.put((func, args, kwargs, q))
ret = q.get()
if issubclass(ret.__class__, Exception):
raise ret
return ret
return trfunc
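# Illustrative sketch (assumed usage, not taken from this source): platform
# implementations decorate their blocking debug-API methods with @threadwrap
# so the call is proxied through the single TracerThread defined below, e.g.:
#
#   class SomePlatformMixin:            # hypothetical mixin name
#       @threadwrap
#       def platformContinue(self):
#           ...  # the actual debug API call runs on the TracerThread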
class TracerThread(Thread):
"""
Ok... so here's the catch... most debug APIs do *not* allow
one thread to do the attach and another to do continue and another
    to do wait... they just don't. So there. I have to make a thread
per-tracer (on most platforms) and proxy requests (for *some* trace
API methods) to it for actual execution. SUCK!
However, this lets async things like GUIs and threaded things like
cobra not have to be aware of which one is allowed and not allowed
to make particular calls and on what platforms... YAY!
"""
def __init__(self):
Thread.__init__(self)
self.queue = Queue()
self.setDaemon(True)
self.start()
def run(self):
"""
Run in a circle getting requests from our queue and
executing them based on the thread.
"""
while True:
try:
qobj = self.queue.get()
if qobj == None:
break
meth, args, kwargs, queue = qobj
try:
queue.put(meth(*args, **kwargs))
except Exception,e:
queue.put(e)
if vtrace.verbose:
traceback.print_exc()
continue
except:
if vtrace.verbose:
traceback.print_exc()
| gpl-2.0 | 6,986,315,963,340,076,000 | 32.038372 | 98 | 0.595784 | false |
SavinaRoja/PyUserInput | pymouse/x11.py | 3 | 8098 | #Copyright 2013 Paul Barton
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
from Xlib.display import Display
from Xlib import X
from Xlib.ext.xtest import fake_input
from Xlib.ext import record
from Xlib.protocol import rq
from .base import PyMouseMeta, PyMouseEventMeta, ScrollSupportError
class X11Error(Exception):
"""An error that is thrown at the end of a code block managed by a
:func:`display_manager` if an *X11* error occurred.
"""
pass
def display_manager(display):
"""Traps *X* errors and raises an :class:``X11Error`` at the end if any
error occurred.
This handler also ensures that the :class:`Xlib.display.Display` being
managed is sync'd.
:param Xlib.display.Display display: The *X* display.
:return: the display
:rtype: Xlib.display.Display
"""
from contextlib import contextmanager
@contextmanager
def manager():
errors = []
def handler(*args):
errors.append(args)
old_handler = display.set_error_handler(handler)
yield display
display.sync()
display.set_error_handler(old_handler)
if errors:
raise X11Error(errors)
return manager()
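# Usage sketch (mirrors how the classes below use it; shown here only for
# clarity): Xlib calls issued inside the managed block are sync'd and any
# X errors raised on exit as an X11Error.
#
#   with display_manager(Display()) as d:
#       fake_input(d, X.MotionNotify, x=100, y=200)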
def translate_button_code(button):
# In X11, the button numbers are:
# leftclick=1, middleclick=2, rightclick=3
# For the purposes of the cross-platform interface of PyMouse, we
# invert the button number values of the right and middle buttons
if button in [1, 2, 3]:
return (None, 1, 3, 2)[button]
else:
return button
def button_code_to_scroll_direction(button):
# scrollup=4, scrolldown=5, scrollleft=6, scrollright=7
return {
4: (1, 0),
5: (-1, 0),
6: (0, 1),
7: (0, -1),
}[button]
class PyMouse(PyMouseMeta):
def __init__(self, display=None):
PyMouseMeta.__init__(self)
self.display = Display(display)
self.display2 = Display(display)
def press(self, x, y, button=1):
self.move(x, y)
with display_manager(self.display) as d:
fake_input(d, X.ButtonPress, translate_button_code(button))
def release(self, x, y, button=1):
self.move(x, y)
with display_manager(self.display) as d:
fake_input(d, X.ButtonRelease, translate_button_code(button))
def scroll(self, vertical=None, horizontal=None, depth=None):
#Xlib supports only vertical and horizontal scrolling
if depth is not None:
raise ScrollSupportError('PyMouse cannot support depth-scrolling \
in X11. This feature is only available on Mac.')
#Execute vertical then horizontal scrolling events
if vertical is not None:
vertical = int(vertical)
if vertical == 0: # Do nothing with 0 distance
pass
elif vertical > 0: # Scroll up if positive
self.click(*self.position(), button=4, n=vertical)
else: # Scroll down if negative
self.click(*self.position(), button=5, n=abs(vertical))
if horizontal is not None:
horizontal = int(horizontal)
if horizontal == 0: # Do nothing with 0 distance
pass
elif horizontal > 0: # Scroll right if positive
self.click(*self.position(), button=7, n=horizontal)
else: # Scroll left if negative
self.click(*self.position(), button=6, n=abs(horizontal))
def move(self, x, y):
if (x, y) != self.position():
with display_manager(self.display) as d:
fake_input(d, X.MotionNotify, x=x, y=y)
def drag(self, x, y):
with display_manager(self.display) as d:
fake_input(d, X.ButtonPress, 1)
fake_input(d, X.MotionNotify, x=x, y=y)
fake_input(d, X.ButtonRelease, 1)
def position(self):
coord = self.display.screen().root.query_pointer()._data
return coord["root_x"], coord["root_y"]
def screen_size(self):
width = self.display.screen().width_in_pixels
height = self.display.screen().height_in_pixels
return width, height
class PyMouseEvent(PyMouseEventMeta):
def __init__(self, capture=False, capture_move=False, display=None):
PyMouseEventMeta.__init__(self,
capture=capture,
capture_move=capture_move)
self.display = Display(display)
self.display2 = Display(display)
self.ctx = self.display2.record_create_context(
0,
[record.AllClients],
[{
'core_requests': (0, 0),
'core_replies': (0, 0),
'ext_requests': (0, 0, 0, 0),
'ext_replies': (0, 0, 0, 0),
'delivered_events': (0, 0),
'device_events': (X.ButtonPressMask, X.ButtonReleaseMask),
'errors': (0, 0),
'client_started': False,
'client_died': False,
}])
def run(self):
try:
if self.capture and self.capture_move:
capturing = X.ButtonPressMask | X.ButtonReleaseMask | X.PointerMotionMask
elif self.capture:
capturing = X.ButtonPressMask | X.ButtonReleaseMask
elif self.capture_move:
capturing = X.PointerMotionMask
else:
capturing = False
if capturing:
self.display2.screen().root.grab_pointer(True,
capturing,
X.GrabModeAsync,
X.GrabModeAsync,
0, 0, X.CurrentTime)
self.display.screen().root.grab_pointer(True,
capturing,
X.GrabModeAsync,
X.GrabModeAsync,
0, 0, X.CurrentTime)
self.display2.record_enable_context(self.ctx, self.handler)
self.display2.record_free_context(self.ctx)
except KeyboardInterrupt:
self.stop()
def stop(self):
self.state = False
with display_manager(self.display) as d:
d.ungrab_pointer(X.CurrentTime)
d.record_disable_context(self.ctx)
with display_manager(self.display2) as d:
d.ungrab_pointer(X.CurrentTime)
d.record_disable_context(self.ctx)
def handler(self, reply):
data = reply.data
while len(data):
event, data = rq.EventField(None).parse_binary_value(data, self.display.display, None, None)
if event.detail in [4, 5, 6, 7]:
if event.type == X.ButtonPress:
self.scroll(event.root_x, event.root_y, *button_code_to_scroll_direction(event.detail))
elif event.type == X.ButtonPress:
self.click(event.root_x, event.root_y, translate_button_code(event.detail), True)
elif event.type == X.ButtonRelease:
self.click(event.root_x, event.root_y, translate_button_code(event.detail), False)
else:
self.move(event.root_x, event.root_y)
| gpl-3.0 | 9,175,964,851,776,071,000 | 36.146789 | 107 | 0.567301 | false |
tensorflow/models | research/efficient-hrl/environments/point.py | 4 | 3150 | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for creating the ant environment in gym_mujoco."""
import math
import numpy as np
import mujoco_py
from gym import utils
from gym.envs.mujoco import mujoco_env
class PointEnv(mujoco_env.MujocoEnv, utils.EzPickle):
FILE = "point.xml"
ORI_IND = 2
def __init__(self, file_path=None, expose_all_qpos=True):
self._expose_all_qpos = expose_all_qpos
mujoco_env.MujocoEnv.__init__(self, file_path, 1)
utils.EzPickle.__init__(self)
@property
def physics(self):
# check mujoco version is greater than version 1.50 to call correct physics
# model containing PyMjData object for getting and setting position/velocity
# check https://github.com/openai/mujoco-py/issues/80 for updates to api
if mujoco_py.get_version() >= '1.50':
return self.sim
else:
return self.model
def _step(self, a):
return self.step(a)
def step(self, action):
action[0] = 0.2 * action[0]
qpos = np.copy(self.physics.data.qpos)
qpos[2] += action[1]
ori = qpos[2]
# compute increment in each direction
dx = math.cos(ori) * action[0]
dy = math.sin(ori) * action[0]
# ensure that the robot is within reasonable range
qpos[0] = np.clip(qpos[0] + dx, -100, 100)
qpos[1] = np.clip(qpos[1] + dy, -100, 100)
qvel = self.physics.data.qvel
self.set_state(qpos, qvel)
for _ in range(0, self.frame_skip):
self.physics.step()
next_obs = self._get_obs()
reward = 0
done = False
info = {}
return next_obs, reward, done, info
def _get_obs(self):
if self._expose_all_qpos:
return np.concatenate([
self.physics.data.qpos.flat[:3], # Only point-relevant coords.
self.physics.data.qvel.flat[:3]])
return np.concatenate([
self.physics.data.qpos.flat[2:3],
self.physics.data.qvel.flat[:3]])
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
size=self.physics.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(self.physics.model.nv) * .1
# Set everything other than point to original position and 0 velocity.
qpos[3:] = self.init_qpos[3:]
qvel[3:] = 0.
self.set_state(qpos, qvel)
return self._get_obs()
def get_ori(self):
return self.physics.data.qpos[self.__class__.ORI_IND]
def set_xy(self, xy):
qpos = np.copy(self.physics.data.qpos)
qpos[0] = xy[0]
qpos[1] = xy[1]
qvel = self.physics.data.qvel
| apache-2.0 | 3,456,153,939,160,689,700 | 31.474227 | 80 | 0.647302 | false |
sajeeshcs/nested_quota | nova/tests/test_configdrive2.py | 11 | 3730 | # Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import mox
from oslo.config import cfg
from nova import context
from nova.openstack.common import fileutils
from nova import test
from nova.tests import fake_instance
from nova import utils
from nova.virt import configdrive
CONF = cfg.CONF
class FakeInstanceMD(object):
def metadata_for_config_drive(self):
yield ('this/is/a/path/hello', 'This is some content')
class ConfigDriveTestCase(test.NoDBTestCase):
def test_create_configdrive_iso(self):
CONF.set_override('config_drive_format', 'iso9660')
imagefile = None
try:
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
'-allow-lowercase', '-allow-multidot', '-l',
'-publisher', mox.IgnoreArg(), '-quiet', '-J', '-r',
'-V', 'config-2', mox.IgnoreArg(), attempts=1,
run_as_root=False).AndReturn(None)
self.mox.ReplayAll()
with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
(fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
os.close(fd)
c.make_drive(imagefile)
finally:
if imagefile:
fileutils.delete_if_exists(imagefile)
def test_create_configdrive_vfat(self):
CONF.set_override('config_drive_format', 'vfat')
imagefile = None
try:
self.mox.StubOutWithMock(utils, 'mkfs')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(utils, 'trycmd')
utils.mkfs('vfat', mox.IgnoreArg(),
label='config-2').AndReturn(None)
utils.trycmd('mount', '-o', mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(),
run_as_root=True).AndReturn((None, None))
utils.execute('umount', mox.IgnoreArg(),
run_as_root=True).AndReturn(None)
self.mox.ReplayAll()
with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
(fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
os.close(fd)
c.make_drive(imagefile)
# NOTE(mikal): we can't check for a VFAT output here because the
# filesystem creation stuff has been mocked out because it
# requires root permissions
finally:
if imagefile:
fileutils.delete_if_exists(imagefile)
def test_config_drive_required_by_image_property(self):
inst = fake_instance.fake_instance_obj(context.get_admin_context())
inst.config_drive = ''
inst.system_metadata = {
utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'mandatory'}
self.assertTrue(configdrive.required_by(inst))
inst.system_metadata = {
utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'optional'}
self.assertFalse(configdrive.required_by(inst))
| apache-2.0 | 704,895,353,789,243,900 | 34.865385 | 78 | 0.608311 | false |
oas89/iktomi | iktomi/unstable/db/sqla/replication.py | 2 | 8766 | '''
Functions for application-level replication.
Terminology:
reflect::
Find an object with the same identifier in the target database without
changing or creating it.
replicate::
Find or create new object with the same identifier in the target database
and update it with data of the current object. Only SQLAlchemy attributes
found in both source and target classes are copied. For objects found via
    relationships the following rules apply: private ones are replicated and
references to independent objects are reflected.
'''
from weakref import WeakSet
from sqlalchemy.schema import Column
from sqlalchemy.orm import object_session
from sqlalchemy.orm.util import identity_key
from sqlalchemy.orm.attributes import manager_of_class, QueryableAttribute
from sqlalchemy.orm.properties import ColumnProperty, RelationshipProperty
from sqlalchemy.orm.collections import collection_adapter
from sqlalchemy.orm.attributes import instance_state, instance_dict
from sqlalchemy.orm.interfaces import MANYTOMANY, MANYTOONE, ONETOMANY
_included = WeakSet()
_excluded = WeakSet()
def include(prop):
'''Replicate property that is normally not replicated. Right now it's
meaningful for one-to-many relations only.'''
if isinstance(prop, QueryableAttribute):
prop = prop.property
assert isinstance(prop, (Column, ColumnProperty, RelationshipProperty))
#assert isinstance(prop, RelationshipProperty)
_included.add(prop)
def exclude(prop):
'''Don't replicate property that is normally replicated: ordering column,
    many-to-one relation that is marked for replication from the other side.'''
if isinstance(prop, QueryableAttribute):
prop = prop.property
assert isinstance(prop, (Column, ColumnProperty, RelationshipProperty))
_excluded.add(prop)
if isinstance(prop, RelationshipProperty):
# Also exclude columns that participate in this relationship
for local in prop.local_columns:
_excluded.add(local)
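# Illustrative sketch (hypothetical model and attribute names, not part of
# this module): marking properties for the replication rules above might
# look like
#
#   include(DocVariant.sections)   # force replication of a one-to-many relation
#   exclude(DocVariant.order)      # skip an ordering column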
def reflect(source, model, cache=None):
'''Finds an object of class `model` with the same identifier as the
`source` object'''
if source is None:
return None
if cache and source in cache:
return cache[source]
db = object_session(source)
ident = identity_key(instance=source)[1]
assert ident is not None
return db.query(model).get(ident)
class _PrimaryKeyIsNull(BaseException):
'''Used when setting relationship property to None if this causes setting
    a non-nullable primary key column to NULL. Such objects should be skipped
from replicate_filter.'''
def replicate_relation(source, target, attr, target_attr, cache=None):
if attr.property.cascade.delete_orphan:
process_scalar = replicate_no_merge
process_list = replicate_filter
else:
process_scalar = reflect
process_list = reflect_filter
value = getattr(source, attr.key)
target_attr_model = target_attr.property.mapper.class_
if attr.property.uselist:
adapter = collection_adapter(value)
if adapter:
# Convert any collection to flat iterable
value = adapter.adapt_like_to_iterable(value)
reflection = process_list(value, target_attr_model, cache=cache)
impl = instance_state(target).get_impl(attr.key)
# Set any collection value from flat list
impl._set_iterable(instance_state(target),
instance_dict(target),
reflection)
else:
reflection = process_scalar(value, target_attr_model, cache=cache)
setattr(target, attr.key, reflection)
if (reflection is None and
attr.property.direction is MANYTOONE and
any(col.primary_key and not col.nullable
for col in attr.property.local_columns)):
raise _PrimaryKeyIsNull()
def is_relation_replicatable(attr):
if attr.property in _included:
return True
elif attr.property in _excluded:
return False
elif attr.property.viewonly:
return False
elif attr.property.cascade.delete_orphan:
# Private, replicate
return True
elif attr.property.direction is MANYTOMANY:
        # Many-to-many. Usually one side is a short list and the other is long
        # or absent. Reflect if not dynamic; other cases should be excluded
# manually.
assert attr.property.lazy in (True, False, 'dynamic')
return attr.property.lazy!='dynamic'
elif attr.property.direction is MANYTOONE:
        # Many-to-one and one-to-one with FK pointing from this side to
        # the other.
return True
else:
assert attr.property.direction is ONETOMANY
return False
def _column_property_in_registry(prop, registry):
if prop in registry:
return True
elif len(prop.columns)==1:
# Column() is translated to ColumnProperty with single column
return prop.columns[0] in registry
else:
return False
def replicate_attributes(source, target, cache=None):
'''Replicates common SQLAlchemy attributes from the `source` object to the
`target` object.'''
target_manager = manager_of_class(type(target))
column_attrs = set()
relationship_attrs = set()
relationship_columns = set()
for attr in manager_of_class(type(source)).attributes:
if attr.key not in target_manager:
# It's not common attribute
continue
target_attr = target_manager[attr.key]
if isinstance(attr.property, ColumnProperty):
assert isinstance(target_attr.property, ColumnProperty)
column_attrs.add(attr)
elif isinstance(attr.property, RelationshipProperty):
assert isinstance(target_attr.property, RelationshipProperty)
relationship_attrs.add(attr)
if attr.property.direction is MANYTOONE:
relationship_columns.update(attr.property.local_columns)
for attr in column_attrs:
if _column_property_in_registry(attr.property, _excluded):
continue
elif (not _column_property_in_registry(attr.property, _included) and
all(column in relationship_columns
for column in attr.property.columns)):
continue
setattr(target, attr.key, getattr(source, attr.key))
for attr in relationship_attrs:
target_attr_model = target_manager[attr.key].property.argument
if not is_relation_replicatable(attr):
continue
replicate_relation(source, target, attr, target_manager[attr.key],
cache=cache)
def replicate_no_merge(source, model, cache=None):
'''Replicates the `source` object to `model` class and returns its
reflection.'''
# `cache` is used to break circular dependency: we need to replicate
# attributes before merging target into the session, but replication of
# some attributes may require target to be in session to avoid infinite
# loop.
if source is None:
return None
if cache is None:
cache = {}
elif source in cache:
return cache[source]
db = object_session(source)
cls, ident = identity_key(instance=source)
target = db.query(model).get(ident)
if target is None:
target = model()
cache[source] = target
try:
replicate_attributes(source, target, cache=cache)
except _PrimaryKeyIsNull:
return None
else:
return target
def replicate(source, model, cache=None):
'''Replicates the `source` object to `model` class and returns its
reflection.'''
target = replicate_no_merge(source, model, cache=cache)
if target is not None:
db = object_session(source)
target = db.merge(target)
return target
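# Publishing sketch (assumed usage with hypothetical Front/Published model
# classes; front_doc is expected to be attached to a session):
#
#   published_doc = replicate(front_doc, PublishedDoc)
#   # attributes are copied, private related objects are replicated
#   # recursively, and independent related objects are only reflected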
def replicate_filter(sources, model, cache=None):
    '''Replicates the list of objects to the other class and returns their
reflections'''
targets = [replicate_no_merge(source, model, cache=cache)
for source in sources]
# Some objects may not be available in target DB (not published), so we
# have to exclude None from the list.
return [target for target in targets if target is not None]
def reflect_filter(sources, model, cache=None):
    '''Returns the list of reflections of objects in the `source` list to the
    other class. Objects that are not found in the target table are silently
    discarded.
'''
targets = [reflect(source, model, cache=cache) for source in sources]
# Some objects may not be available in target DB (not published), so we
# have to exclude None from the list.
return [target for target in targets if target is not None]
| mit | -4,490,608,244,913,130,500 | 37.447368 | 79 | 0.682067 | false |
michalskalski/puppet-eseries-old | acceptancetests/netapp_helper_class.py | 1 | 11229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest2
from netapp_restlibs import generic_delete, generic_post, generic_get, array_controller
import os
import sh
import time
import re
import subprocess
import netapp_config as configuration
import datetime
import logging
import random
class MyTestSuite(unittest2.TestCase):
cookie = None
http_pool = None
url = None
log = None
ch = None
output = None
returncode = None
bck_manifest_name = None
manifest_path = None
first_system_id = None
first_system_ip1 = None
first_system_ip2 = None
first_system_pass = None
first_system_test_pass = None
first_system_test_ip = None
########################################################################################################################
@classmethod
def switch_to_custom_manifest(cls, manifest_body):
'''
        Helper to overwrite the original manifest with a custom manifest
:param manifest_body:
:return: None
'''
with open("/var/tmp/netapp_test_suite_tmp_site.pp", 'w') as temp_site_pp:
temp_site_pp.write(manifest_body)
if os.geteuid() != 0:
sh.sudo('/bin/mv', '/var/tmp/netapp_test_suite_tmp_site.pp', cls.manifest_path + "/site.pp")
sh.sudo('/bin/chmod', '664', cls.manifest_path + "/site.pp")
else:
sh.mv('/var/tmp/netapp_test_suite_tmp_site.pp', cls.manifest_path + "/site.pp")
sh.chmod('664', cls.manifest_path + "/site.pp")
# Show how looks like site.pp for now
cls.log.debug("How looks site.pp for now (by 'cat {0}'):".format(cls.manifest_path + "/site.pp"))
cls.log.debug(sh.cat(cls.manifest_path + "/site.pp"))
########################################################################################################################
@classmethod
def get_system_list(cls):
objectBundle = generic_get('storage-systems')
system_list = [s['id'] for s in objectBundle]
return system_list
########################################################################################################################
@classmethod
def parse_multiline(cls, string):
retval = ''
for char in string:
retval += char if not char == '\n' else ''
if char == '\n':
yield retval
retval = ''
if retval:
yield retval
return
########################################################################################################################
@classmethod
def output_errors_has_not(cls, pattern):
for i in cls.parse_multiline(cls.output):
if i.find('Error')>-1:
if i.find(pattern)>-1:
cls.log.debug("Line from command output contains {pattern}:\n>>>{line}\n".format(pattern=pattern, line=i))
return False
return True
########################################################################################################################
@classmethod
def output_errors_has(cls, pattern):
# TODO Make return all lines not one
result = False
for i in cls.parse_multiline(cls.output):
if i.find('Error')>-1:
if i.find(pattern)>-1:
cls.log.debug("Line from command output contains {pattern}:\n>>>{line}\n".format(pattern=pattern, line=i))
result=True
return result
########################################################################################################################
@classmethod
def run_puppet_device(cls, verbose=False):
'''
Helper to run puppet device by subprocess
'''
cls.log.debug("Delaying for working out puppet device issue...")
time.sleep(15)
cls.log.debug("Running shell command 'puppet device' by subprocess...")
#return subprocess.check_output(['puppet device --debug --user root', '-1'], shell=True)
child = subprocess.Popen(['puppet','device','--debug','--user','root', ],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
cls.output = child.communicate()[0]
cls.returncode = child.returncode
if verbose:
cls.log.debug('Output from puppet command:\n {output}\nReturn code: {returncode}\n'.format(output=cls.output,
returncode=cls.returncode))
# TODO What if we want to see not only systems but their entities????
return cls.get_system_list()
########################################################################################################################
@classmethod
def rid_all_BANANAs(cls):
for i in cls.get_system_list():
if re.match(r'^BANANA[0-9]?_',i):
cls.log.debug("Delete '{system_id}' system by REST ...".format(system_id=i))
generic_delete('storage-system',array_id=i)
return
########################################################################################################################
@classmethod
def rid_system(cls, system_id):
if cls.get_system_list():
cls.log.debug("Delete '{system_id}' system by REST ...".format(system_id=system_id))
try:
generic_delete('storage-system',array_id=system_id)
except:
pass
return
########################################################################################################################
@classmethod
def restore_first_system_by_REST(cls):
if cls.first_system_id not in cls.get_system_list():
data = { "id": cls.first_system_id, "controllerAddresses": [cls.first_system_ip1,cls.first_system_ip1], "password":cls.first_system_pass}
first_system = generic_post('storage-systems', data)
return
########################################################################################################################
@classmethod
def remove_line_from_multiline_regexp(cls, multiline, pattern):
result = ''
for l in cls.parse_multiline(multiline):
if not re.search(pattern,l):
result = result + l + '\n'
return result
########################################################################################################################
# Get ip by REST
@classmethod
def get_ips_by_REST(cls, system_id):
raw_ethernet_interfaces_list = generic_get('ethernet-interfaces', array_id=system_id)
actual_ips = [i['ipv4Address'] for i in raw_ethernet_interfaces_list
if i['linkStatus'].strip() == 'up' and i['ipv4Enabled']]
return actual_ips
########################################################################################################################
# Construct dictionary for manifest section of first system
@classmethod
def construct_dict_for_first_system(cls, signature='BANANA_{0}', rand_hash=hex(random.getrandbits(24))[2:-1]):
dict={}
dict['system_id'] = cls.first_system_id
dict['system_ip1'] = cls.first_system_ip1
dict['system_ip2'] = cls.first_system_ip2
dict['ensure'] = 'present'
dict['system_pass']=cls.first_system_pass
dict['signature']=signature.format(rand_hash)
return dict
########################################################################################################################
@classmethod
def get_random_mac(cls):
random_mac = [0x00, 0x24, 0x81,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
return ''.join(map(lambda x: "%02x" % x, random_mac))
########################################################################################################################
@classmethod
def setUpClass(cls):
# Prepare logger
cls.log = logging.getLogger('netapp_puppet_module_tests')
cls.log.setLevel(logging.DEBUG)
cls.ch = logging.StreamHandler()
cls.ch.setLevel(logging.DEBUG)
cls.log.addHandler(cls.ch)
cls.log.debug("\n"+"-"*45 +" Tests is starting "+"-"*45 + '\n')
# Check if 'puppet agent --configprint usecacheonfailure' if false
cls.log.debug("Puppet agent option 'usecacheonfailure' is set to: " + sh.puppet('agent','--configprint','usecacheonfailure').upper().strip())
if sh.puppet('agent','--configprint','usecacheonfailure').upper().strip()!='FALSE':
raise Exception("You need to set Puppet agent option 'usecacheonfailure' on 'false'!")
# Read config
cls.log.debug("Reading configuration...")
cls.url = configuration.server_root_url
cls.manifest_path = configuration.manifest_path
cls.first_system_id = configuration.first_system_id
cls.first_system_ip1 = configuration.first_system_ip1
cls.first_system_ip2 = configuration.first_system_ip2
cls.first_system_pass = configuration.first_system_pass
cls.first_system_test_pass = configuration.first_system_test_pass
cls.first_system_test_ip = configuration.first_system_test_ip
cls.second_system_id = configuration.second_system_id
cls.second_system_ip1 = configuration.second_system_ip1
cls.second_system_ip2 = configuration.second_system_ip2
cls.second_system_pass = configuration.second_system_pass
# Save current site.pp
cls.bck_manifest_name = cls.manifest_path + \
'/site.pp.' + \
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S') + \
'.bck'
cls.log.debug("Saving original site.pp to {0}...".format(cls.bck_manifest_name))
# Hack for local running
if os.geteuid() != 0:
sh.sudo('/bin/cp', cls.manifest_path + "/site.pp", cls.bck_manifest_name)
sh.sudo('/bin/chmod', '664', cls.bck_manifest_name)
else:
sh.cp(cls.manifest_path + "/site.pp", cls.bck_manifest_name)
sh.chmod('664', cls.bck_manifest_name)
return
########################################################################################################################
@classmethod
def tearDownClass(cls):
cls.log.debug("\n"+"#"*90)
# Getting back original site.pp
cls.log.debug("Restoring original site.pp ...")
if os.geteuid() != 0:
sh.sudo('/bin/mv', cls.bck_manifest_name, cls.manifest_path + "/site.pp" )
else:
sh.mv(cls.bck_manifest_name, cls.manifest_path + "/site.pp")
return
########################################################################################################################
def setUp(self):
self.log.debug('\n\n'+"#"*25+" "+str(self.id())+" "+"#"*25+'\n')
| apache-2.0 | 5,870,457,747,295,706,000 | 37.72069 | 152 | 0.479295 | false |
zlorb/mitmproxy | test/mitmproxy/tools/web/test_app.py | 4 | 11558 | import json as _json
import logging
from unittest import mock
import os
import asyncio
import pytest
import tornado.testing
from tornado import httpclient
from tornado import websocket
from mitmproxy import options
from mitmproxy.test import tflow
from mitmproxy.tools.web import app
from mitmproxy.tools.web import master as webmaster
@pytest.fixture(scope="module")
def no_tornado_logging():
logging.getLogger('tornado.access').disabled = True
logging.getLogger('tornado.application').disabled = True
logging.getLogger('tornado.general').disabled = True
yield
logging.getLogger('tornado.access').disabled = False
logging.getLogger('tornado.application').disabled = False
logging.getLogger('tornado.general').disabled = False
def json(resp: httpclient.HTTPResponse):
return _json.loads(resp.body.decode())
@pytest.mark.usefixtures("no_tornado_logging")
class TestApp(tornado.testing.AsyncHTTPTestCase):
def get_new_ioloop(self):
io_loop = tornado.platform.asyncio.AsyncIOLoop()
asyncio.set_event_loop(io_loop.asyncio_loop)
return io_loop
def get_app(self):
o = options.Options(http2=False)
m = webmaster.WebMaster(o, with_termlog=False)
f = tflow.tflow(resp=True)
f.id = "42"
m.view.add([f])
m.view.add([tflow.tflow(err=True)])
m.log.info("test log")
self.master = m
self.view = m.view
self.events = m.events
webapp = app.Application(m, None)
webapp.settings["xsrf_cookies"] = False
return webapp
def fetch(self, *args, **kwargs) -> httpclient.HTTPResponse:
# tornado disallows POST without content by default.
return super().fetch(*args, **kwargs, allow_nonstandard_methods=True)
def put_json(self, url, data: dict) -> httpclient.HTTPResponse:
return self.fetch(
url,
method="PUT",
body=_json.dumps(data),
headers={"Content-Type": "application/json"},
)
def test_index(self):
assert self.fetch("/").code == 200
def test_filter_help(self):
assert self.fetch("/filter-help").code == 200
def test_flows(self):
resp = self.fetch("/flows")
assert resp.code == 200
assert json(resp)[0]["request"]["contentHash"]
assert json(resp)[1]["error"]
def test_flows_dump(self):
resp = self.fetch("/flows/dump")
assert b"address" in resp.body
def test_clear(self):
events = self.events.data.copy()
flows = list(self.view)
assert self.fetch("/clear", method="POST").code == 200
assert not len(self.view)
assert not len(self.events.data)
# restore
for f in flows:
self.view.add([f])
self.events.data = events
def test_resume(self):
for f in self.view:
f.intercept()
assert self.fetch(
"/flows/42/resume", method="POST").code == 200
assert sum(f.intercepted for f in self.view) == 1
assert self.fetch("/flows/resume", method="POST").code == 200
assert all(not f.intercepted for f in self.view)
def test_kill(self):
for f in self.view:
f.backup()
f.intercept()
assert self.fetch("/flows/42/kill", method="POST").code == 200
assert sum(f.killable for f in self.view) == 1
assert self.fetch("/flows/kill", method="POST").code == 200
assert all(not f.killable for f in self.view)
for f in self.view:
f.revert()
def test_flow_delete(self):
f = self.view.get_by_id("42")
assert f
assert self.fetch("/flows/42", method="DELETE").code == 200
assert not self.view.get_by_id("42")
self.view.add([f])
assert self.fetch("/flows/1234", method="DELETE").code == 404
def test_flow_update(self):
f = self.view.get_by_id("42")
assert f.request.method == "GET"
f.backup()
upd = {
"request": {
"method": "PATCH",
"port": 123,
"headers": [("foo", "bar")],
"content": "req",
},
"response": {
"msg": "Non-Authorisé",
"code": 404,
"headers": [("bar", "baz")],
"content": "resp",
}
}
assert self.put_json("/flows/42", upd).code == 200
assert f.request.method == "PATCH"
assert f.request.port == 123
assert f.request.headers["foo"] == "bar"
assert f.request.text == "req"
assert f.response.msg == "Non-Authorisé"
assert f.response.status_code == 404
assert f.response.headers["bar"] == "baz"
assert f.response.text == "resp"
f.revert()
assert self.put_json("/flows/42", {"foo": 42}).code == 400
assert self.put_json("/flows/42", {"request": {"foo": 42}}).code == 400
assert self.put_json("/flows/42", {"response": {"foo": 42}}).code == 400
assert self.fetch("/flows/42", method="PUT", body="{}").code == 400
assert self.fetch(
"/flows/42",
method="PUT",
headers={"Content-Type": "application/json"},
body="!!"
).code == 400
def test_flow_duplicate(self):
resp = self.fetch("/flows/42/duplicate", method="POST")
assert resp.code == 200
f = self.view.get_by_id(resp.body.decode())
assert f
assert f.id != "42"
self.view.remove([f])
def test_flow_revert(self):
f = self.view.get_by_id("42")
f.backup()
f.request.method = "PATCH"
self.fetch("/flows/42/revert", method="POST")
assert not f._backup
def test_flow_replay(self):
with mock.patch("mitmproxy.command.CommandManager.call") as replay_call:
assert self.fetch("/flows/42/replay", method="POST").code == 200
assert replay_call.called
def test_flow_content(self):
f = self.view.get_by_id("42")
f.backup()
f.response.headers["Content-Encoding"] = "ran\x00dom"
f.response.headers["Content-Disposition"] = 'inline; filename="filename.jpg"'
r = self.fetch("/flows/42/response/content.data")
assert r.body == b"message"
assert r.headers["Content-Encoding"] == "random"
assert r.headers["Content-Disposition"] == 'attachment; filename="filename.jpg"'
del f.response.headers["Content-Disposition"]
f.request.path = "/foo/bar.jpg"
assert self.fetch(
"/flows/42/response/content.data"
).headers["Content-Disposition"] == 'attachment; filename=bar.jpg'
f.response.content = b""
assert self.fetch("/flows/42/response/content.data").code == 400
f.revert()
def test_update_flow_content(self):
assert self.fetch(
"/flows/42/request/content.data",
method="POST",
body="new"
).code == 200
f = self.view.get_by_id("42")
assert f.request.content == b"new"
assert f.modified()
f.revert()
def test_update_flow_content_multipart(self):
body = (
b'--somefancyboundary\r\n'
b'Content-Disposition: form-data; name="a"; filename="a.txt"\r\n'
b'\r\n'
b'such multipart. very wow.\r\n'
b'--somefancyboundary--\r\n'
)
assert self.fetch(
"/flows/42/request/content.data",
method="POST",
headers={"Content-Type": 'multipart/form-data; boundary="somefancyboundary"'},
body=body
).code == 200
f = self.view.get_by_id("42")
assert f.request.content == b"such multipart. very wow."
assert f.modified()
f.revert()
def test_flow_content_view(self):
assert json(self.fetch("/flows/42/request/content/raw")) == {
"lines": [
[["text", "content"]]
],
"description": "Raw"
}
def test_events(self):
resp = self.fetch("/events")
assert resp.code == 200
assert json(resp)[0]["level"] == "info"
def test_settings(self):
assert json(self.fetch("/settings"))["mode"] == "regular"
def test_settings_update(self):
assert self.put_json("/settings", {"anticache": True}).code == 200
assert self.put_json("/settings", {"wtf": True}).code == 400
def test_options(self):
j = json(self.fetch("/options"))
assert type(j) == dict
assert type(j['anticache']) == dict
def test_option_update(self):
assert self.put_json("/options", {"anticache": True}).code == 200
assert self.put_json("/options", {"wtf": True}).code == 400
assert self.put_json("/options", {"anticache": "foo"}).code == 400
def test_option_save(self):
assert self.fetch("/options/save", method="POST").code == 200
def test_err(self):
with mock.patch("mitmproxy.tools.web.app.IndexHandler.get") as f:
f.side_effect = RuntimeError
assert self.fetch("/").code == 500
@tornado.testing.gen_test
def test_websocket(self):
ws_url = "ws://localhost:{}/updates".format(self.get_http_port())
ws_client = yield websocket.websocket_connect(ws_url)
self.master.options.anticomp = True
r1 = yield ws_client.read_message()
r2 = yield ws_client.read_message()
j1 = _json.loads(r1)
j2 = _json.loads(r2)
response = dict()
response[j1['resource']] = j1
response[j2['resource']] = j2
assert response['settings'] == {
"resource": "settings",
"cmd": "update",
"data": {"anticomp": True},
}
assert response['options'] == {
"resource": "options",
"cmd": "update",
"data": {
"anticomp": {
"value": True,
"choices": None,
"default": False,
"help": "Try to convince servers to send us un-compressed data.",
"type": "bool",
}
}
}
ws_client.close()
# trigger on_close by opening a second connection.
ws_client2 = yield websocket.websocket_connect(ws_url)
ws_client2.close()
def _test_generate_tflow_js(self):
_tflow = app.flow_to_json(tflow.tflow(resp=True, err=True))
# Set some value as constant, so that _tflow.js would not change every time.
_tflow['client_conn']['id'] = "4a18d1a0-50a1-48dd-9aa6-d45d74282939"
_tflow['id'] = "d91165be-ca1f-4612-88a9-c0f8696f3e29"
_tflow['error']['timestamp'] = 1495370312.4814785
_tflow['response']['timestamp_end'] = 1495370312.4814625
_tflow['response']['timestamp_start'] = 1495370312.481462
_tflow['server_conn']['id'] = "f087e7b2-6d0a-41a8-a8f0-e1a4761395f8"
tflow_json = _json.dumps(_tflow, indent=4, sort_keys=True)
here = os.path.abspath(os.path.dirname(__file__))
web_root = os.path.join(here, os.pardir, os.pardir, os.pardir, os.pardir, 'web')
tflow_path = os.path.join(web_root, 'src/js/__tests__/ducks/_tflow.js')
content = """export default function(){{\n return {tflow_json}\n}}""".format(tflow_json=tflow_json)
with open(tflow_path, 'w', newline="\n") as f:
f.write(content)
| mit | -1,755,250,077,574,908,200 | 33.495522 | 110 | 0.564296 | false |
arkro/vizures | ratproject/rat/models.py | 1 | 2390 | from django.db import models
# Create your models here.
class Student(models.Model):
reg_no = models.IntegerField(primary_key=True,default='20120000')
roll_no = models.CharField(max_length=10,default='')
name = models.CharField(max_length=100)
department = models.CharField(max_length=50,default= 'CSE')
cgpa = models.DecimalField(max_digits=4,decimal_places=2)
def __unicode__(self):
return str(self.reg_no)+" "+self.name+" "+self.roll_no+" "+str(self.cgpa)
class Semester(models.Model):
sem = models.IntegerField(primary_key=True)
sub1 = models.CharField(max_length=50)
sub2 = models.CharField(max_length=50)
sub3 = models.CharField(max_length=50)
sub4 = models.CharField(max_length=50)
sub5 = models.CharField(max_length=50)
lab1 = models.CharField(max_length=50)
lab2 = models.CharField(max_length=50)
lab3 = models.CharField(max_length=50)
#def __unicode__(self):
# return str(self.sem)+" "+self.sub1+" "+self.sub2+" "+self.sub3+" "\
#+self.sub4+" "+self.sub5+" "+ self.lab1+ " " + self.lab2+ " "+ self.lab3
class Marks(models.Model):
reg_no = models.ForeignKey(Student)
sem = models.ForeignKey(Semester)
sub1 = models.IntegerField(default=0)
sub2 = models.IntegerField(default=0)
sub3 = models.IntegerField(default=0)
sub4 = models.IntegerField(default=0)
sub5 = models.IntegerField(default=0)
lab1 = models.IntegerField(default=0)
lab2 = models.IntegerField(default=0)
lab3 = models.IntegerField(default=0)
sgpa = models.DecimalField(max_digits=4,decimal_places=2)
cgpa = models.DecimalField(max_digits=4,decimal_places=2)
sem_credits = models.IntegerField(default=0)
total_credits = models.IntegerField(default=0)
remarks = models.CharField(max_length=10)
def __unicode__(self):
return str(self.sem)+" "+str(self.reg_no)+ " "+ str(self.sub1) +" "+str(self.sub2)+" "+str(self.sub3)+" "\
+ str(self.sub4)+" "+str(self.sub5)+" "+ str(self.lab1) + " " + str(self.lab2)+ " "+ str(self.lab3) + " "\
+ str(self.sgpa)+ " "+ str(self.cgpa)+ " "+ str(self.sem_credits)+ " "\
+ str(self.total_credits)+" "+ self.remarks
#class Student(models.Model):
# name = models.CharField(max_length=60, blank=False)
# roll = models.CharField(max_length=10, primary_key=True, blank=False)
# cgpa = models.FloatField()
| mit | -5,346,791,335,817,699,000 | 40.929825 | 114 | 0.663598 | false |
gugahoi/maraschino | modules/couchpotato.py | 2 | 14384 | from flask import render_template, request, jsonify, json, send_file
from jinja2.filters import FILTERS
from maraschino.tools import get_setting_value, requires_auth
from maraschino import logger, app, WEBROOT
import urllib2
import StringIO
import base64
import re
def couchpotato_http():
if get_setting_value('couchpotato_https') == '1':
return 'https://'
else:
return 'http://'
def couchpotato_url():
port = get_setting_value('couchpotato_port')
url_base = get_setting_value('couchpotato_ip')
webroot = get_setting_value('couchpotato_webroot')
if port:
url_base = '%s:%s' % (url_base, port)
if webroot:
url_base = '%s/%s' % (url_base, webroot)
url = '%s/api/%s' % (url_base, get_setting_value('couchpotato_api'))
return couchpotato_http() + url
def couchpotato_url_no_api():
port = get_setting_value('couchpotato_port')
url_base = get_setting_value('couchpotato_ip')
webroot = get_setting_value('couchpotato_webroot')
if port:
url_base = '%s:%s' % (url_base, port)
if webroot:
url_base = '%s/%s' % (url_base, webroot)
return couchpotato_http() + url_base
def couchpotato_api(method, params=None, use_json=True, dev=False):
username = get_setting_value('couchpotato_user')
password = get_setting_value('couchpotato_password')
if params:
params = '/?%s' % params
else:
params = '/'
params = (params).replace(' ', '%20')
url = '%s/%s%s' % (couchpotato_url(), method, params)
req = urllib2.Request(url)
if username and password:
base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
req.add_header("Authorization", "Basic %s" % base64string)
data = urllib2.urlopen(req).read()
if dev:
print url
print data
if use_json:
data = json.JSONDecoder().decode(data)
return data
def log_exception(e):
logger.log('CouchPotato :: EXCEPTION -- %s' % e, 'DEBUG')
def couchpotato_image(path):
print path
path_pieces = re.split('\\/', path)
return '%s/xhr/couchpotato/image/%s' % (WEBROOT, path_pieces[-1])
FILTERS['cp_img'] = couchpotato_image
@app.route('/xhr/couchpotato/image/<path:url>')
def couchpotato_proxy(url):
username = get_setting_value('couchpotato_user')
password = get_setting_value('couchpotato_password')
url = '%s/file.cache/%s' % (couchpotato_url(), url)
req = urllib2.Request(url)
if username and password:
base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
req.add_header("Authorization", "Basic %s" % base64string)
img = StringIO.StringIO(urllib2.urlopen(req).read())
logger.log('CouchPotato :: Fetching image from %s' % (url), 'DEBUG')
return send_file(img, mimetype='image/jpeg')
@app.route('/xhr/couchpotato/')
@app.route('/xhr/couchpotato/<status>/')
def xhr_couchpotato(status='active'):
profiles = {}
status_string = 'status=%s' % status
template = 'couchpotato.html'
    if status != 'active':
template = 'couchpotato/all.html'
try:
logger.log('CouchPotato :: Fetching "%s movies" list' % status, 'INFO')
couchpotato = couchpotato_api('movie.list', params=status_string)
except Exception as e:
log_exception(e)
couchpotato = None
logger.log('CouchPotato :: Fetching "%s movies" list (DONE)' % status, 'INFO')
    if status == 'wanted' and not isinstance(couchpotato, list):
logger.log('CouchPotato :: Wanted movies list is empty', 'INFO')
return cp_search('There are no movies in your wanted list.')
profiles = couchpotato_api('profile.list')
for movie in couchpotato['movies']:
for profile in profiles['list']:
if profile['_id'] == movie['profile_id']:
movie['profile_label'] = profile['label']
return render_template(template,
url=couchpotato_url(),
app_link=couchpotato_url_no_api(),
couchpotato=couchpotato,
profiles=profiles,
compact_view=get_setting_value('couchpotato_compact') == '1',
)
@app.route('/xhr/couchpotato/history/')
def xhr_couchpotato_history():
unread = 0
try:
couchpotato = couchpotato_api('notification.list')
couchpotato = couchpotato['notifications']
for notification in couchpotato:
if not notification['read']:
unread = unread + 1
except Exception as e:
logger.log('CouchPotato :: Could not retrieve Couchpotato - %s' % (e), 'WARNING')
couchpotato = "empty"
return render_template('couchpotato/history.html',
couchpotato=couchpotato,
unread=unread,
)
@app.route('/xhr/couchpotato/search/')
def cp_search(message=None):
couchpotato = {}
params = False
profiles = {}
try:
query = request.args['name']
params = 'q=' + query
except:
pass
if params:
try:
logger.log('CouchPotato :: Searching for movie: %s' % (query), 'INFO')
couchpotato = couchpotato_api('movie.search', params=params)
amount = len(couchpotato['movies'])
logger.log('CouchPotato :: found %i movies for %s' % (amount, query), 'INFO')
if couchpotato['success'] and amount != 0:
couchpotato = couchpotato['movies']
try:
# logger.log('CouchPotato :: Getting quality profiles', 'INFO')
profiles = couchpotato_api('profile.list')
except Exception as e:
log_exception(e)
else:
return render_template('couchpotato/search.html', error='No movies with "%s" were found' % (query), couchpotato='results')
except Exception as e:
log_exception(e)
couchpotato = None
else:
logger.log('CouchPotato :: Loading search template', 'DEBUG')
couchpotato = None
return render_template('couchpotato/search.html',
data=couchpotato,
couchpotato='results',
profiles=profiles,
error=message
)
@app.route('/xhr/couchpotato/add_movie/<imdbid>/<title>/')
@app.route('/xhr/couchpotato/add_movie/<imdbid>/<title>/<profile>/')
def add_movie(imdbid, title, profile=False):
if profile:
params = 'identifier=%s&title=%s&profile_id=%s' % (imdbid, title, profile)
else:
params = 'identifier=%s&title=%s' % (imdbid, title)
try:
logger.log('CouchPotato :: Adding %s (%s) to wanted list' % (title, imdbid), 'INFO')
result = couchpotato_api('movie.add', params)
return jsonify(result)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/restart/')
@requires_auth
def cp_restart():
try:
logger.log('CouchPotato :: Restarting', 'INFO')
result = couchpotato_api('app.restart', use_json=False)
if 'restarting' in result:
return jsonify({'success': True})
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/available/')
@requires_auth
def cp_available():
try:
logger.log('CouchPotato :: Checking if CouchPotato is available', 'INFO')
result = couchpotato_api('app.available')
return jsonify(result)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/shutdown/')
@requires_auth
def cp_shutdown():
try:
logger.log('CouchPotato :: Shutting down', 'INFO')
result = couchpotato_api('app.shutdown', use_json=False)
if 'shutdown' in result:
return jsonify({'success': True})
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/version/')
@requires_auth
def cp_version():
try:
result = couchpotato_api('app.version')
return jsonify(result)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/profiles/')
@requires_auth
def cp_profiles():
try:
logger.log('CouchPotato :: Getting profiles', 'INFO')
result = couchpotato_api('profile.list')
return jsonify(result)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/quality/')
@requires_auth
def cp_quality():
try:
logger.log('CouchPotato :: Getting quality', 'INFO')
result = couchpotato_api('quality.list')
return jsonify(result)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/update/check/')
@requires_auth
def cp_update_check():
try:
logger.log('CouchPotato :: Getting update', 'INFO')
result = couchpotato_api('updater.check')
return jsonify(result)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/delete_movie/<id>/')
@requires_auth
def movie_delete(id):
"""
Delete a movie from list
----- Params -----
id int (comma separated) Movie ID(s) you want to delete.
delete_from string: all (default), wanted, manage Delete movie from this page
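    ----- Example -----
    GET /xhr/couchpotato/delete_movie/42/       delete a single movie
    GET /xhr/couchpotato/delete_movie/42,43/    ids may be comma separated
    (illustrative requests; the ids above are made up)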
"""
try:
logger.log('CouchPotato :: Deleting movie %s' % id, 'INFO')
result = couchpotato_api('movie.delete', 'id=%s' % id)
return jsonify(result)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/refresh_movie/<id>/')
def movie_refresh(id):
"""
Refresh a movie from list
----- Params -----
id int (comma separated) Movie ID(s) you want to refresh.
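    ---- Example -----
    GET /xhr/couchpotato/refresh_movie/42/    (illustrative request; the id is made up)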
"""
try:
logger.log('CouchPotato :: Refreshing movie %s' % id, 'INFO')
result = couchpotato_api('movie.refresh', 'id=%s' % id)
return jsonify(result)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/settings/')
def cp_settings():
"""
Retrieve settings from CP
"""
try:
logger.log('CouchPotato :: Retrieving settings', 'INFO')
result = couchpotato_api('settings')
logger.log('CouchPotato :: Retrieving settings (DONE)', 'INFO')
return render_template('couchpotato/settings.html',
couchpotato=result,
)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/get_movie/<id>/')
def cp_get_movie(id):
"""
Retrieve movie from CP
---- Params -----
id int (comma separated) The id of the movie
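    ---- Example -----
    GET /xhr/couchpotato/get_movie/42/    (illustrative request; the id is made up)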
"""
try:
logger.log('CouchPotato :: Retrieving movie info', 'INFO')
result = couchpotato_api('media.get', 'id=%s' % id)
try:
logger.log('CouchPotato :: Getting quality profiles', 'INFO')
profiles = couchpotato_api('profile.list')
except Exception as e:
log_exception(e)
logger.log('CouchPotato :: Retrieving movie info (DONE)', 'INFO')
return render_template('couchpotato/info.html',
couchpotato=result,
profiles=profiles,
)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/edit_movie/<movieid>/<profileid>/')
def cp_edit_movie(movieid, profileid):
"""
Edit movie in CP
---- Params -----
movieid int (comma separated) The id of the movie
profileid int Id of the profile to go to
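    ---- Example -----
    GET /xhr/couchpotato/edit_movie/42/3/    move movie 42 to profile 3
    (illustrative request; the ids are made up)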
"""
try:
logger.log('CouchPotato :: Retrieving movie info', 'INFO')
result = couchpotato_api('movie.edit', 'id=%s&profile_id=%s' % (movieid, profileid))
if result['success']:
logger.log('CouchPotato :: Retrieving movie info (DONE)', 'INFO')
return jsonify({'success': True})
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/log/')
@app.route('/xhr/couchpotato/log/<type>/<lines>/')
def cp_log(type='all', lines=30):
"""
    Retrieve log lines from CP
---- Params -----
type <optional> all, error, info, debug Type of log
lines <optional> int Number of lines - last to first
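    ---- Example -----
    GET /xhr/couchpotato/log/              last 30 lines of the full log
    GET /xhr/couchpotato/log/error/50/     last 50 error-level lines
    (illustrative requests; the values are made up)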
"""
try:
logger.log('CouchPotato :: Retrieving "%s" log' % type, 'INFO')
result = couchpotato_api('logging.partial', 'type=%s&lines=%s' % (type, lines))
if result['success']:
logger.log('CouchPotato :: Retrieving "%s" log (DONE)' % type, 'INFO')
return render_template('couchpotato/log.html',
couchpotato=result,
level=type,
)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/notification/read/')
@app.route('/xhr/couchpotato/notification/read/<int:id>/')
def cp_notification_read(id=False):
"""
Mark notification as read in CP
---- Params -----
ids <optional> int Notification id - if empty will mark all notifications
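    ---- Example -----
    GET /xhr/couchpotato/notification/read/      mark all notifications as read
    GET /xhr/couchpotato/notification/read/7/    mark notification 7 as read
    (illustrative requests; the id is made up)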
"""
try:
        if id:
            logger.log('CouchPotato :: Marking notification "%i" as read' % id, 'INFO')
            couchpotato_api('notification.markread', 'ids=%i' % id)
        else:
            logger.log('CouchPotato :: Marking all notifications as read', 'INFO')
            couchpotato_api('notification.markread')
return jsonify({'success': True})
except Exception as e:
log_exception(e)
return jsonify({'success': False})
@app.route('/xhr/couchpotato/release/<action>/<id>/')
@requires_auth
def release_action(action, id):
if id.isdigit():
id = int(id)
try:
logger.log('CouchPotato :: %sing release %s' % (action.title()[:-1], id), 'INFO')
result = couchpotato_api('release.%s' % action, 'id=%s' % id)
return jsonify(result)
except Exception as e:
log_exception(e)
return jsonify({'success': False})
| mit | 5,921,693,075,078,668,000 | 29.282105 | 138 | 0.596496 | false |
lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/sympy/functions/special/tests/test_error_functions.py | 4 | 22621 | from sympy import (
symbols, expand, expand_func, nan, oo, Float, conjugate, diff,
re, im, Abs, O, factorial, exp_polar, polar_lift, gruntz, limit,
Symbol, I, integrate, S,
sqrt, sin, cos, sinh, cosh, exp, log, pi, EulerGamma,
erf, erfc, erfi, erf2, erfinv, erfcinv, erf2inv,
gamma, uppergamma, loggamma,
Ei, expint, E1, li, Li, Si, Ci, Shi, Chi,
fresnels, fresnelc,
hyper, meijerg)
from sympy.functions.special.error_functions import _erfs, _eis
from sympy.core.function import ArgumentIndexError
from sympy.utilities.pytest import raises
x, y, z = symbols('x,y,z')
w = Symbol("w", real=True)
n = Symbol("n", integer=True)
def test_erf():
assert erf(nan) == nan
assert erf(oo) == 1
assert erf(-oo) == -1
assert erf(0) == 0
assert erf(I*oo) == oo*I
assert erf(-I*oo) == -oo*I
assert erf(-2) == -erf(2)
assert erf(-x*y) == -erf(x*y)
assert erf(-x - y) == -erf(x + y)
assert erf(erfinv(x)) == x
assert erf(erfcinv(x)) == 1 - x
assert erf(erf2inv(0, x)) == x
assert erf(erf2inv(0, erf(erfcinv(1 - erf(erfinv(x)))))) == x
assert erf(I).is_real is False
assert erf(0).is_real is True
assert conjugate(erf(z)) == erf(conjugate(z))
assert erf(x).as_leading_term(x) == 2*x/sqrt(pi)
assert erf(1/x).as_leading_term(x) == erf(1/x)
assert erf(z).rewrite('uppergamma') == sqrt(z**2)*erf(sqrt(z**2))/z
assert erf(z).rewrite('erfc') == S.One - erfc(z)
assert erf(z).rewrite('erfi') == -I*erfi(I*z)
assert erf(z).rewrite('fresnels') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erf(z).rewrite('fresnelc') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erf(z).rewrite('hyper') == 2*z*hyper([S.Half], [3*S.Half], -z**2)/sqrt(pi)
assert erf(z).rewrite('meijerg') == z*meijerg([S.Half], [], [0], [-S.Half], z**2)/sqrt(pi)
assert erf(z).rewrite('expint') == sqrt(z**2)/z - z*expint(S.Half, z**2)/sqrt(S.Pi)
assert limit(exp(x)*exp(x**2)*(erf(x + 1/exp(x)) - erf(x)), x, oo) == \
2/sqrt(pi)
assert limit((1 - erf(z))*exp(z**2)*z, z, oo) == 1/sqrt(pi)
assert limit((1 - erf(x))*exp(x**2)*sqrt(pi)*x, x, oo) == 1
assert limit(((1 - erf(x))*exp(x**2)*sqrt(pi)*x - 1)*2*x**2, x, oo) == -1
assert erf(x).as_real_imag() == \
((erf(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
erf(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
I*(erf(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
erf(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
raises(ArgumentIndexError, lambda: erf(x).fdiff(2))
def test_erf_series():
assert erf(x).series(x, 0, 7) == 2*x/sqrt(pi) - \
2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
def test_erf_evalf():
assert abs( erf(Float(2.0)) - 0.995322265 ) < 1E-8 # XXX
def test__erfs():
assert _erfs(z).diff(z) == -2/sqrt(S.Pi) + 2*z*_erfs(z)
assert _erfs(1/z).series(z) == \
z/sqrt(pi) - z**3/(2*sqrt(pi)) + 3*z**5/(4*sqrt(pi)) + O(z**6)
assert expand(erf(z).rewrite('tractable').diff(z).rewrite('intractable')) \
== erf(z).diff(z)
assert _erfs(z).rewrite("intractable") == (-erf(z) + 1)*exp(z**2)
def test_erfc():
assert erfc(nan) == nan
assert erfc(oo) == 0
assert erfc(-oo) == 2
assert erfc(0) == 1
assert erfc(I*oo) == -oo*I
assert erfc(-I*oo) == oo*I
assert erfc(-x) == S(2) - erfc(x)
assert erfc(erfcinv(x)) == x
assert erfc(I).is_real is False
assert erfc(0).is_real is True
assert conjugate(erfc(z)) == erfc(conjugate(z))
assert erfc(x).as_leading_term(x) == S.One
assert erfc(1/x).as_leading_term(x) == erfc(1/x)
assert erfc(z).rewrite('erf') == 1 - erf(z)
assert erfc(z).rewrite('erfi') == 1 + I*erfi(I*z)
assert erfc(z).rewrite('fresnels') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erfc(z).rewrite('fresnelc') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erfc(z).rewrite('hyper') == 1 - 2*z*hyper([S.Half], [3*S.Half], -z**2)/sqrt(pi)
assert erfc(z).rewrite('meijerg') == 1 - z*meijerg([S.Half], [], [0], [-S.Half], z**2)/sqrt(pi)
assert erfc(z).rewrite('uppergamma') == 1 - sqrt(z**2)*erf(sqrt(z**2))/z
assert erfc(z).rewrite('expint') == S.One - sqrt(z**2)/z + z*expint(S.Half, z**2)/sqrt(S.Pi)
assert erfc(x).as_real_imag() == \
((erfc(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
erfc(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
I*(erfc(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
erfc(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
raises(ArgumentIndexError, lambda: erfc(x).fdiff(2))
def test_erfc_series():
assert erfc(x).series(x, 0, 7) == 1 - 2*x/sqrt(pi) + \
2*x**3/3/sqrt(pi) - x**5/5/sqrt(pi) + O(x**7)
def test_erfc_evalf():
assert abs( erfc(Float(2.0)) - 0.00467773 ) < 1E-8 # XXX
def test_erfi():
assert erfi(nan) == nan
assert erfi(oo) == S.Infinity
assert erfi(-oo) == S.NegativeInfinity
assert erfi(0) == S.Zero
assert erfi(I*oo) == I
assert erfi(-I*oo) == -I
assert erfi(-x) == -erfi(x)
assert erfi(I*erfinv(x)) == I*x
assert erfi(I*erfcinv(x)) == I*(1 - x)
assert erfi(I*erf2inv(0, x)) == I*x
assert erfi(I).is_real is False
assert erfi(0).is_real is True
assert conjugate(erfi(z)) == erfi(conjugate(z))
assert erfi(z).rewrite('erf') == -I*erf(I*z)
assert erfi(z).rewrite('erfc') == I*erfc(I*z) - I
assert erfi(z).rewrite('fresnels') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
I*fresnels(z*(1 + I)/sqrt(pi)))
assert erfi(z).rewrite('fresnelc') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
I*fresnels(z*(1 + I)/sqrt(pi)))
assert erfi(z).rewrite('hyper') == 2*z*hyper([S.Half], [3*S.Half], z**2)/sqrt(pi)
assert erfi(z).rewrite('meijerg') == z*meijerg([S.Half], [], [0], [-S.Half], -z**2)/sqrt(pi)
assert erfi(z).rewrite('uppergamma') == (sqrt(-z**2)/z*(uppergamma(S.Half,
-z**2)/sqrt(S.Pi) - S.One))
assert erfi(z).rewrite('expint') == sqrt(-z**2)/z - z*expint(S.Half, -z**2)/sqrt(S.Pi)
assert erfi(x).as_real_imag() == \
((erfi(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
erfi(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
I*(erfi(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
erfi(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
raises(ArgumentIndexError, lambda: erfi(x).fdiff(2))
def test_erfi_series():
assert erfi(x).series(x, 0, 7) == 2*x/sqrt(pi) + \
2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
def test_erfi_evalf():
assert abs( erfi(Float(2.0)) - 18.5648024145756 ) < 1E-13 # XXX
def test_erf2():
assert erf2(0, 0) == S.Zero
assert erf2(x, x) == S.Zero
assert erf2(nan, 0) == nan
assert erf2(-oo, y) == erf(y) + 1
assert erf2( oo, y) == erf(y) - 1
assert erf2( x, oo) == 1 - erf(x)
assert erf2( x,-oo) == -1 - erf(x)
assert erf2(x, erf2inv(x, y)) == y
assert erf2(-x, -y) == -erf2(x,y)
assert erf2(-x, y) == erf(y) + erf(x)
assert erf2( x, -y) == -erf(y) - erf(x)
assert erf2(x, y).rewrite('fresnels') == erf(y).rewrite(fresnels)-erf(x).rewrite(fresnels)
assert erf2(x, y).rewrite('fresnelc') == erf(y).rewrite(fresnelc)-erf(x).rewrite(fresnelc)
assert erf2(x, y).rewrite('hyper') == erf(y).rewrite(hyper)-erf(x).rewrite(hyper)
assert erf2(x, y).rewrite('meijerg') == erf(y).rewrite(meijerg)-erf(x).rewrite(meijerg)
assert erf2(x, y).rewrite('uppergamma') == erf(y).rewrite(uppergamma) - erf(x).rewrite(uppergamma)
assert erf2(x, y).rewrite('expint') == erf(y).rewrite(expint)-erf(x).rewrite(expint)
assert erf2(I, 0).is_real is False
assert erf2(0, 0).is_real is True
#assert conjugate(erf2(x, y)) == erf2(conjugate(x), conjugate(y))
assert erf2(x, y).rewrite('erf') == erf(y) - erf(x)
assert erf2(x, y).rewrite('erfc') == erfc(x) - erfc(y)
assert erf2(x, y).rewrite('erfi') == I*(erfi(I*x) - erfi(I*y))
raises(ArgumentIndexError, lambda: erfi(x).fdiff(3))
def test_erfinv():
assert erfinv(0) == 0
assert erfinv(1) == S.Infinity
assert erfinv(nan) == S.NaN
assert erfinv(erf(w)) == w
assert erfinv(erf(-w)) == -w
assert erfinv(x).diff() == sqrt(pi)*exp(erfinv(x)**2)/2
assert erfinv(z).rewrite('erfcinv') == erfcinv(1-z)
def test_erfinv_evalf():
assert abs( erfinv(Float(0.2)) - 0.179143454621292 ) < 1E-13
def test_erfcinv():
assert erfcinv(1) == 0
assert erfcinv(0) == S.Infinity
assert erfcinv(nan) == S.NaN
assert erfcinv(x).diff() == -sqrt(pi)*exp(erfcinv(x)**2)/2
assert erfcinv(z).rewrite('erfinv') == erfinv(1-z)
def test_erf2inv():
assert erf2inv(0, 0) == S.Zero
assert erf2inv(0, 1) == S.Infinity
assert erf2inv(1, 0) == S.One
assert erf2inv(0, y) == erfinv(y)
assert erf2inv(oo,y) == erfcinv(-y)
assert erf2inv(x, y).diff(x) == exp(-x**2 + erf2inv(x, y)**2)
assert erf2inv(x, y).diff(y) == sqrt(pi)*exp(erf2inv(x, y)**2)/2
# NOTE we multiply by exp_polar(I*pi) and need this to be on the principal
# branch, hence take x in the lower half plane (d=0).
def mytn(expr1, expr2, expr3, x, d=0):
from sympy.utilities.randtest import test_numerically, random_complex_number
subs = {}
for a in expr1.free_symbols:
if a != x:
subs[a] = random_complex_number()
return expr2 == expr3 and test_numerically(expr1.subs(subs),
expr2.subs(subs), x, d=d)
def mytd(expr1, expr2, x):
from sympy.utilities.randtest import test_derivative_numerically, \
random_complex_number
subs = {}
for a in expr1.free_symbols:
if a != x:
subs[a] = random_complex_number()
return expr1.diff(x) == expr2 and test_derivative_numerically(expr1.subs(subs), x)
def tn_branch(func, s=None):
from sympy import I, pi, exp_polar
from random import uniform
def fn(x):
if s is None:
return func(x)
return func(s, x)
c = uniform(1, 5)
expr = fn(c*exp_polar(I*pi)) - fn(c*exp_polar(-I*pi))
eps = 1e-15
expr2 = fn(-c + eps*I) - fn(-c - eps*I)
return abs(expr.n() - expr2.n()).n() < 1e-10
def test_ei():
pos = Symbol('p', positive=True)
neg = Symbol('n', negative=True)
assert Ei(-pos) == Ei(polar_lift(-1)*pos) - I*pi
assert Ei(neg) == Ei(polar_lift(neg)) - I*pi
assert tn_branch(Ei)
assert mytd(Ei(x), exp(x)/x, x)
assert mytn(Ei(x), Ei(x).rewrite(uppergamma),
-uppergamma(0, x*polar_lift(-1)) - I*pi, x)
assert mytn(Ei(x), Ei(x).rewrite(expint),
-expint(1, x*polar_lift(-1)) - I*pi, x)
assert Ei(x).rewrite(expint).rewrite(Ei) == Ei(x)
assert Ei(x*exp_polar(2*I*pi)) == Ei(x) + 2*I*pi
assert Ei(x*exp_polar(-2*I*pi)) == Ei(x) - 2*I*pi
assert mytn(Ei(x), Ei(x).rewrite(Shi), Chi(x) + Shi(x), x)
assert mytn(Ei(x*polar_lift(I)), Ei(x*polar_lift(I)).rewrite(Si),
Ci(x) + I*Si(x) + I*pi/2, x)
assert Ei(log(x)).rewrite(li) == li(x)
assert Ei(2*log(x)).rewrite(li) == li(x**2)
assert gruntz(Ei(x+exp(-x))*exp(-x)*x, x, oo) == 1
def test_expint():
assert mytn(expint(x, y), expint(x, y).rewrite(uppergamma),
y**(x - 1)*uppergamma(1 - x, y), x)
assert mytd(
expint(x, y), -y**(x - 1)*meijerg([], [1, 1], [0, 0, 1 - x], [], y), x)
assert mytd(expint(x, y), -expint(x - 1, y), y)
assert mytn(expint(1, x), expint(1, x).rewrite(Ei),
-Ei(x*polar_lift(-1)) + I*pi, x)
assert expint(-4, x) == exp(-x)/x + 4*exp(-x)/x**2 + 12*exp(-x)/x**3 \
+ 24*exp(-x)/x**4 + 24*exp(-x)/x**5
assert expint(-S(3)/2, x) == \
exp(-x)/x + 3*exp(-x)/(2*x**2) - 3*sqrt(pi)*erf(sqrt(x))/(4*x**S('5/2')) \
+ 3*sqrt(pi)/(4*x**S('5/2'))
assert tn_branch(expint, 1)
assert tn_branch(expint, 2)
assert tn_branch(expint, 3)
assert tn_branch(expint, 1.7)
assert tn_branch(expint, pi)
assert expint(y, x*exp_polar(2*I*pi)) == \
x**(y - 1)*(exp(2*I*pi*y) - 1)*gamma(-y + 1) + expint(y, x)
assert expint(y, x*exp_polar(-2*I*pi)) == \
x**(y - 1)*(exp(-2*I*pi*y) - 1)*gamma(-y + 1) + expint(y, x)
assert expint(2, x*exp_polar(2*I*pi)) == 2*I*pi*x + expint(2, x)
assert expint(2, x*exp_polar(-2*I*pi)) == -2*I*pi*x + expint(2, x)
assert expint(1, x).rewrite(Ei).rewrite(expint) == expint(1, x)
assert mytn(E1(x), E1(x).rewrite(Shi), Shi(x) - Chi(x), x)
assert mytn(E1(polar_lift(I)*x), E1(polar_lift(I)*x).rewrite(Si),
-Ci(x) + I*Si(x) - I*pi/2, x)
assert mytn(expint(2, x), expint(2, x).rewrite(Ei).rewrite(expint),
-x*E1(x) + exp(-x), x)
assert mytn(expint(3, x), expint(3, x).rewrite(Ei).rewrite(expint),
x**2*E1(x)/2 + (1 - x)*exp(-x)/2, x)
def test__eis():
assert _eis(z).diff(z) == -_eis(z) + 1/z
assert _eis(1/z).series(z) == \
z + z**2 + 2*z**3 + 6*z**4 + 24*z**5 + O(z**6)
assert Ei(z).rewrite('tractable') == exp(z)*_eis(z)
assert li(z).rewrite('tractable') == z*_eis(log(z))
assert _eis(z).rewrite('intractable') == exp(-z)*Ei(z)
assert expand(li(z).rewrite('tractable').diff(z).rewrite('intractable')) \
== li(z).diff(z)
assert expand(Ei(z).rewrite('tractable').diff(z).rewrite('intractable')) \
== Ei(z).diff(z)
def tn_arg(func):
def test(arg, e1, e2):
from random import uniform
v = uniform(1, 5)
v1 = func(arg*x).subs(x, v).n()
v2 = func(e1*v + e2*1e-15).n()
return abs(v1 - v2).n() < 1e-10
return test(exp_polar(I*pi/2), I, 1) and \
test(exp_polar(-I*pi/2), -I, 1) and \
test(exp_polar(I*pi), -1, I) and \
test(exp_polar(-I*pi), -1, -I)
def test_li():
z = Symbol("z")
zr = Symbol("z", real=True)
zp = Symbol("z", positive=True)
zn = Symbol("z", negative=True)
assert li(0) == 0
assert li(1) == -oo
assert li(oo) == oo
assert isinstance(li(z), li)
assert diff(li(z), z) == 1/log(z)
assert conjugate(li(z)) == li(conjugate(z))
assert conjugate(li(-zr)) == li(-zr)
assert conjugate(li(-zp)) == conjugate(li(-zp))
assert conjugate(li(zn)) == conjugate(li(zn))
assert li(z).rewrite(Li) == Li(z) + li(2)
assert li(z).rewrite(Ei) == Ei(log(z))
assert li(z).rewrite(uppergamma) == (-log(1/log(z))/2 - log(-log(z)) +
log(log(z))/2 - expint(1, -log(z)))
assert li(z).rewrite(Si) == (-log(I*log(z)) - log(1/log(z))/2 +
log(log(z))/2 + Ci(I*log(z)) + Shi(log(z)))
assert li(z).rewrite(Ci) == (-log(I*log(z)) - log(1/log(z))/2 +
log(log(z))/2 + Ci(I*log(z)) + Shi(log(z)))
assert li(z).rewrite(Shi) == (-log(1/log(z))/2 + log(log(z))/2 +
Chi(log(z)) - Shi(log(z)))
assert li(z).rewrite(Chi) == (-log(1/log(z))/2 + log(log(z))/2 +
Chi(log(z)) - Shi(log(z)))
assert li(z).rewrite(hyper) ==(log(z)*hyper((1, 1), (2, 2), log(z)) -
log(1/log(z))/2 + log(log(z))/2 + EulerGamma)
assert li(z).rewrite(meijerg) == (-log(1/log(z))/2 - log(-log(z)) + log(log(z))/2 -
meijerg(((), (1,)), ((0, 0), ()), -log(z)))
assert gruntz(1/li(z), z, oo) == 0
def test_Li():
assert Li(2) == 0
assert Li(oo) == oo
assert isinstance(Li(z), Li)
assert diff(Li(z), z) == 1/log(z)
assert gruntz(1/Li(z), z, oo) == 0
assert Li(z).rewrite(li) == li(z) - li(2)
def test_si():
assert Si(I*x) == I*Shi(x)
assert Shi(I*x) == I*Si(x)
assert Si(-I*x) == -I*Shi(x)
assert Shi(-I*x) == -I*Si(x)
assert Si(-x) == -Si(x)
assert Shi(-x) == -Shi(x)
assert Si(exp_polar(2*pi*I)*x) == Si(x)
assert Si(exp_polar(-2*pi*I)*x) == Si(x)
assert Shi(exp_polar(2*pi*I)*x) == Shi(x)
assert Shi(exp_polar(-2*pi*I)*x) == Shi(x)
assert Si(oo) == pi/2
assert Si(-oo) == -pi/2
assert Shi(oo) == oo
assert Shi(-oo) == -oo
assert mytd(Si(x), sin(x)/x, x)
assert mytd(Shi(x), sinh(x)/x, x)
assert mytn(Si(x), Si(x).rewrite(Ei),
-I*(-Ei(x*exp_polar(-I*pi/2))/2
+ Ei(x*exp_polar(I*pi/2))/2 - I*pi) + pi/2, x)
assert mytn(Si(x), Si(x).rewrite(expint),
-I*(-expint(1, x*exp_polar(-I*pi/2))/2 +
expint(1, x*exp_polar(I*pi/2))/2) + pi/2, x)
assert mytn(Shi(x), Shi(x).rewrite(Ei),
Ei(x)/2 - Ei(x*exp_polar(I*pi))/2 + I*pi/2, x)
assert mytn(Shi(x), Shi(x).rewrite(expint),
expint(1, x)/2 - expint(1, x*exp_polar(I*pi))/2 - I*pi/2, x)
assert tn_arg(Si)
assert tn_arg(Shi)
assert Si(x).nseries(x, n=8) == \
x - x**3/18 + x**5/600 - x**7/35280 + O(x**9)
assert Shi(x).nseries(x, n=8) == \
x + x**3/18 + x**5/600 + x**7/35280 + O(x**9)
assert Si(sin(x)).nseries(x, n=5) == x - 2*x**3/9 + 17*x**5/450 + O(x**6)
assert Si(x).nseries(x, 1, n=3) == \
Si(1) + x*sin(1) + x**2*(-sin(1)/2 + cos(1)/2) + O(x**3)
def test_ci():
m1 = exp_polar(I*pi)
m1_ = exp_polar(-I*pi)
pI = exp_polar(I*pi/2)
mI = exp_polar(-I*pi/2)
assert Ci(m1*x) == Ci(x) + I*pi
assert Ci(m1_*x) == Ci(x) - I*pi
assert Ci(pI*x) == Chi(x) + I*pi/2
assert Ci(mI*x) == Chi(x) - I*pi/2
assert Chi(m1*x) == Chi(x) + I*pi
assert Chi(m1_*x) == Chi(x) - I*pi
assert Chi(pI*x) == Ci(x) + I*pi/2
assert Chi(mI*x) == Ci(x) - I*pi/2
assert Ci(exp_polar(2*I*pi)*x) == Ci(x) + 2*I*pi
assert Chi(exp_polar(-2*I*pi)*x) == Chi(x) - 2*I*pi
assert Chi(exp_polar(2*I*pi)*x) == Chi(x) + 2*I*pi
assert Ci(exp_polar(-2*I*pi)*x) == Ci(x) - 2*I*pi
assert Ci(oo) == 0
assert Ci(-oo) == I*pi
assert Chi(oo) == oo
assert Chi(-oo) == oo
assert mytd(Ci(x), cos(x)/x, x)
assert mytd(Chi(x), cosh(x)/x, x)
assert mytn(Ci(x), Ci(x).rewrite(Ei),
Ei(x*exp_polar(-I*pi/2))/2 + Ei(x*exp_polar(I*pi/2))/2, x)
assert mytn(Chi(x), Chi(x).rewrite(Ei),
Ei(x)/2 + Ei(x*exp_polar(I*pi))/2 - I*pi/2, x)
assert tn_arg(Ci)
assert tn_arg(Chi)
from sympy import O, EulerGamma, log, limit
assert Ci(x).nseries(x, n=4) == \
EulerGamma + log(x) - x**2/4 + x**4/96 + O(x**5)
assert Chi(x).nseries(x, n=4) == \
EulerGamma + log(x) + x**2/4 + x**4/96 + O(x**5)
assert limit(log(x) - Ci(2*x), x, 0) == -log(2) - EulerGamma
def test_fresnel():
assert fresnels(0) == 0
assert fresnels(oo) == S.Half
assert fresnels(-oo) == -S.Half
assert fresnels(z) == fresnels(z)
assert fresnels(-z) == -fresnels(z)
assert fresnels(I*z) == -I*fresnels(z)
assert fresnels(-I*z) == I*fresnels(z)
assert conjugate(fresnels(z)) == fresnels(conjugate(z))
assert fresnels(z).diff(z) == sin(pi*z**2/2)
assert fresnels(z).rewrite(erf) == (S.One + I)/4 * (
erf((S.One + I)/2*sqrt(pi)*z) - I*erf((S.One - I)/2*sqrt(pi)*z))
assert fresnels(z).rewrite(hyper) == \
pi*z**3/6 * hyper([S(3)/4], [S(3)/2, S(7)/4], -pi**2*z**4/16)
assert fresnels(z).series(z, n=15) == \
pi*z**3/6 - pi**3*z**7/336 + pi**5*z**11/42240 + O(z**15)
assert fresnels(w).is_real is True
assert fresnels(z).as_real_imag() == \
((fresnels(re(z) - I*re(z)*Abs(im(z))/Abs(re(z)))/2 +
fresnels(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))/2,
I*(fresnels(re(z) - I*re(z)*Abs(im(z))/Abs(re(z))) -
fresnels(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))) *
re(z)*Abs(im(z))/(2*im(z)*Abs(re(z)))))
assert fresnels(2 + 3*I).as_real_imag() == (
fresnels(2 + 3*I)/2 + fresnels(2 - 3*I)/2,
I*(fresnels(2 - 3*I) - fresnels(2 + 3*I))/2
)
assert expand_func(integrate(fresnels(z), z)) == \
z*fresnels(z) + cos(pi*z**2/2)/pi
assert fresnels(z).rewrite(meijerg) == sqrt(2)*pi*z**(S(9)/4) * \
meijerg(((), (1,)), ((S(3)/4,),
(S(1)/4, 0)), -pi**2*z**4/16)/(2*(-z)**(S(3)/4)*(z**2)**(S(3)/4))
assert fresnelc(0) == 0
assert fresnelc(oo) == S.Half
assert fresnelc(-oo) == -S.Half
assert fresnelc(z) == fresnelc(z)
assert fresnelc(-z) == -fresnelc(z)
assert fresnelc(I*z) == I*fresnelc(z)
assert fresnelc(-I*z) == -I*fresnelc(z)
assert conjugate(fresnelc(z)) == fresnelc(conjugate(z))
assert fresnelc(z).diff(z) == cos(pi*z**2/2)
assert fresnelc(z).rewrite(erf) == (S.One - I)/4 * (
erf((S.One + I)/2*sqrt(pi)*z) + I*erf((S.One - I)/2*sqrt(pi)*z))
assert fresnelc(z).rewrite(hyper) == \
z * hyper([S.One/4], [S.One/2, S(5)/4], -pi**2*z**4/16)
assert fresnelc(z).series(z, n=15) == \
z - pi**2*z**5/40 + pi**4*z**9/3456 - pi**6*z**13/599040 + O(z**15)
assert fresnelc(w).is_real is True
assert fresnelc(z).as_real_imag() == \
((fresnelc(re(z) - I*re(z)*Abs(im(z))/Abs(re(z)))/2 +
fresnelc(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))/2,
I*(fresnelc(re(z) - I*re(z)*Abs(im(z))/Abs(re(z))) -
fresnelc(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))) *
re(z)*Abs(im(z))/(2*im(z)*Abs(re(z)))))
assert fresnelc(2 + 3*I).as_real_imag() == (
fresnelc(2 - 3*I)/2 + fresnelc(2 + 3*I)/2,
I*(fresnelc(2 - 3*I) - fresnelc(2 + 3*I))/2
)
assert expand_func(integrate(fresnelc(z), z)) == \
z*fresnelc(z) - sin(pi*z**2/2)/pi
assert fresnelc(z).rewrite(meijerg) == sqrt(2)*pi*z**(S(3)/4) * \
meijerg(((), (1,)), ((S(1)/4,),
(S(3)/4, 0)), -pi**2*z**4/16)/(2*(-z)**(S(1)/4)*(z**2)**(S(1)/4))
from sympy.utilities.randtest import test_numerically
test_numerically(re(fresnels(z)), fresnels(z).as_real_imag()[0], z)
test_numerically(im(fresnels(z)), fresnels(z).as_real_imag()[1], z)
test_numerically(fresnels(z), fresnels(z).rewrite(hyper), z)
test_numerically(fresnels(z), fresnels(z).rewrite(meijerg), z)
test_numerically(re(fresnelc(z)), fresnelc(z).as_real_imag()[0], z)
test_numerically(im(fresnelc(z)), fresnelc(z).as_real_imag()[1], z)
test_numerically(fresnelc(z), fresnelc(z).rewrite(hyper), z)
test_numerically(fresnelc(z), fresnelc(z).rewrite(meijerg), z)
| gpl-3.0 | -2,145,602,947,358,987,500 | 34.180404 | 102 | 0.530878 | false |
DaveA50/lbry | lbrynet/reflector/client/client.py | 1 | 16353 | """
The reflector protocol (all dicts encoded in json):
Client Handshake (sent once per connection, at the start of the connection):
{
'version': 0,
}
Server Handshake (sent once per connection, after receiving the client handshake):
{
'version': 0,
}
Client Info Request:
{
'blob_hash': "<blob_hash>",
'blob_size': <blob_size>
}
Server Info Response (sent in response to Client Info Request):
{
'send_blob': True|False
}
If response is 'YES', client may send a Client Blob Request or a Client Info Request.
If response is 'NO', client may only send a Client Info Request
Client Blob Request:
{} # Yes, this is an empty dictionary, in case something needs to go here in the future
<raw blob_data> # this blob data must match the info sent in the most recent Client Info Request
Server Blob Response (sent in response to Client Blob Request):
{
'received_blob': True
}
Client may now send another Client Info Request
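Example of a complete exchange (the hash and size values below are illustrative only):
    client: {'version': 0}
    server: {'version': 0}
    client: {'blob_hash': '9a3c...', 'blob_size': 2097152}
    server: {'send_blob': True}
    client: {} followed by the 2097152 raw bytes of the blob
    server: {'received_blob': True}
    client: may now send the next Client Info Request, or close the connection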
"""
import json
import logging
from twisted.protocols.basic import FileSender
from twisted.internet.protocol import Protocol, ClientFactory
from twisted.internet import defer, error
log = logging.getLogger(__name__)
class IncompleteResponseError(Exception):
pass
class EncryptedFileReflectorClient(Protocol):
# Protocol stuff
def connectionMade(self):
self.blob_manager = self.factory.blob_manager
self.response_buff = ''
self.outgoing_buff = ''
self.blob_hashes_to_send = []
self.next_blob_to_send = None
self.blob_read_handle = None
self.received_handshake_response = False
self.protocol_version = None
self.file_sender = None
self.producer = None
self.streaming = False
d = self.get_blobs_to_send(self.factory.stream_info_manager, self.factory.stream_hash)
d.addCallback(lambda _: self.send_handshake())
d.addErrback(lambda err: log.warning("An error occurred immediately: %s", err.getTraceback()))
def dataReceived(self, data):
        log.debug('Received %s', data)
self.response_buff += data
try:
msg = self.parse_response(self.response_buff)
except IncompleteResponseError:
pass
else:
self.response_buff = ''
d = self.handle_response(msg)
d.addCallback(lambda _: self.send_next_request())
d.addErrback(self.response_failure_handler)
def connectionLost(self, reason):
if reason.check(error.ConnectionDone):
log.debug('Finished sending data via reflector')
self.factory.finished_deferred.callback(True)
else:
log.debug('reflector finished: %s', reason)
self.factory.finished_deferred.callback(reason)
# IConsumer stuff
def registerProducer(self, producer, streaming):
self.producer = producer
self.streaming = streaming
if self.streaming is False:
from twisted.internet import reactor
reactor.callLater(0, self.producer.resumeProducing)
def unregisterProducer(self):
self.producer = None
def write(self, data):
self.transport.write(data)
if self.producer is not None and self.streaming is False:
from twisted.internet import reactor
reactor.callLater(0, self.producer.resumeProducing)
def get_blobs_to_send(self, stream_info_manager, stream_hash):
log.debug('Getting blobs from stream hash: %s', stream_hash)
d = stream_info_manager.get_blobs_for_stream(stream_hash)
def set_blobs(blob_hashes):
for blob_hash, position, iv, length in blob_hashes:
log.info("Preparing to send %s", blob_hash)
if blob_hash is not None:
self.blob_hashes_to_send.append(blob_hash)
d.addCallback(set_blobs)
d.addCallback(lambda _: stream_info_manager.get_sd_blob_hashes_for_stream(stream_hash))
def set_sd_blobs(sd_blob_hashes):
for sd_blob_hash in sd_blob_hashes:
self.blob_hashes_to_send.append(sd_blob_hash)
d.addCallback(set_sd_blobs)
return d
def send_handshake(self):
log.debug('Sending handshake')
self.write(json.dumps({'version': 0}))
def parse_response(self, buff):
try:
return json.loads(buff)
except ValueError:
raise IncompleteResponseError()
def response_failure_handler(self, err):
log.warning("An error occurred handling the response: %s", err.getTraceback())
def handle_response(self, response_dict):
if self.received_handshake_response is False:
return self.handle_handshake_response(response_dict)
else:
return self.handle_normal_response(response_dict)
def set_not_uploading(self):
if self.next_blob_to_send is not None:
self.next_blob_to_send.close_read_handle(self.read_handle)
self.read_handle = None
self.next_blob_to_send = None
self.file_sender = None
return defer.succeed(None)
def start_transfer(self):
self.write(json.dumps({}))
assert self.read_handle is not None, "self.read_handle was None when trying to start the transfer"
d = self.file_sender.beginFileTransfer(self.read_handle, self)
return d
def handle_handshake_response(self, response_dict):
if 'version' not in response_dict:
raise ValueError("Need protocol version number!")
self.protocol_version = int(response_dict['version'])
if self.protocol_version != 0:
raise ValueError("I can't handle protocol version {}!".format(self.protocol_version))
self.received_handshake_response = True
return defer.succeed(True)
def handle_normal_response(self, response_dict):
if self.file_sender is None: # Expecting Server Info Response
if 'send_blob' not in response_dict:
raise ValueError("I don't know whether to send the blob or not!")
if response_dict['send_blob'] is True:
self.file_sender = FileSender()
return defer.succeed(True)
else:
return self.set_not_uploading()
else: # Expecting Server Blob Response
if 'received_blob' not in response_dict:
raise ValueError("I don't know if the blob made it to the intended destination!")
else:
return self.set_not_uploading()
def open_blob_for_reading(self, blob):
if blob.is_validated():
read_handle = blob.open_for_reading()
if read_handle is not None:
log.debug('Getting ready to send %s', blob.blob_hash)
self.next_blob_to_send = blob
self.read_handle = read_handle
return None
raise ValueError("Couldn't open that blob for some reason. blob_hash: {}".format(blob.blob_hash))
def send_blob_info(self):
log.info("Send blob info for %s", self.next_blob_to_send.blob_hash)
assert self.next_blob_to_send is not None, "need to have a next blob to send at this point"
log.debug('sending blob info')
self.write(json.dumps({
'blob_hash': self.next_blob_to_send.blob_hash,
'blob_size': self.next_blob_to_send.length
}))
def send_next_request(self):
if self.file_sender is not None:
# send the blob
log.debug('Sending the blob')
return self.start_transfer()
elif self.blob_hashes_to_send:
# open the next blob to send
blob_hash = self.blob_hashes_to_send[0]
log.debug('No current blob, sending the next one: %s', blob_hash)
self.blob_hashes_to_send = self.blob_hashes_to_send[1:]
d = self.blob_manager.get_blob(blob_hash, True)
d.addCallback(self.open_blob_for_reading)
# send the server the next blob hash + length
d.addCallback(lambda _: self.send_blob_info())
return d
else:
# close connection
log.debug('No more blob hashes, closing connection')
self.transport.loseConnection()
class EncryptedFileReflectorClientFactory(ClientFactory):
protocol = EncryptedFileReflectorClient
def __init__(self, blob_manager, stream_info_manager, stream_hash):
self.blob_manager = blob_manager
self.stream_info_manager = stream_info_manager
self.stream_hash = stream_hash
self.p = None
self.finished_deferred = defer.Deferred()
def buildProtocol(self, addr):
p = self.protocol()
p.factory = self
self.p = p
return p
def startFactory(self):
log.debug('Starting reflector factory')
ClientFactory.startFactory(self)
def startedConnecting(self, connector):
log.debug('Started connecting')
def clientConnectionLost(self, connector, reason):
"""If we get disconnected, reconnect to server."""
log.debug("connection lost: %s", reason)
def clientConnectionFailed(self, connector, reason):
log.debug("connection failed: %s", reason)
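# A minimal usage sketch (not part of the original module): wiring the factory above
# to a reflector server with Twisted. The host and port below are placeholders, and
# blob_manager / stream_info_manager / stream_hash are assumed to be supplied by the caller.
def _example_reflect_stream(blob_manager, stream_info_manager, stream_hash,
                            host='reflector.example.com', port=5566):
    from twisted.internet import reactor
    factory = EncryptedFileReflectorClientFactory(blob_manager, stream_info_manager, stream_hash)
    reactor.connectTCP(host, port, factory)
    # finished_deferred fires with True when all blobs were sent, or with the failure reason
    return factory.finished_deferred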
class BlobReflectorClient(Protocol):
# Protocol stuff
def connectionMade(self):
self.blob_manager = self.factory.blob_manager
self.response_buff = ''
self.outgoing_buff = ''
self.blob_hashes_to_send = self.factory.blobs
self.next_blob_to_send = None
self.blob_read_handle = None
self.received_handshake_response = False
self.protocol_version = None
self.file_sender = None
self.producer = None
self.streaming = False
d = self.send_handshake()
d.addErrback(lambda err: log.warning("An error occurred immediately: %s", err.getTraceback()))
def dataReceived(self, data):
        log.debug('Received %s', data)
self.response_buff += data
try:
msg = self.parse_response(self.response_buff)
except IncompleteResponseError:
pass
else:
self.response_buff = ''
d = self.handle_response(msg)
d.addCallback(lambda _: self.send_next_request())
d.addErrback(self.response_failure_handler)
def connectionLost(self, reason):
if reason.check(error.ConnectionDone):
log.debug('Finished sending data via reflector')
self.factory.finished_deferred.callback(True)
else:
log.debug('reflector finished: %s', reason)
self.factory.finished_deferred.callback(reason)
# IConsumer stuff
def registerProducer(self, producer, streaming):
self.producer = producer
self.streaming = streaming
if self.streaming is False:
from twisted.internet import reactor
reactor.callLater(0, self.producer.resumeProducing)
def unregisterProducer(self):
self.producer = None
def write(self, data):
self.transport.write(data)
if self.producer is not None and self.streaming is False:
from twisted.internet import reactor
reactor.callLater(0, self.producer.resumeProducing)
def send_handshake(self):
log.debug('Sending handshake')
self.write(json.dumps({'version': 0}))
return defer.succeed(None)
def parse_response(self, buff):
try:
return json.loads(buff)
except ValueError:
raise IncompleteResponseError()
def response_failure_handler(self, err):
log.warning("An error occurred handling the response: %s", err.getTraceback())
def handle_response(self, response_dict):
if self.received_handshake_response is False:
return self.handle_handshake_response(response_dict)
else:
return self.handle_normal_response(response_dict)
def set_not_uploading(self):
if self.next_blob_to_send is not None:
self.next_blob_to_send.close_read_handle(self.read_handle)
self.read_handle = None
self.next_blob_to_send = None
self.file_sender = None
return defer.succeed(None)
def start_transfer(self):
self.write(json.dumps({}))
assert self.read_handle is not None, "self.read_handle was None when trying to start the transfer"
d = self.file_sender.beginFileTransfer(self.read_handle, self)
return d
def handle_handshake_response(self, response_dict):
if 'version' not in response_dict:
raise ValueError("Need protocol version number!")
self.protocol_version = int(response_dict['version'])
if self.protocol_version != 0:
raise ValueError("I can't handle protocol version {}!".format(self.protocol_version))
self.received_handshake_response = True
return defer.succeed(True)
def handle_normal_response(self, response_dict):
if self.file_sender is None: # Expecting Server Info Response
if 'send_blob' not in response_dict:
raise ValueError("I don't know whether to send the blob or not!")
if response_dict['send_blob'] is True:
self.file_sender = FileSender()
return defer.succeed(True)
else:
return self.set_not_uploading()
else: # Expecting Server Blob Response
if 'received_blob' not in response_dict:
raise ValueError("I don't know if the blob made it to the intended destination!")
else:
return self.set_not_uploading()
def open_blob_for_reading(self, blob):
if blob.is_validated():
read_handle = blob.open_for_reading()
if read_handle is not None:
log.debug('Getting ready to send %s', blob.blob_hash)
self.next_blob_to_send = blob
self.read_handle = read_handle
return None
raise ValueError("Couldn't open that blob for some reason. blob_hash: {}".format(blob.blob_hash))
def send_blob_info(self):
log.info("Send blob info for %s", self.next_blob_to_send.blob_hash)
assert self.next_blob_to_send is not None, "need to have a next blob to send at this point"
log.debug('sending blob info')
self.write(json.dumps({
'blob_hash': self.next_blob_to_send.blob_hash,
'blob_size': self.next_blob_to_send.length
}))
def send_next_request(self):
if self.file_sender is not None:
# send the blob
log.debug('Sending the blob')
return self.start_transfer()
elif self.blob_hashes_to_send:
# open the next blob to send
blob_hash = self.blob_hashes_to_send[0]
log.debug('No current blob, sending the next one: %s', blob_hash)
self.blob_hashes_to_send = self.blob_hashes_to_send[1:]
d = self.blob_manager.get_blob(blob_hash, True)
d.addCallback(self.open_blob_for_reading)
# send the server the next blob hash + length
d.addCallback(lambda _: self.send_blob_info())
return d
else:
# close connection
log.debug('No more blob hashes, closing connection')
self.transport.loseConnection()
class BlobReflectorClientFactory(ClientFactory):
protocol = BlobReflectorClient
def __init__(self, blob_manager, blobs):
self.blob_manager = blob_manager
self.blobs = blobs
self.p = None
self.finished_deferred = defer.Deferred()
def buildProtocol(self, addr):
p = self.protocol()
p.factory = self
self.p = p
return p
def startFactory(self):
log.debug('Starting reflector factory')
ClientFactory.startFactory(self)
def startedConnecting(self, connector):
log.debug('Started connecting')
def clientConnectionLost(self, connector, reason):
"""If we get disconnected, reconnect to server."""
log.debug("connection lost: %s", reason)
def clientConnectionFailed(self, connector, reason):
log.debug("connection failed: %s", reason)
| mit | -1,718,397,455,770,459,100 | 35.019824 | 106 | 0.624411 | false |
rsalmei/clearly | tests/unit/server/test_server.py | 1 | 4150 | from unittest import mock
import pytest
from clearly.protos.clearly_pb2 import CaptureRequest, FilterTasksRequest, FilterWorkersRequest, \
Null, PatternFilter, RealtimeMessage, TaskMessage, WorkerMessage
from clearly.server.server import RPCService
@pytest.fixture
def mocked_rpc():
with mock.patch('clearly.server.server.RPCService._log_request'):
yield RPCService(mock.Mock(), mock.MagicMock(), mock.MagicMock())
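# The `tristate` argument used below is not defined in this module; in the
# real test suite it is expected to come from a shared conftest.py fixture
# parametrized over True, False and None (those are the branches that
# test_server_capture_realtime exercises). A minimal sketch of such a
# fixture, shown here as an assumption so the test reads self-contained:
@pytest.fixture(params=(True, False, None))
def tristate(request):
    return request.param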
def test_server_capture_realtime(tristate, mocked_rpc):
capture_data, queue_data = {}, []
if tristate is not True: # enters in False and None
capture_data.update(tasks_capture=PatternFilter(pattern='tp', negate=True))
queue_data.append(TaskMessage(timestamp=123.1))
if tristate is not False: # enters in True and None
capture_data.update(workers_capture=PatternFilter(pattern='wp', negate=False))
queue_data.append(WorkerMessage(timestamp=123.2))
request = CaptureRequest(**capture_data)
mdt = mocked_rpc.dispatcher_tasks.streaming_capture
mdw = mocked_rpc.dispatcher_workers.streaming_capture
with mock.patch('queue.Queue.get') as mqg:
mqg.side_effect = queue_data
gen = mocked_rpc.capture_realtime(request, None)
if tristate is not True:
result = next(gen)
mdt.assert_called_once_with(PatternFilter(pattern='tp', negate=True), mock.ANY)
assert result == RealtimeMessage(task=TaskMessage(timestamp=123.1))
if tristate is not False:
result = next(gen)
mdw.assert_called_once_with(PatternFilter(pattern='wp', negate=False), mock.ANY)
assert result == RealtimeMessage(worker=WorkerMessage(timestamp=123.2))
def test_server_filter_tasks(mocked_rpc):
request = FilterTasksRequest(tasks_filter=PatternFilter(pattern='tp', negate=True))
task = mock.Mock()
mocked_rpc.memory.tasks_by_time.return_value = (('_', task),)
with mock.patch('clearly.server.server.accept_task') as ma, \
mock.patch('clearly.server.server.obj_to_message') as otm:
ma.return_value = True
otm.return_value = 'asd'
gen = mocked_rpc.filter_tasks(request, None)
result = next(gen)
otm.assert_called_once_with(task, TaskMessage)
assert result == 'asd'
def test_server_filter_tasks_empty(mocked_rpc):
mlmt = mocked_rpc.memory.tasks_by_time
mlmt.return_value = ()
gen = mocked_rpc.filter_tasks(FilterTasksRequest(), None)
with pytest.raises(StopIteration):
next(gen)
def test_server_filter_workers(mocked_rpc):
request = FilterWorkersRequest(workers_filter=PatternFilter(pattern='wp', negate=True))
worker = mock.Mock()
mocked_rpc.memory.workers.values.return_value = (worker,)
with mock.patch('clearly.server.server.accept_worker') as ma, \
mock.patch('clearly.server.server.obj_to_message') as otm:
ma.return_value = True
otm.return_value = 'asd'
gen = mocked_rpc.filter_workers(request, None)
result = next(gen)
otm.assert_called_once_with(worker, WorkerMessage)
assert result == 'asd'
def test_server_filter_workers_empty(mocked_rpc):
mocked_rpc.memory.workers.values.return_value = ()
gen = mocked_rpc.filter_workers(FilterWorkersRequest(), None)
with pytest.raises(StopIteration):
next(gen)
def test_server_seen_tasks(mocked_rpc):
expected = ('t1', 't2')
mlmtt = mocked_rpc.memory.task_types
mlmtt.return_value = expected
result = mocked_rpc.seen_tasks(Null(), None)
assert result.task_types == list(expected)
def test_server_reset_tasks(mocked_rpc):
mocked_rpc.reset_tasks(Null(), None)
assert mocked_rpc.memory.clear_tasks.call_count == 1
def test_server_get_stats(mocked_rpc):
mlm = mocked_rpc.memory
mlm.task_count = 1
mlm.event_count = 2
mlm.tasks.__len__ = mock.Mock(return_value=3)
mlm.workers.__len__ = mock.Mock(return_value=4)
result = mocked_rpc.get_metrics(Null(), None)
assert result.task_count == 1
assert result.event_count == 2
assert result.len_tasks == 3
assert result.len_workers == 4
| mit | 741,390,945,285,774,200 | 33.583333 | 98 | 0.682169 | false |
okajun35/python_for_android_doc_ja | pythonforandroid/bootstrap.py | 2 | 11512 | from os.path import (join, dirname, isdir, splitext, basename, realpath)
from os import listdir, mkdir
import sh
import glob
import json
import importlib
from pythonforandroid.logger import (warning, shprint, info, logger,
debug)
from pythonforandroid.util import (current_directory, ensure_dir,
temp_directory, which)
from pythonforandroid.recipe import Recipe
class Bootstrap(object):
    '''An Android project template, containing the recipe requirements
    needed for compilation and templated fields for APK info.
'''
name = ''
jni_subdir = '/jni'
ctx = None
bootstrap_dir = None
build_dir = None
dist_dir = None
dist_name = None
distribution = None
recipe_depends = ['sdl2']
can_be_chosen_automatically = True
'''Determines whether the bootstrap can be chosen as one that
satisfies user requirements. If False, it will not be returned
from Bootstrap.get_bootstrap_from_recipes.
'''
# Other things a Bootstrap might need to track (maybe separately):
# ndk_main.c
# whitelist.txt
# blacklist.txt
@property
def dist_dir(self):
'''The dist dir at which to place the finished distribution.'''
if self.distribution is None:
warning('Tried to access {}.dist_dir, but {}.distribution '
'is None'.format(self, self))
exit(1)
return self.distribution.dist_dir
@property
def jni_dir(self):
return self.name + self.jni_subdir
def check_recipe_choices(self):
'''Checks what recipes are being built to see which of the alternative
and optional dependencies are being used,
and returns a list of these.'''
recipes = []
built_recipes = self.ctx.recipe_build_order
for recipe in self.recipe_depends:
if isinstance(recipe, (tuple, list)):
for alternative in recipe:
if alternative in built_recipes:
recipes.append(alternative)
break
return sorted(recipes)
def get_build_dir_name(self):
choices = self.check_recipe_choices()
dir_name = '-'.join([self.name] + choices)
return dir_name
def get_build_dir(self):
return join(self.ctx.build_dir, 'bootstrap_builds', self.get_build_dir_name())
def get_dist_dir(self, name):
return join(self.ctx.dist_dir, name)
@property
def name(self):
modname = self.__class__.__module__
return modname.split(".", 2)[-1]
def prepare_build_dir(self):
'''Ensure that a build dir exists for the recipe. This same single
dir will be used for building all different archs.'''
self.build_dir = self.get_build_dir()
shprint(sh.cp, '-r',
join(self.bootstrap_dir, 'build'),
self.build_dir)
if self.ctx.symlink_java_src:
info('Symlinking java src instead of copying')
shprint(sh.rm, '-r', join(self.build_dir, 'src'))
shprint(sh.mkdir, join(self.build_dir, 'src'))
for dirn in listdir(join(self.bootstrap_dir, 'build', 'src')):
shprint(sh.ln, '-s', join(self.bootstrap_dir, 'build', 'src', dirn),
join(self.build_dir, 'src'))
with current_directory(self.build_dir):
with open('project.properties', 'w') as fileh:
fileh.write('target=android-{}'.format(self.ctx.android_api))
def prepare_dist_dir(self, name):
# self.dist_dir = self.get_dist_dir(name)
ensure_dir(self.dist_dir)
def run_distribute(self):
# print('Default bootstrap being used doesn\'t know how '
# 'to distribute...failing.')
# exit(1)
with current_directory(self.dist_dir):
info('Saving distribution info')
with open('dist_info.json', 'w') as fileh:
json.dump({'dist_name': self.ctx.dist_name,
'bootstrap': self.ctx.bootstrap.name,
'archs': [arch.arch for arch in self.ctx.archs],
'recipes': self.ctx.recipe_build_order + self.ctx.python_modules},
fileh)
@classmethod
def list_bootstraps(cls):
'''Find all the available bootstraps and return them.'''
forbidden_dirs = ('__pycache__', )
bootstraps_dir = join(dirname(__file__), 'bootstraps')
for name in listdir(bootstraps_dir):
if name in forbidden_dirs:
continue
filen = join(bootstraps_dir, name)
if isdir(filen):
yield name
@classmethod
def get_bootstrap_from_recipes(cls, recipes, ctx):
'''Returns a bootstrap whose recipe requirements do not conflict with
the given recipes.'''
info('Trying to find a bootstrap that matches the given recipes.')
bootstraps = [cls.get_bootstrap(name, ctx)
for name in cls.list_bootstraps()]
acceptable_bootstraps = []
for bs in bootstraps:
if not bs.can_be_chosen_automatically:
continue
possible_dependency_lists = expand_dependencies(bs.recipe_depends)
for possible_dependencies in possible_dependency_lists:
ok = True
for recipe in possible_dependencies:
recipe = Recipe.get_recipe(recipe, ctx)
if any([conflict in recipes for conflict in recipe.conflicts]):
ok = False
break
for recipe in recipes:
try:
recipe = Recipe.get_recipe(recipe, ctx)
except IOError:
conflicts = []
else:
conflicts = recipe.conflicts
if any([conflict in possible_dependencies
for conflict in conflicts]):
ok = False
break
if ok:
acceptable_bootstraps.append(bs)
info('Found {} acceptable bootstraps: {}'.format(
len(acceptable_bootstraps),
[bs.name for bs in acceptable_bootstraps]))
if acceptable_bootstraps:
info('Using the first of these: {}'
.format(acceptable_bootstraps[0].name))
return acceptable_bootstraps[0]
return None
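    # Reader's note (not part of the original source): selection above is
    # purely conflict-driven.  Each bootstrap's recipe_depends is expanded
    # into every possible alternative list (see expand_dependencies below),
    # and a bootstrap is acceptable if at least one of those lists neither
    # conflicts with, nor is conflicted by, the requested recipes.  The first
    # acceptable bootstrap wins.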
@classmethod
def get_bootstrap(cls, name, ctx):
'''Returns an instance of a bootstrap with the given name.
This is the only way you should access a bootstrap class, as
it sets the bootstrap directory correctly.
'''
# AND: This method will need to check user dirs, and access
# bootstraps in a slightly different way
if name is None:
return None
if not hasattr(cls, 'bootstraps'):
cls.bootstraps = {}
if name in cls.bootstraps:
return cls.bootstraps[name]
mod = importlib.import_module('pythonforandroid.bootstraps.{}'
.format(name))
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[1])
bootstrap = mod.bootstrap
bootstrap.bootstrap_dir = join(ctx.root_dir, 'bootstraps', name)
bootstrap.ctx = ctx
return bootstrap
def distribute_libs(self, arch, src_dirs, wildcard='*'):
'''Copy existing arch libs from build dirs to current dist dir.'''
info('Copying libs')
tgt_dir = join('libs', arch.arch)
ensure_dir(tgt_dir)
for src_dir in src_dirs:
for lib in glob.glob(join(src_dir, wildcard)):
shprint(sh.cp, '-a', lib, tgt_dir)
def distribute_javaclasses(self, javaclass_dir):
'''Copy existing javaclasses from build dir to current dist dir.'''
info('Copying java files')
for filename in glob.glob(javaclass_dir):
shprint(sh.cp, '-a', filename, 'src')
def distribute_aars(self, arch):
'''Process existing .aar bundles and copy to current dist dir.'''
info('Unpacking aars')
for aar in glob.glob(join(self.ctx.aars_dir, '*.aar')):
self._unpack_aar(aar, arch)
def _unpack_aar(self, aar, arch):
'''Unpack content of .aar bundle and copy to current dist dir.'''
with temp_directory() as temp_dir:
name = splitext(basename(aar))[0]
jar_name = name + '.jar'
info("unpack {} aar".format(name))
debug(" from {}".format(aar))
debug(" to {}".format(temp_dir))
shprint(sh.unzip, '-o', aar, '-d', temp_dir)
jar_src = join(temp_dir, 'classes.jar')
jar_tgt = join('libs', jar_name)
debug("copy {} jar".format(name))
debug(" from {}".format(jar_src))
debug(" to {}".format(jar_tgt))
ensure_dir('libs')
shprint(sh.cp, '-a', jar_src, jar_tgt)
so_src_dir = join(temp_dir, 'jni', arch.arch)
so_tgt_dir = join('libs', arch.arch)
debug("copy {} .so".format(name))
debug(" from {}".format(so_src_dir))
debug(" to {}".format(so_tgt_dir))
ensure_dir(so_tgt_dir)
so_files = glob.glob(join(so_src_dir, '*.so'))
for f in so_files:
shprint(sh.cp, '-a', f, so_tgt_dir)
def strip_libraries(self, arch):
info('Stripping libraries')
if self.ctx.python_recipe.from_crystax:
info('Python was loaded from CrystaX, skipping strip')
return
env = arch.get_env()
strip = which('arm-linux-androideabi-strip', env['PATH'])
if strip is None:
warning('Can\'t find strip in PATH...')
return
strip = sh.Command(strip)
filens = shprint(sh.find, join(self.dist_dir, 'private'),
join(self.dist_dir, 'libs'),
'-iname', '*.so', _env=env).stdout.decode('utf-8')
logger.info('Stripping libraries in private dir')
for filen in filens.split('\n'):
try:
strip(filen, _env=env)
except sh.ErrorReturnCode_1:
logger.debug('Failed to strip ' + filen)
def fry_eggs(self, sitepackages):
info('Frying eggs in {}'.format(sitepackages))
for d in listdir(sitepackages):
rd = join(sitepackages, d)
if isdir(rd) and d.endswith('.egg'):
info(' ' + d)
files = [join(rd, f) for f in listdir(rd) if f != 'EGG-INFO']
if files:
shprint(sh.mv, '-t', sitepackages, *files)
shprint(sh.rm, '-rf', d)
def expand_dependencies(recipes):
recipe_lists = [[]]
for recipe in recipes:
if isinstance(recipe, (tuple, list)):
new_recipe_lists = []
for alternative in recipe:
for old_list in recipe_lists:
new_list = [i for i in old_list]
new_list.append(alternative)
new_recipe_lists.append(new_list)
recipe_lists = new_recipe_lists
else:
for old_list in recipe_lists:
old_list.append(recipe)
return recipe_lists
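# Worked example (reader's note, not part of the original source): a depends
# list mixing plain recipe names with alternative tuples is expanded into one
# flat list per possible combination, e.g.
#
#     expand_dependencies(['python2', ('sdl2', 'genericndkbuild'), 'kivy'])
#     # -> [['python2', 'sdl2', 'kivy'],
#     #     ['python2', 'genericndkbuild', 'kivy']]
#
# The recipe names above are only illustrative.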
| mit | 8,087,446,740,981,499,000 | 38.156463 | 93 | 0.552641 | false |
yunify/qingcloud-cli | qingcloud/cli/iaas_client/actions/alarm_policy/modify_alarm_policy_action_attributes.py | 1 | 2414 | # =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.iaas_client.actions.base import BaseAction
class ModifyAlarmPolicyActionAttributesAction(BaseAction):
action = 'ModifyAlarmPolicyActionAttributes'
command = 'modify-alarm-policy-action-attributes'
usage = '%(prog)s [-a <alarm_policy_action>...] [options] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument("-a", "--alarm-policy-action", dest="alarm_policy_action",
action="store", type=str, default='',
help="the ID of the alarm policy action whose content you want to update.")
parser.add_argument("-A", "--trigger-action", dest="trigger_action",
action="store", type=str, default=None,
help="the ID of the trigger action.")
parser.add_argument("-s", "--trigger-status", dest="trigger_status",
action="store", type=str, default=None,
help="when the monitor alarm state becomes 'ok' or 'alarm', "
"the message will be sent to this trigger list.")
@classmethod
def build_directive(cls, options):
if options.alarm_policy_action == '':
print('error: alarm_policy_action should be specified.')
return None
directive = {
"alarm_policy_action": options.alarm_policy_action,
"trigger_action": options.trigger_action,
"trigger_status": options.trigger_status,
}
return directive
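# Illustrative note (not part of the original source): given options such as
#
#     modify-alarm-policy-action-attributes -a apa-1234abcd \
#         -A trigger-5678efgh -s ok
#
# build_directive() returns the plain dict that the base action serializes
# into the API request:
#
#     {'alarm_policy_action': 'apa-1234abcd',
#      'trigger_action': 'trigger-5678efgh',
#      'trigger_status': 'ok'}
#
# The identifiers above are made up for the example.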
| apache-2.0 | -5,971,013,880,356,076,000 | 43.703704 | 103 | 0.57208 | false |
Tesora-Release/tesora-trove | trove/tests/scenario/runners/instance_delete_runners.py | 2 | 2036 | # Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import proboscis
from trove.tests.scenario.runners.test_runners import TestRunner
class InstanceDeleteRunner(TestRunner):
def __init__(self):
super(InstanceDeleteRunner, self).__init__()
def run_instance_delete(self, expected_http_code=202):
if self.has_do_not_delete_instance:
self.report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was "
"specified, skipping delete...")
raise proboscis.SkipTest("TESTS_DO_NOT_DELETE_INSTANCE "
"was specified.")
self.assert_instance_delete(self.instance_info.id, expected_http_code)
def assert_instance_delete(self, instance_id, expected_http_code):
self.report.log("Testing delete on instance: %s" % instance_id)
self.auth_client.instances.delete(instance_id)
self.assert_client_code(expected_http_code)
    def run_instance_delete_wait(self, expected_states=('SHUTDOWN',)):
if self.has_do_not_delete_instance:
self.report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was "
"specified, skipping delete wait...")
raise proboscis.SkipTest("TESTS_DO_NOT_DELETE_INSTANCE "
"was specified.")
self.assert_all_gone(self.instance_info.id, expected_states[-1])
self.assert_server_group_gone(self.instance_info.srv_grp_id)
| apache-2.0 | -5,106,288,940,637,070,000 | 41.416667 | 78 | 0.658153 | false |
openstack/manila | manila/tests/db/sqlalchemy/test_api.py | 1 | 194905 | # Copyright 2013 OpenStack Foundation
# Copyright (c) 2014 NetApp, Inc.
# Copyright (c) 2015 Rushil Chugh
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Testing of SQLAlchemy backend."""
import copy
import datetime
import random
from unittest import mock
import ddt
from oslo_db import exception as db_exception
from oslo_utils import timeutils
from oslo_utils import uuidutils
from manila.common import constants
from manila import context
from manila.db.sqlalchemy import api as db_api
from manila.db.sqlalchemy import models
from manila import exception
from manila import quota
from manila import test
from manila.tests import db_utils
QUOTAS = quota.QUOTAS
security_service_dict = {
'id': 'fake id',
'project_id': 'fake project',
'type': 'ldap',
'dns_ip': 'fake dns',
'server': 'fake ldap server',
'domain': 'fake ldap domain',
'ou': 'fake ldap ou',
'user': 'fake user',
'password': 'fake password',
'name': 'whatever',
'description': 'nevermind',
}
class BaseDatabaseAPITestCase(test.TestCase):
def _check_fields(self, expected, actual):
for key in expected:
self.assertEqual(expected[key], actual[key])
@ddt.ddt
class GenericDatabaseAPITestCase(test.TestCase):
def setUp(self):
"""Run before each test."""
super(GenericDatabaseAPITestCase, self).setUp()
self.ctxt = context.get_admin_context()
@ddt.unpack
@ddt.data(
{'values': {'test': 'fake'}, 'call_count': 1},
{'values': {'test': 'fake', 'id': 'fake'}, 'call_count': 0},
{'values': {'test': 'fake', 'fooid': 'fake'}, 'call_count': 1},
{'values': {'test': 'fake', 'idfoo': 'fake'}, 'call_count': 1},
)
def test_ensure_model_values_has_id(self, values, call_count):
self.mock_object(uuidutils, 'generate_uuid')
db_api.ensure_model_dict_has_id(values)
self.assertEqual(call_count, uuidutils.generate_uuid.call_count)
self.assertIn('id', values)
def test_custom_query(self):
share = db_utils.create_share()
share_access = db_utils.create_access(share_id=share['id'])
db_api.share_instance_access_delete(
self.ctxt, share_access.instance_mappings[0].id)
self.assertRaises(exception.NotFound, db_api.share_access_get,
self.ctxt, share_access.id)
@ddt.ddt
class ShareAccessDatabaseAPITestCase(test.TestCase):
def setUp(self):
"""Run before each test."""
super(ShareAccessDatabaseAPITestCase, self).setUp()
self.ctxt = context.get_admin_context()
@ddt.data(0, 3)
def test_share_access_get_all_for_share(self, len_rules):
share = db_utils.create_share()
rules = [db_utils.create_access(share_id=share['id'])
for i in range(0, len_rules)]
rule_ids = [r['id'] for r in rules]
result = db_api.share_access_get_all_for_share(self.ctxt, share['id'])
self.assertEqual(len_rules, len(result))
result_ids = [r['id'] for r in result]
self.assertEqual(rule_ids, result_ids)
def test_share_access_get_all_for_share_no_instance_mappings(self):
share = db_utils.create_share()
share_instance = share['instance']
rule = db_utils.create_access(share_id=share['id'])
# Mark instance mapping soft deleted
db_api.share_instance_access_update(
self.ctxt, rule['id'], share_instance['id'], {'deleted': "True"})
result = db_api.share_access_get_all_for_share(self.ctxt, share['id'])
self.assertEqual([], result)
def test_share_instance_access_update(self):
share = db_utils.create_share()
access = db_utils.create_access(share_id=share['id'])
instance_access_mapping = db_api.share_instance_access_get(
self.ctxt, access['id'], share.instance['id'])
self.assertEqual(constants.ACCESS_STATE_QUEUED_TO_APPLY,
access['state'])
self.assertIsNone(access['access_key'])
db_api.share_instance_access_update(
self.ctxt, access['id'], share.instance['id'],
{'state': constants.STATUS_ERROR, 'access_key': 'watson4heisman'})
instance_access_mapping = db_api.share_instance_access_get(
self.ctxt, access['id'], share.instance['id'])
access = db_api.share_access_get(self.ctxt, access['id'])
self.assertEqual(constants.STATUS_ERROR,
instance_access_mapping['state'])
self.assertEqual('watson4heisman', access['access_key'])
@ddt.data(True, False)
def test_share_access_get_all_for_instance_with_share_access_data(
self, with_share_access_data):
share = db_utils.create_share()
access_1 = db_utils.create_access(share_id=share['id'])
access_2 = db_utils.create_access(share_id=share['id'])
share_access_keys = ('access_to', 'access_type', 'access_level',
'share_id')
rules = db_api.share_access_get_all_for_instance(
self.ctxt, share.instance['id'],
with_share_access_data=with_share_access_data)
share_access_keys_present = True if with_share_access_data else False
actual_access_ids = [r['access_id'] for r in rules]
        self.assertIsInstance(actual_access_ids, list)
expected = [access_1['id'], access_2['id']]
self.assertEqual(len(expected), len(actual_access_ids))
for pool in expected:
self.assertIn(pool, actual_access_ids)
for rule in rules:
for key in share_access_keys:
self.assertEqual(share_access_keys_present, key in rule)
self.assertIn('state', rule)
def test_share_access_get_all_for_instance_with_filters(self):
share = db_utils.create_share()
new_share_instance = db_utils.create_share_instance(
share_id=share['id'])
access_1 = db_utils.create_access(share_id=share['id'])
access_2 = db_utils.create_access(share_id=share['id'])
share_access_keys = ('access_to', 'access_type', 'access_level',
'share_id')
db_api.share_instance_access_update(
self.ctxt, access_1['id'], new_share_instance['id'],
{'state': constants.STATUS_ACTIVE})
rules = db_api.share_access_get_all_for_instance(
self.ctxt, new_share_instance['id'],
filters={'state': constants.ACCESS_STATE_QUEUED_TO_APPLY})
self.assertEqual(1, len(rules))
self.assertEqual(access_2['id'], rules[0]['access_id'])
for rule in rules:
for key in share_access_keys:
self.assertIn(key, rule)
def test_share_instance_access_delete(self):
share = db_utils.create_share()
access = db_utils.create_access(share_id=share['id'],
metadata={'key1': 'v1'})
instance_access_mapping = db_api.share_instance_access_get(
self.ctxt, access['id'], share.instance['id'])
db_api.share_instance_access_delete(
self.ctxt, instance_access_mapping['id'])
rules = db_api.share_access_get_all_for_instance(
self.ctxt, share.instance['id'])
self.assertEqual([], rules)
self.assertRaises(exception.NotFound, db_api.share_instance_access_get,
self.ctxt, access['id'], share['instance']['id'])
def test_one_share_with_two_share_instance_access_delete(self):
metadata = {'key2': 'v2', 'key3': 'v3'}
share = db_utils.create_share()
instance = db_utils.create_share_instance(share_id=share['id'])
access = db_utils.create_access(share_id=share['id'],
metadata=metadata)
instance_access_mapping1 = db_api.share_instance_access_get(
self.ctxt, access['id'], share.instance['id'])
instance_access_mapping2 = db_api.share_instance_access_get(
self.ctxt, access['id'], instance['id'])
self.assertEqual(instance_access_mapping1['access_id'],
instance_access_mapping2['access_id'])
db_api.share_instance_delete(self.ctxt, instance['id'])
get_accesses = db_api.share_access_get_all_for_share(self.ctxt,
share['id'])
self.assertEqual(1, len(get_accesses))
get_metadata = (
get_accesses[0].get('share_access_rules_metadata') or {})
get_metadata = {item['key']: item['value'] for item in get_metadata}
self.assertEqual(metadata, get_metadata)
self.assertEqual(access['id'], get_accesses[0]['id'])
db_api.share_instance_delete(self.ctxt, share['instance']['id'])
self.assertRaises(exception.NotFound,
db_api.share_instance_access_get,
self.ctxt, access['id'], share['instance']['id'])
get_accesses = db_api.share_access_get_all_for_share(self.ctxt,
share['id'])
self.assertEqual(0, len(get_accesses))
@ddt.data(True, False)
def test_share_instance_access_get_with_share_access_data(
self, with_share_access_data):
share = db_utils.create_share()
access = db_utils.create_access(share_id=share['id'])
instance_access = db_api.share_instance_access_get(
self.ctxt, access['id'], share['instance']['id'],
with_share_access_data=with_share_access_data)
for key in ('share_id', 'access_type', 'access_to', 'access_level',
'access_key'):
self.assertEqual(with_share_access_data, key in instance_access)
@ddt.data({'existing': {'access_type': 'cephx', 'access_to': 'alice'},
'new': {'access_type': 'user', 'access_to': 'alice'},
'result': False},
{'existing': {'access_type': 'user', 'access_to': 'bob'},
'new': {'access_type': 'user', 'access_to': 'bob'},
'result': True},
{'existing': {'access_type': 'ip', 'access_to': '10.0.0.10/32'},
'new': {'access_type': 'ip', 'access_to': '10.0.0.10'},
'result': True},
{'existing': {'access_type': 'ip', 'access_to': '10.10.0.11'},
'new': {'access_type': 'ip', 'access_to': '10.10.0.11'},
'result': True},
{'existing': {'access_type': 'ip', 'access_to': 'fd21::11'},
'new': {'access_type': 'ip', 'access_to': 'fd21::11'},
'result': True},
{'existing': {'access_type': 'ip', 'access_to': 'fd21::10'},
'new': {'access_type': 'ip', 'access_to': 'fd21::10/128'},
'result': True},
{'existing': {'access_type': 'ip', 'access_to': '10.10.0.0/22'},
'new': {'access_type': 'ip', 'access_to': '10.10.0.0/24'},
'result': False},
{'existing': {'access_type': 'ip', 'access_to': '2620:52::/48'},
'new': {'access_type': 'ip',
'access_to': '2620:52:0:13b8::/64'},
'result': False})
@ddt.unpack
def test_share_access_check_for_existing_access(self, existing, new,
result):
share = db_utils.create_share()
db_utils.create_access(share_id=share['id'],
access_type=existing['access_type'],
access_to=existing['access_to'])
rule_exists = db_api.share_access_check_for_existing_access(
self.ctxt, share['id'], new['access_type'], new['access_to'])
self.assertEqual(result, rule_exists)
def test_share_access_get_all_for_share_with_metadata(self):
share = db_utils.create_share()
rules = [db_utils.create_access(
share_id=share['id'], metadata={'key1': i})
for i in range(0, 3)]
rule_ids = [r['id'] for r in rules]
result = db_api.share_access_get_all_for_share(self.ctxt, share['id'])
self.assertEqual(3, len(result))
result_ids = [r['id'] for r in result]
self.assertEqual(rule_ids, result_ids)
result = db_api.share_access_get_all_for_share(
self.ctxt, share['id'], {'metadata': {'key1': '2'}})
self.assertEqual(1, len(result))
self.assertEqual(rules[2]['id'], result[0]['id'])
def test_share_access_metadata_update(self):
share = db_utils.create_share()
new_metadata = {'key1': 'test_update', 'key2': 'v2'}
rule = db_utils.create_access(share_id=share['id'],
metadata={'key1': 'v1'})
result_metadata = db_api.share_access_metadata_update(
self.ctxt, rule['id'], metadata=new_metadata)
result = db_api.share_access_get(self.ctxt, rule['id'])
self.assertEqual(new_metadata, result_metadata)
metadata = result.get('share_access_rules_metadata')
if metadata:
metadata = {item['key']: item['value'] for item in metadata}
else:
metadata = {}
self.assertEqual(new_metadata, metadata)
@ddt.ddt
class ShareDatabaseAPITestCase(test.TestCase):
def setUp(self):
"""Run before each test."""
super(ShareDatabaseAPITestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_share_filter_by_host_with_pools(self):
share_instances = [[
db_api.share_create(self.ctxt, {'host': value}).instance
for value in ('foo', 'foo#pool0')]]
db_utils.create_share()
self._assertEqualListsOfObjects(share_instances[0],
db_api.share_instances_get_all_by_host(
self.ctxt, 'foo'),
ignored_keys=['share_type',
'share_type_id',
'export_locations'])
def test_share_filter_all_by_host_with_pools_multiple_hosts(self):
share_instances = [[
db_api.share_create(self.ctxt, {'host': value}).instance
for value in ('foo', 'foo#pool0', 'foo', 'foo#pool1')]]
db_utils.create_share()
self._assertEqualListsOfObjects(share_instances[0],
db_api.share_instances_get_all_by_host(
self.ctxt, 'foo'),
ignored_keys=['share_type',
'share_type_id',
'export_locations'])
def test_share_filter_all_by_share_server(self):
share_network = db_utils.create_share_network()
share_server = db_utils.create_share_server(
share_network_id=share_network['id'])
share = db_utils.create_share(share_server_id=share_server['id'],
share_network_id=share_network['id'])
actual_result = db_api.share_get_all_by_share_server(
self.ctxt, share_server['id'])
self.assertEqual(1, len(actual_result))
self.assertEqual(share['id'], actual_result[0].id)
def test_share_filter_all_by_share_group(self):
group = db_utils.create_share_group()
share = db_utils.create_share(share_group_id=group['id'])
actual_result = db_api.share_get_all_by_share_group_id(
self.ctxt, group['id'])
self.assertEqual(1, len(actual_result))
self.assertEqual(share['id'], actual_result[0].id)
def test_share_instance_delete_with_share(self):
share = db_utils.create_share()
self.assertIsNotNone(db_api.share_get(self.ctxt, share['id']))
self.assertIsNotNone(db_api.share_metadata_get(self.ctxt, share['id']))
db_api.share_instance_delete(self.ctxt, share.instance['id'])
self.assertRaises(exception.NotFound, db_api.share_get,
self.ctxt, share['id'])
self.assertRaises(exception.NotFound, db_api.share_metadata_get,
self.ctxt, share['id'])
def test_share_instance_delete_with_share_need_to_update_usages(self):
share = db_utils.create_share()
self.assertIsNotNone(db_api.share_get(self.ctxt, share['id']))
self.assertIsNotNone(db_api.share_metadata_get(self.ctxt, share['id']))
self.mock_object(quota.QUOTAS, 'reserve',
mock.Mock(return_value='reservation'))
self.mock_object(quota.QUOTAS, 'commit')
db_api.share_instance_delete(
self.ctxt, share.instance['id'], need_to_update_usages=True)
self.assertRaises(exception.NotFound, db_api.share_get,
self.ctxt, share['id'])
self.assertRaises(exception.NotFound, db_api.share_metadata_get,
self.ctxt, share['id'])
quota.QUOTAS.reserve.assert_called_once_with(
self.ctxt,
project_id=share['project_id'],
shares=-1,
gigabytes=-share['size'],
share_type_id=None,
user_id=share['user_id']
)
quota.QUOTAS.commit.assert_called_once_with(
self.ctxt,
mock.ANY,
project_id=share['project_id'],
share_type_id=None,
user_id=share['user_id']
)
def test_share_instance_get(self):
share = db_utils.create_share()
instance = db_api.share_instance_get(self.ctxt, share.instance['id'])
self.assertEqual('share-%s' % instance['id'], instance['name'])
@ddt.data({'with_share_data': True, 'status': constants.STATUS_AVAILABLE},
{'with_share_data': False, 'status': None})
@ddt.unpack
def test_share_instance_get_all_by_host(self, with_share_data, status):
kwargs = {'status': status} if status else {}
db_utils.create_share(**kwargs)
instances = db_api.share_instances_get_all_by_host(
self.ctxt, 'fake_host', with_share_data=with_share_data,
status=status)
self.assertEqual(1, len(instances))
instance = instances[0]
self.assertEqual('share-%s' % instance['id'], instance['name'])
if with_share_data:
self.assertEqual('NFS', instance['share_proto'])
self.assertEqual(0, instance['size'])
else:
self.assertNotIn('share_proto', instance)
def test_share_instance_get_all_by_host_not_found_exception(self):
db_utils.create_share()
self.mock_object(db_api, 'share_get', mock.Mock(
side_effect=exception.NotFound))
instances = db_api.share_instances_get_all_by_host(
self.ctxt, 'fake_host', True)
self.assertEqual(0, len(instances))
def test_share_instance_get_all_by_share_group(self):
group = db_utils.create_share_group()
db_utils.create_share(share_group_id=group['id'])
db_utils.create_share()
instances = db_api.share_instances_get_all_by_share_group_id(
self.ctxt, group['id'])
self.assertEqual(1, len(instances))
instance = instances[0]
self.assertEqual('share-%s' % instance['id'], instance['name'])
@ddt.data('id', 'path')
def test_share_instance_get_all_by_export_location(self, type):
share = db_utils.create_share()
initial_location = ['fake_export_location']
db_api.share_export_locations_update(self.ctxt, share.instance['id'],
initial_location, False)
if type == 'id':
export_location = (
db_api.share_export_locations_get_by_share_id(self.ctxt,
share['id']))
value = export_location[0]['uuid']
else:
value = 'fake_export_location'
instances = db_api.share_instances_get_all(
self.ctxt, filters={'export_location_' + type: value})
self.assertEqual(1, len(instances))
instance = instances[0]
self.assertEqual('share-%s' % instance['id'], instance['name'])
def test_share_instance_get_all_by_ids(self):
fake_share = db_utils.create_share()
expected_share_instance = db_utils.create_share_instance(
share_id=fake_share['id'])
# Populate the db with a dummy share
db_utils.create_share_instance(share_id=fake_share['id'])
instances = db_api.share_instances_get_all(
self.ctxt,
filters={'instance_ids': [expected_share_instance['id']]})
self.assertEqual(1, len(instances))
instance = instances[0]
self.assertEqual('share-%s' % instance['id'], instance['name'])
@ddt.data('host', 'share_group_id')
def test_share_get_all_sort_by_share_instance_fields(self, sort_key):
shares = [db_utils.create_share(**{sort_key: n, 'size': 1})
for n in ('test1', 'test2')]
actual_result = db_api.share_get_all(
self.ctxt, sort_key=sort_key, sort_dir='desc')
self.assertEqual(2, len(actual_result))
self.assertEqual(shares[0]['id'], actual_result[1]['id'])
@ddt.data('id')
def test_share_get_all_sort_by_share_fields(self, sort_key):
shares = [db_utils.create_share(**{sort_key: n, 'size': 1})
for n in ('FAKE_UUID1', 'FAKE_UUID2')]
actual_result = db_api.share_get_all(
self.ctxt, sort_key=sort_key, sort_dir='desc')
self.assertEqual(2, len(actual_result))
self.assertEqual(shares[0]['id'], actual_result[1]['id'])
@ddt.data('id', 'path')
def test_share_get_all_by_export_location(self, type):
share = db_utils.create_share()
initial_location = ['fake_export_location']
db_api.share_export_locations_update(self.ctxt, share.instance['id'],
initial_location, False)
if type == 'id':
export_location = db_api.share_export_locations_get_by_share_id(
self.ctxt, share['id'])
value = export_location[0]['uuid']
else:
value = 'fake_export_location'
actual_result = db_api.share_get_all(
self.ctxt, filters={'export_location_' + type: value})
self.assertEqual(1, len(actual_result))
self.assertEqual(share['id'], actual_result[0]['id'])
@ddt.data('id', 'path')
def test_share_get_all_by_export_location_not_exist(self, type):
share = db_utils.create_share()
initial_location = ['fake_export_location']
db_api.share_export_locations_update(self.ctxt, share.instance['id'],
initial_location, False)
filter = {'export_location_' + type: 'export_location_not_exist'}
actual_result = db_api.share_get_all(self.ctxt, filters=filter)
self.assertEqual(0, len(actual_result))
@ddt.data((10, 5), (20, 5))
@ddt.unpack
def test_share_get_all_with_limit(self, limit, offset):
for i in range(limit + 5):
db_utils.create_share()
filters = {'limit': offset, 'offset': 0}
shares_not_requested = db_api.share_get_all(
self.ctxt, filters=filters)
filters = {'limit': limit, 'offset': offset}
shares_requested = db_api.share_get_all(self.ctxt, filters=filters)
shares_not_requested_ids = [s['id'] for s in shares_not_requested]
shares_requested_ids = [s['id'] for s in shares_requested]
self.assertEqual(offset, len(shares_not_requested_ids))
self.assertEqual(limit, len(shares_requested_ids))
self.assertEqual(0, len(
set(shares_requested_ids) & set(shares_not_requested_ids)))
@ddt.data(
({'display_name~': 'fake_name'}, 3, 3),
({'display_name~': 'fake_name', 'limit': 2}, 3, 2)
)
@ddt.unpack
def test_share_get_all_with_count(self, filters, amount_of_shares,
expected_shares_len):
[db_utils.create_share(display_name='fake_name_%s' % str(i))
for i in range(amount_of_shares)]
count, shares = db_api.share_get_all_with_count(
self.ctxt, filters=filters)
self.assertEqual(count, amount_of_shares)
for share in shares:
self.assertIn('fake_name', share['display_name'])
self.assertEqual(expected_shares_len, len(shares))
def test_share_get_all_by_share_group_id_with_count(self):
share_groups = [db_utils.create_share_group() for i in range(2)]
shares = [
db_utils.create_share(share_group_id=share_group['id'])
for share_group in share_groups]
count, result = db_api.share_get_all_by_share_group_id_with_count(
self.ctxt, share_groups[0]['id'])
self.assertEqual(count, 1)
self.assertEqual(shares[0]['id'], result[0]['id'])
self.assertEqual(1, len(result))
def test_share_get_all_by_share_server_with_count(self):
share_servers = [db_utils.create_share_server() for i in range(2)]
shares = [
db_utils.create_share(share_server_id=share_server['id'])
for share_server in share_servers]
count, result = db_api.share_get_all_by_share_server_with_count(
self.ctxt, share_servers[0]['id'])
self.assertEqual(count, 1)
self.assertEqual(shares[0]['id'], result[0]['id'])
self.assertEqual(1, len(result))
def test_share_get_all_by_project_with_count(self):
project_ids = ['fake_id_1', 'fake_id_2']
shares = [
db_utils.create_share(project_id=project_id)
for project_id in project_ids]
count, result = db_api.share_get_all_by_project_with_count(
self.ctxt, project_ids[0])
self.assertEqual(count, 1)
self.assertEqual(shares[0]['id'], result[0]['id'])
self.assertEqual(1, len(result))
@ddt.data(
({'status': constants.STATUS_AVAILABLE}, 'status',
[constants.STATUS_AVAILABLE, constants.STATUS_ERROR]),
({'share_group_id': 'fake_group_id'}, 'share_group_id',
['fake_group_id', 'group_id']),
({'snapshot_id': 'fake_snapshot_id'}, 'snapshot_id',
['fake_snapshot_id', 'snapshot_id']),
({'share_type_id': 'fake_type_id'}, 'share_type_id',
['fake_type_id', 'type_id']),
({'host': 'fakehost@fakebackend#fakepool'}, 'host',
['fakehost@fakebackend#fakepool', 'foo@bar#test']),
({'share_network_id': 'fake_net_id'}, 'share_network_id',
['fake_net_id', 'net_id']),
({'display_name': 'fake_share_name'}, 'display_name',
['fake_share_name', 'share_name']),
({'display_description': 'fake description'}, 'display_description',
['fake description', 'description'])
)
@ddt.unpack
def test_share_get_all_with_filters(self, filters, key, share_values):
for value in share_values:
kwargs = {key: value}
db_utils.create_share(**kwargs)
results = db_api.share_get_all(self.ctxt, filters=filters)
for share in results:
self.assertEqual(share[key], filters[key])
@ddt.data(
('display_name~', 'display_name',
['fake_name_1', 'fake_name_2', 'fake_name_3'], 'fake_name'),
('display_description~', 'display_description',
['fake desc 1', 'fake desc 2', 'fake desc 3'], 'fake desc')
)
@ddt.unpack
def test_share_get_all_like_filters(
self, filter_name, key, share_values, like_value):
for value in share_values:
kwargs = {key: value}
db_utils.create_share(**kwargs)
db_utils.create_share(
display_name='irrelevant_name',
display_description='should not be queried')
filters = {filter_name: like_value}
results = db_api.share_get_all(self.ctxt, filters=filters)
self.assertEqual(len(share_values), len(results))
@ddt.data(None, 'writable')
def test_share_get_has_replicas_field(self, replication_type):
share = db_utils.create_share(replication_type=replication_type)
db_share = db_api.share_get(self.ctxt, share['id'])
self.assertIn('has_replicas', db_share)
@ddt.data({'with_share_data': False, 'with_share_server': False},
{'with_share_data': False, 'with_share_server': True},
{'with_share_data': True, 'with_share_server': False},
{'with_share_data': True, 'with_share_server': True})
@ddt.unpack
def test_share_replicas_get_all(self, with_share_data,
with_share_server):
share_server = db_utils.create_share_server()
share_1 = db_utils.create_share()
share_2 = db_utils.create_share()
db_utils.create_share_replica(
replica_state=constants.REPLICA_STATE_ACTIVE,
share_id=share_1['id'],
share_server_id=share_server['id'])
db_utils.create_share_replica(
replica_state=constants.REPLICA_STATE_IN_SYNC,
share_id=share_1['id'],
share_server_id=share_server['id'])
db_utils.create_share_replica(
replica_state=constants.REPLICA_STATE_OUT_OF_SYNC,
share_id=share_2['id'],
share_server_id=share_server['id'])
db_utils.create_share_replica(share_id=share_2['id'])
expected_ss_keys = {
'backend_details', 'host', 'id',
'share_network_subnet_id', 'status',
}
expected_share_keys = {
'project_id', 'share_type_id', 'display_name',
'name', 'share_proto', 'is_public',
'source_share_group_snapshot_member_id',
}
session = db_api.get_session()
with session.begin():
share_replicas = db_api.share_replicas_get_all(
self.ctxt, with_share_server=with_share_server,
with_share_data=with_share_data, session=session)
self.assertEqual(3, len(share_replicas))
for replica in share_replicas:
if with_share_server:
self.assertTrue(expected_ss_keys.issubset(
replica['share_server'].keys()))
else:
self.assertNotIn('share_server', replica.keys())
self.assertEqual(
with_share_data,
expected_share_keys.issubset(replica.keys()))
@ddt.data({'with_share_data': False, 'with_share_server': False},
{'with_share_data': False, 'with_share_server': True},
{'with_share_data': True, 'with_share_server': False},
{'with_share_data': True, 'with_share_server': True})
@ddt.unpack
def test_share_replicas_get_all_by_share(self, with_share_data,
with_share_server):
share_server = db_utils.create_share_server()
share = db_utils.create_share()
db_utils.create_share_replica(
replica_state=constants.REPLICA_STATE_ACTIVE,
share_id=share['id'],
share_server_id=share_server['id'])
db_utils.create_share_replica(
replica_state=constants.REPLICA_STATE_IN_SYNC,
share_id=share['id'],
share_server_id=share_server['id'])
db_utils.create_share_replica(
replica_state=constants.REPLICA_STATE_OUT_OF_SYNC,
share_id=share['id'],
share_server_id=share_server['id'])
expected_ss_keys = {
'backend_details', 'host', 'id',
'share_network_subnet_id', 'status',
}
expected_share_keys = {
'project_id', 'share_type_id', 'display_name',
'name', 'share_proto', 'is_public',
'source_share_group_snapshot_member_id',
}
session = db_api.get_session()
with session.begin():
share_replicas = db_api.share_replicas_get_all_by_share(
self.ctxt, share['id'],
with_share_server=with_share_server,
with_share_data=with_share_data, session=session)
self.assertEqual(3, len(share_replicas))
for replica in share_replicas:
if with_share_server:
self.assertTrue(expected_ss_keys.issubset(
replica['share_server'].keys()))
else:
self.assertNotIn('share_server', replica.keys())
self.assertEqual(with_share_data,
expected_share_keys.issubset(replica.keys()))
def test_share_replicas_get_available_active_replica(self):
share_server = db_utils.create_share_server()
share_1 = db_utils.create_share()
share_2 = db_utils.create_share()
share_3 = db_utils.create_share()
db_utils.create_share_replica(
id='Replica1',
share_id=share_1['id'],
status=constants.STATUS_AVAILABLE,
replica_state=constants.REPLICA_STATE_ACTIVE,
share_server_id=share_server['id'])
db_utils.create_share_replica(
id='Replica2',
status=constants.STATUS_AVAILABLE,
share_id=share_1['id'],
replica_state=constants.REPLICA_STATE_ACTIVE,
share_server_id=share_server['id'])
db_utils.create_share_replica(
id='Replica3',
status=constants.STATUS_AVAILABLE,
share_id=share_2['id'],
replica_state=constants.REPLICA_STATE_ACTIVE)
db_utils.create_share_replica(
id='Replica4',
status=constants.STATUS_ERROR,
share_id=share_2['id'],
replica_state=constants.REPLICA_STATE_ACTIVE)
db_utils.create_share_replica(
id='Replica5',
status=constants.STATUS_AVAILABLE,
share_id=share_2['id'],
replica_state=constants.REPLICA_STATE_IN_SYNC)
db_utils.create_share_replica(
id='Replica6',
share_id=share_3['id'],
status=constants.STATUS_AVAILABLE,
replica_state=constants.REPLICA_STATE_IN_SYNC)
session = db_api.get_session()
expected_ss_keys = {
'backend_details', 'host', 'id',
'share_network_subnet_id', 'status',
}
expected_share_keys = {
'project_id', 'share_type_id', 'display_name',
'name', 'share_proto', 'is_public',
'source_share_group_snapshot_member_id',
}
with session.begin():
replica_share_1 = (
db_api.share_replicas_get_available_active_replica(
self.ctxt, share_1['id'], with_share_server=True,
session=session)
)
replica_share_2 = (
db_api.share_replicas_get_available_active_replica(
self.ctxt, share_2['id'], with_share_data=True,
session=session)
)
replica_share_3 = (
db_api.share_replicas_get_available_active_replica(
self.ctxt, share_3['id'], session=session)
)
self.assertIn(replica_share_1.get('id'), ['Replica1', 'Replica2'])
self.assertTrue(expected_ss_keys.issubset(
replica_share_1['share_server'].keys()))
self.assertFalse(
expected_share_keys.issubset(replica_share_1.keys()))
self.assertEqual(replica_share_2.get('id'), 'Replica3')
self.assertFalse(replica_share_2['share_server'])
self.assertTrue(
expected_share_keys.issubset(replica_share_2.keys()))
self.assertIsNone(replica_share_3)
def test_share_replica_get_exception(self):
replica = db_utils.create_share_replica(share_id='FAKE_SHARE_ID')
self.assertRaises(exception.ShareReplicaNotFound,
db_api.share_replica_get,
self.ctxt, replica['id'])
def test_share_replica_get_without_share_data(self):
share = db_utils.create_share()
replica = db_utils.create_share_replica(
share_id=share['id'],
replica_state=constants.REPLICA_STATE_ACTIVE)
expected_extra_keys = {
'project_id', 'share_type_id', 'display_name',
'name', 'share_proto', 'is_public',
'source_share_group_snapshot_member_id',
}
share_replica = db_api.share_replica_get(self.ctxt, replica['id'])
self.assertIsNotNone(share_replica['replica_state'])
self.assertEqual(share['id'], share_replica['share_id'])
self.assertFalse(expected_extra_keys.issubset(share_replica.keys()))
def test_share_replica_get_with_share_data(self):
share = db_utils.create_share()
replica = db_utils.create_share_replica(
share_id=share['id'],
replica_state=constants.REPLICA_STATE_ACTIVE)
expected_extra_keys = {
'project_id', 'share_type_id', 'display_name',
'name', 'share_proto', 'is_public',
'source_share_group_snapshot_member_id',
}
share_replica = db_api.share_replica_get(
self.ctxt, replica['id'], with_share_data=True)
self.assertIsNotNone(share_replica['replica_state'])
self.assertEqual(share['id'], share_replica['share_id'])
self.assertTrue(expected_extra_keys.issubset(share_replica.keys()))
def test_share_replica_get_with_share_server(self):
session = db_api.get_session()
share_server = db_utils.create_share_server()
share = db_utils.create_share()
replica = db_utils.create_share_replica(
share_id=share['id'],
replica_state=constants.REPLICA_STATE_ACTIVE,
share_server_id=share_server['id']
)
expected_extra_keys = {
'backend_details', 'host', 'id',
'share_network_subnet_id', 'status',
}
with session.begin():
share_replica = db_api.share_replica_get(
self.ctxt, replica['id'], with_share_server=True,
session=session)
self.assertIsNotNone(share_replica['replica_state'])
self.assertEqual(
share_server['id'], share_replica['share_server_id'])
self.assertTrue(expected_extra_keys.issubset(
share_replica['share_server'].keys()))
def test_share_replica_update(self):
share = db_utils.create_share()
replica = db_utils.create_share_replica(
share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE)
updated_replica = db_api.share_replica_update(
self.ctxt, replica['id'],
{'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC})
self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
updated_replica['replica_state'])
def test_share_replica_delete(self):
share = db_utils.create_share()
share = db_api.share_get(self.ctxt, share['id'])
self.mock_object(quota.QUOTAS, 'reserve',
mock.Mock(return_value='reservation'))
self.mock_object(quota.QUOTAS, 'commit')
replica = db_utils.create_share_replica(
share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE)
self.assertEqual(1, len(
db_api.share_replicas_get_all_by_share(self.ctxt, share['id'])))
db_api.share_replica_delete(self.ctxt, replica['id'])
self.assertEqual(
[], db_api.share_replicas_get_all_by_share(self.ctxt, share['id']))
share_type_id = share['instances'][0].get('share_type_id', None)
quota.QUOTAS.reserve.assert_called_once_with(
self.ctxt, project_id=share['project_id'],
user_id=share['user_id'], share_type_id=share_type_id,
share_replicas=-1, replica_gigabytes=share['size'])
quota.QUOTAS.commit.assert_called_once_with(
self.ctxt, 'reservation', project_id=share['project_id'],
user_id=share['user_id'], share_type_id=share_type_id)
@ddt.data(
(True, {"share_replicas": -1, "replica_gigabytes": 0}, 'active'),
(False, {"shares": -1, "gigabytes": 0}, None),
(False, {"shares": -1, "gigabytes": 0,
"share_replicas": -1, "replica_gigabytes": 0}, 'active')
)
@ddt.unpack
def test_share_instance_delete_quota_error(self, is_replica, deltas,
replica_state):
share = db_utils.create_share(replica_state=replica_state)
share = db_api.share_get(self.ctxt, share['id'])
instance_id = share['instances'][0]['id']
if is_replica:
replica = db_utils.create_share_replica(
share_id=share['id'],
replica_state=constants.REPLICA_STATE_ACTIVE)
instance_id = replica['id']
reservation = 'fake'
share_type_id = share['instances'][0]['share_type_id']
self.mock_object(quota.QUOTAS, 'reserve',
mock.Mock(return_value=reservation))
self.mock_object(quota.QUOTAS, 'commit', mock.Mock(
side_effect=exception.QuotaError('fake')))
self.mock_object(quota.QUOTAS, 'rollback')
        # NOTE(silvacarlose): not calling with assertRaises since the
        # _update_share_instance_usages method does not raise an exception
db_api.share_instance_delete(
self.ctxt, instance_id, session=None, need_to_update_usages=True)
quota.QUOTAS.reserve.assert_called_once_with(
self.ctxt, project_id=share['project_id'],
user_id=share['user_id'], share_type_id=share_type_id, **deltas)
quota.QUOTAS.commit.assert_called_once_with(
self.ctxt, reservation, project_id=share['project_id'],
user_id=share['user_id'], share_type_id=share_type_id)
quota.QUOTAS.rollback.assert_called_once_with(
self.ctxt, reservation, share_type_id=share_type_id)
def test_share_instance_access_copy(self):
share = db_utils.create_share()
rules = []
for i in range(0, 5):
rules.append(db_utils.create_access(share_id=share['id']))
instance = db_utils.create_share_instance(share_id=share['id'])
share_access_rules = db_api.share_instance_access_copy(
self.ctxt, share['id'], instance['id'])
share_access_rule_ids = [a['id'] for a in share_access_rules]
self.assertEqual(5, len(share_access_rules))
for rule_id in share_access_rule_ids:
self.assertIsNotNone(
db_api.share_instance_access_get(
self.ctxt, rule_id, instance['id']))
@ddt.ddt
class ShareGroupDatabaseAPITestCase(test.TestCase):
def setUp(self):
"""Run before each test."""
super(ShareGroupDatabaseAPITestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_share_group_create_with_share_type(self):
fake_share_types = ["fake_share_type"]
share_group = db_utils.create_share_group(share_types=fake_share_types)
share_group = db_api.share_group_get(self.ctxt, share_group['id'])
self.assertEqual(1, len(share_group['share_types']))
def test_share_group_get(self):
share_group = db_utils.create_share_group()
self.assertDictMatch(
dict(share_group),
dict(db_api.share_group_get(self.ctxt, share_group['id'])))
def test_count_share_groups_in_share_network(self):
share_network = db_utils.create_share_network()
db_utils.create_share_group()
db_utils.create_share_group(share_network_id=share_network['id'])
count = db_api.count_share_groups_in_share_network(
self.ctxt, share_network_id=share_network['id'])
self.assertEqual(1, count)
def test_share_group_get_all(self):
expected_share_group = db_utils.create_share_group()
share_groups = db_api.share_group_get_all(self.ctxt, detailed=False)
self.assertEqual(1, len(share_groups))
share_group = share_groups[0]
self.assertEqual(2, len(dict(share_group).keys()))
self.assertEqual(expected_share_group['id'], share_group['id'])
self.assertEqual(expected_share_group['name'], share_group['name'])
def test_share_group_get_all_with_detail(self):
expected_share_group = db_utils.create_share_group()
share_groups = db_api.share_group_get_all(self.ctxt, detailed=True)
self.assertEqual(1, len(share_groups))
self.assertDictMatch(dict(expected_share_group), dict(share_groups[0]))
def test_share_group_get_all_by_host(self):
fake_host = 'my_fake_host'
expected_share_group = db_utils.create_share_group(host=fake_host)
db_utils.create_share_group()
share_groups = db_api.share_group_get_all_by_host(
self.ctxt, fake_host, detailed=False)
self.assertEqual(1, len(share_groups))
share_group = share_groups[0]
self.assertEqual(2, len(dict(share_group).keys()))
self.assertEqual(expected_share_group['id'], share_group['id'])
self.assertEqual(expected_share_group['name'], share_group['name'])
def test_share_group_get_all_by_host_with_details(self):
fake_host = 'my_fake_host'
expected_share_group = db_utils.create_share_group(host=fake_host)
db_utils.create_share_group()
share_groups = db_api.share_group_get_all_by_host(
self.ctxt, fake_host, detailed=True)
self.assertEqual(1, len(share_groups))
share_group = share_groups[0]
self.assertDictMatch(dict(expected_share_group), dict(share_group))
self.assertEqual(fake_host, share_group['host'])
def test_share_group_get_all_by_project(self):
fake_project = 'fake_project'
expected_group = db_utils.create_share_group(
project_id=fake_project)
db_utils.create_share_group()
groups = db_api.share_group_get_all_by_project(self.ctxt,
fake_project,
detailed=False)
self.assertEqual(1, len(groups))
group = groups[0]
self.assertEqual(2, len(dict(group).keys()))
self.assertEqual(expected_group['id'], group['id'])
self.assertEqual(expected_group['name'], group['name'])
def test_share_group_get_all_by_share_server(self):
fake_server = 123
expected_group = db_utils.create_share_group(
share_server_id=fake_server)
db_utils.create_share_group()
groups = db_api.share_group_get_all_by_share_server(self.ctxt,
fake_server)
self.assertEqual(1, len(groups))
group = groups[0]
self.assertEqual(expected_group['id'], group['id'])
self.assertEqual(expected_group['name'], group['name'])
def test_share_group_get_all_by_project_with_details(self):
fake_project = 'fake_project'
expected_group = db_utils.create_share_group(
project_id=fake_project)
db_utils.create_share_group()
groups = db_api.share_group_get_all_by_project(self.ctxt,
fake_project,
detailed=True)
self.assertEqual(1, len(groups))
group = groups[0]
self.assertDictMatch(dict(expected_group), dict(group))
self.assertEqual(fake_project, group['project_id'])
@ddt.data(({'name': 'fo'}, 0), ({'description': 'd'}, 0),
({'name': 'foo', 'description': 'd'}, 0),
({'name': 'foo'}, 1), ({'description': 'ds'}, 1),
({'name~': 'foo', 'description~': 'ds'}, 2),
({'name': 'foo', 'description~': 'ds'}, 1),
({'name~': 'foo', 'description': 'ds'}, 1))
@ddt.unpack
def test_share_group_get_all_by_name_and_description(
self, search_opts, group_number):
db_utils.create_share_group(name='fo1', description='d1')
expected_group1 = db_utils.create_share_group(name='foo',
description='ds')
expected_group2 = db_utils.create_share_group(name='foo1',
description='ds2')
groups = db_api.share_group_get_all(
self.ctxt, detailed=True,
filters=search_opts)
self.assertEqual(group_number, len(groups))
if group_number == 1:
self.assertDictMatch(dict(expected_group1), dict(groups[0]))
elif group_number == 2:
self.assertDictMatch(dict(expected_group1), dict(groups[1]))
self.assertDictMatch(dict(expected_group2), dict(groups[0]))
def test_share_group_update(self):
fake_name = "my_fake_name"
expected_group = db_utils.create_share_group()
expected_group['name'] = fake_name
db_api.share_group_update(self.ctxt,
expected_group['id'],
{'name': fake_name})
group = db_api.share_group_get(self.ctxt, expected_group['id'])
self.assertEqual(fake_name, group['name'])
def test_share_group_destroy(self):
group = db_utils.create_share_group()
db_api.share_group_get(self.ctxt, group['id'])
db_api.share_group_destroy(self.ctxt, group['id'])
self.assertRaises(exception.NotFound, db_api.share_group_get,
self.ctxt, group['id'])
def test_count_shares_in_share_group(self):
sg = db_utils.create_share_group()
db_utils.create_share(share_group_id=sg['id'])
db_utils.create_share()
count = db_api.count_shares_in_share_group(self.ctxt, sg['id'])
self.assertEqual(1, count)
def test_count_sg_snapshots_in_share_group(self):
sg = db_utils.create_share_group()
db_utils.create_share_group_snapshot(sg['id'])
db_utils.create_share_group_snapshot(sg['id'])
count = db_api.count_share_group_snapshots_in_share_group(
self.ctxt, sg['id'])
self.assertEqual(2, count)
def test_share_group_snapshot_get(self):
sg = db_utils.create_share_group()
sg_snap = db_utils.create_share_group_snapshot(sg['id'])
self.assertDictMatch(
dict(sg_snap),
dict(db_api.share_group_snapshot_get(self.ctxt, sg_snap['id'])))
def test_share_group_snapshot_get_all(self):
sg = db_utils.create_share_group()
expected_sg_snap = db_utils.create_share_group_snapshot(sg['id'])
snaps = db_api.share_group_snapshot_get_all(self.ctxt, detailed=False)
self.assertEqual(1, len(snaps))
snap = snaps[0]
self.assertEqual(2, len(dict(snap).keys()))
self.assertEqual(expected_sg_snap['id'], snap['id'])
self.assertEqual(expected_sg_snap['name'], snap['name'])
def test_share_group_snapshot_get_all_with_detail(self):
sg = db_utils.create_share_group()
expected_sg_snap = db_utils.create_share_group_snapshot(sg['id'])
snaps = db_api.share_group_snapshot_get_all(self.ctxt, detailed=True)
self.assertEqual(1, len(snaps))
snap = snaps[0]
self.assertDictMatch(dict(expected_sg_snap), dict(snap))
def test_share_group_snapshot_get_all_by_project(self):
fake_project = uuidutils.generate_uuid()
sg = db_utils.create_share_group()
expected_sg_snap = db_utils.create_share_group_snapshot(
sg['id'], project_id=fake_project)
snaps = db_api.share_group_snapshot_get_all_by_project(
self.ctxt, fake_project, detailed=False)
self.assertEqual(1, len(snaps))
snap = snaps[0]
self.assertEqual(2, len(dict(snap).keys()))
self.assertEqual(expected_sg_snap['id'], snap['id'])
self.assertEqual(expected_sg_snap['name'], snap['name'])
def test_share_group_snapshot_get_all_by_project_with_details(self):
fake_project = uuidutils.generate_uuid()
sg = db_utils.create_share_group()
expected_sg_snap = db_utils.create_share_group_snapshot(
sg['id'], project_id=fake_project)
snaps = db_api.share_group_snapshot_get_all_by_project(
self.ctxt, fake_project, detailed=True)
self.assertEqual(1, len(snaps))
snap = snaps[0]
self.assertDictMatch(dict(expected_sg_snap), dict(snap))
self.assertEqual(fake_project, snap['project_id'])
def test_share_group_snapshot_update(self):
fake_name = "my_fake_name"
sg = db_utils.create_share_group()
expected_sg_snap = db_utils.create_share_group_snapshot(sg['id'])
expected_sg_snap['name'] = fake_name
db_api.share_group_snapshot_update(
self.ctxt, expected_sg_snap['id'], {'name': fake_name})
sg_snap = db_api.share_group_snapshot_get(
self.ctxt, expected_sg_snap['id'])
self.assertEqual(fake_name, sg_snap['name'])
def test_share_group_snapshot_destroy(self):
sg = db_utils.create_share_group()
sg_snap = db_utils.create_share_group_snapshot(sg['id'])
db_api.share_group_snapshot_get(self.ctxt, sg_snap['id'])
db_api.share_group_snapshot_destroy(self.ctxt, sg_snap['id'])
self.assertRaises(
exception.NotFound,
db_api.share_group_snapshot_get, self.ctxt, sg_snap['id'])
def test_share_group_snapshot_members_get_all(self):
sg = db_utils.create_share_group()
share = db_utils.create_share(share_group_id=sg['id'])
si = db_utils.create_share_instance(share_id=share['id'])
sg_snap = db_utils.create_share_group_snapshot(sg['id'])
expected_member = db_utils.create_share_group_snapshot_member(
sg_snap['id'], share_instance_id=si['id'])
members = db_api.share_group_snapshot_members_get_all(
self.ctxt, sg_snap['id'])
self.assertEqual(1, len(members))
self.assertDictMatch(dict(expected_member), dict(members[0]))
def test_count_share_group_snapshot_members_in_share(self):
sg = db_utils.create_share_group()
share = db_utils.create_share(share_group_id=sg['id'])
si = db_utils.create_share_instance(share_id=share['id'])
share2 = db_utils.create_share(share_group_id=sg['id'])
si2 = db_utils.create_share_instance(share_id=share2['id'])
sg_snap = db_utils.create_share_group_snapshot(sg['id'])
db_utils.create_share_group_snapshot_member(
sg_snap['id'], share_instance_id=si['id'])
db_utils.create_share_group_snapshot_member(
sg_snap['id'], share_instance_id=si2['id'])
count = db_api.count_share_group_snapshot_members_in_share(
self.ctxt, share['id'])
self.assertEqual(1, count)
def test_share_group_snapshot_members_get(self):
sg = db_utils.create_share_group()
share = db_utils.create_share(share_group_id=sg['id'])
si = db_utils.create_share_instance(share_id=share['id'])
sg_snap = db_utils.create_share_group_snapshot(sg['id'])
expected_member = db_utils.create_share_group_snapshot_member(
sg_snap['id'], share_instance_id=si['id'])
member = db_api.share_group_snapshot_member_get(
self.ctxt, expected_member['id'])
self.assertDictMatch(dict(expected_member), dict(member))
def test_share_group_snapshot_members_get_not_found(self):
self.assertRaises(
exception.ShareGroupSnapshotMemberNotFound,
db_api.share_group_snapshot_member_get, self.ctxt, 'fake_id')
def test_share_group_snapshot_member_update(self):
sg = db_utils.create_share_group()
share = db_utils.create_share(share_group_id=sg['id'])
si = db_utils.create_share_instance(share_id=share['id'])
sg_snap = db_utils.create_share_group_snapshot(sg['id'])
expected_member = db_utils.create_share_group_snapshot_member(
sg_snap['id'], share_instance_id=si['id'])
db_api.share_group_snapshot_member_update(
self.ctxt, expected_member['id'],
{'status': constants.STATUS_AVAILABLE})
member = db_api.share_group_snapshot_member_get(
self.ctxt, expected_member['id'])
self.assertEqual(constants.STATUS_AVAILABLE, member['status'])


@ddt.ddt
class ShareGroupTypeAPITestCase(test.TestCase):
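    """Tests for the share group type DB API."""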
def setUp(self):
super(ShareGroupTypeAPITestCase, self).setUp()
self.ctxt = context.RequestContext(
user_id='user_id', project_id='project_id', is_admin=True)
@ddt.data(True, False)
def test_share_type_destroy_in_use(self, used_by_groups):
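        """In-use share group types cannot be destroyed; unused ones can."""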
share_type_1 = db_utils.create_share_type(name='fike')
share_type_2 = db_utils.create_share_type(name='bowman')
share_group_type_1 = db_utils.create_share_group_type(
name='orange', is_public=False, share_types=[share_type_1['id']],
group_specs={'dabo': 'allin', 'cadence': 'count'},
override_defaults=True)
db_api.share_group_type_access_add(self.ctxt,
share_group_type_1['id'],
"2018ndaetfigovnsaslcahfavmrpions")
db_api.share_group_type_access_add(self.ctxt,
share_group_type_1['id'],
"2016ndaetfigovnsaslcahfavmrpions")
share_group_type_2 = db_utils.create_share_group_type(
name='regalia', share_types=[share_type_2['id']])
if used_by_groups:
share_group_1 = db_utils.create_share_group(
share_group_type_id=share_group_type_1['id'],
share_types=[share_type_1['id']])
share_group_2 = db_utils.create_share_group(
share_group_type_id=share_group_type_2['id'],
share_types=[share_type_2['id']])
self.assertRaises(exception.ShareGroupTypeInUse,
db_api.share_group_type_destroy,
self.ctxt, share_group_type_1['id'])
self.assertRaises(exception.ShareGroupTypeInUse,
db_api.share_group_type_destroy,
self.ctxt, share_group_type_2['id'])
# Cleanup share groups
db_api.share_group_destroy(self.ctxt, share_group_1['id'])
db_api.share_group_destroy(self.ctxt, share_group_2['id'])
        # Let's clean up share_group_type_1 and verify it is gone
self.assertIsNone(db_api.share_group_type_destroy(
self.ctxt, share_group_type_1['id']))
self.assertDictMatch(
{}, db_api.share_group_type_specs_get(
self.ctxt, share_group_type_1['id']))
self.assertRaises(exception.ShareGroupTypeNotFound,
db_api.share_group_type_access_get_all,
self.ctxt, share_group_type_1['id'])
self.assertRaises(exception.ShareGroupTypeNotFound,
db_api.share_group_type_get,
self.ctxt, share_group_type_1['id'])
# share_group_type_2 must still be around
self.assertEqual(share_group_type_2['id'],
db_api.share_group_type_get(
self.ctxt, share_group_type_2['id'])['id'])


@ddt.ddt
class ShareSnapshotDatabaseAPITestCase(test.TestCase):
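    """Tests for share snapshot and snapshot instance DB APIs."""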
def setUp(self):
"""Run before each test."""
super(ShareSnapshotDatabaseAPITestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.share_instances = [
db_utils.create_share_instance(
status=constants.STATUS_REPLICATION_CHANGE,
share_id='fake_share_id_1'),
db_utils.create_share_instance(
status=constants.STATUS_AVAILABLE,
share_id='fake_share_id_1'),
db_utils.create_share_instance(
status=constants.STATUS_ERROR_DELETING,
share_id='fake_share_id_2'),
db_utils.create_share_instance(
status=constants.STATUS_MANAGING,
share_id='fake_share_id_2'),
]
self.share_1 = db_utils.create_share(
id='fake_share_id_1', instances=self.share_instances[0:2])
self.share_2 = db_utils.create_share(
id='fake_share_id_2', instances=self.share_instances[2:-1])
self.snapshot_instances = [
db_utils.create_snapshot_instance(
'fake_snapshot_id_1',
status=constants.STATUS_CREATING,
share_instance_id=self.share_instances[0]['id']),
db_utils.create_snapshot_instance(
'fake_snapshot_id_1',
status=constants.STATUS_ERROR,
share_instance_id=self.share_instances[1]['id']),
db_utils.create_snapshot_instance(
'fake_snapshot_id_1',
status=constants.STATUS_DELETING,
share_instance_id=self.share_instances[2]['id']),
db_utils.create_snapshot_instance(
'fake_snapshot_id_2',
status=constants.STATUS_AVAILABLE,
id='fake_snapshot_instance_id',
provider_location='hogsmeade:snapshot1',
progress='87%',
share_instance_id=self.share_instances[3]['id']),
]
self.snapshot_1 = db_utils.create_snapshot(
id='fake_snapshot_id_1', share_id=self.share_1['id'],
instances=self.snapshot_instances[0:3])
self.snapshot_2 = db_utils.create_snapshot(
id='fake_snapshot_id_2', share_id=self.share_2['id'],
instances=self.snapshot_instances[3:4])
self.snapshot_instance_export_locations = [
db_utils.create_snapshot_instance_export_locations(
self.snapshot_instances[0].id,
path='1.1.1.1:/fake_path',
is_admin_only=True),
db_utils.create_snapshot_instance_export_locations(
self.snapshot_instances[1].id,
path='2.2.2.2:/fake_path',
is_admin_only=True),
db_utils.create_snapshot_instance_export_locations(
self.snapshot_instances[2].id,
path='3.3.3.3:/fake_path',
is_admin_only=True),
db_utils.create_snapshot_instance_export_locations(
self.snapshot_instances[3].id,
path='4.4.4.4:/fake_path',
is_admin_only=True)
]
def test_create(self):
share = db_utils.create_share(size=1)
values = {
'share_id': share['id'],
'size': share['size'],
'user_id': share['user_id'],
'project_id': share['project_id'],
'status': constants.STATUS_CREATING,
'progress': '0%',
'share_size': share['size'],
'display_name': 'fake',
'display_description': 'fake',
'share_proto': share['share_proto']
}
actual_result = db_api.share_snapshot_create(
self.ctxt, values, create_snapshot_instance=True)
self.assertEqual(1, len(actual_result.instances))
self.assertSubDictMatch(values, actual_result.to_dict())
def test_share_snapshot_get_latest_for_share(self):
share = db_utils.create_share(size=1)
values = {
'share_id': share['id'],
'size': share['size'],
'user_id': share['user_id'],
'project_id': share['project_id'],
'status': constants.STATUS_CREATING,
'progress': '0%',
'share_size': share['size'],
'display_description': 'fake',
'share_proto': share['share_proto'],
}
values1 = copy.deepcopy(values)
values1['display_name'] = 'snap1'
db_api.share_snapshot_create(self.ctxt, values1)
values2 = copy.deepcopy(values)
values2['display_name'] = 'snap2'
db_api.share_snapshot_create(self.ctxt, values2)
values3 = copy.deepcopy(values)
values3['display_name'] = 'snap3'
db_api.share_snapshot_create(self.ctxt, values3)
result = db_api.share_snapshot_get_latest_for_share(self.ctxt,
share['id'])
self.assertSubDictMatch(values3, result.to_dict())
def test_get_instance(self):
snapshot = db_utils.create_snapshot(with_share=True)
instance = db_api.share_snapshot_instance_get(
self.ctxt, snapshot.instance['id'], with_share_data=True)
instance_dict = instance.to_dict()
self.assertTrue(hasattr(instance, 'name'))
self.assertTrue(hasattr(instance, 'share_name'))
self.assertTrue(hasattr(instance, 'share_id'))
self.assertIn('name', instance_dict)
self.assertIn('share_name', instance_dict)
@ddt.data(None, constants.STATUS_ERROR)
def test_share_snapshot_instance_get_all_with_filters_some(self, status):
expected_status = status or (constants.STATUS_CREATING,
constants.STATUS_DELETING)
expected_number = 1 if status else 3
filters = {
'snapshot_ids': 'fake_snapshot_id_1',
'statuses': expected_status
}
instances = db_api.share_snapshot_instance_get_all_with_filters(
self.ctxt, filters)
for instance in instances:
self.assertEqual('fake_snapshot_id_1', instance['snapshot_id'])
self.assertIn(instance['status'], filters['statuses'])
self.assertEqual(expected_number, len(instances))
def test_share_snapshot_instance_get_all_with_filters_all_filters(self):
filters = {
'snapshot_ids': 'fake_snapshot_id_2',
'instance_ids': 'fake_snapshot_instance_id',
'statuses': constants.STATUS_AVAILABLE,
'share_instance_ids': self.share_instances[3]['id'],
}
instances = db_api.share_snapshot_instance_get_all_with_filters(
self.ctxt, filters, with_share_data=True)
self.assertEqual(1, len(instances))
self.assertEqual('fake_snapshot_instance_id', instances[0]['id'])
self.assertEqual(
self.share_2['id'], instances[0]['share_instance']['share_id'])
def test_share_snapshot_instance_get_all_with_filters_wrong_filters(self):
filters = {
'some_key': 'some_value',
'some_other_key': 'some_other_value',
}
instances = db_api.share_snapshot_instance_get_all_with_filters(
self.ctxt, filters)
self.assertEqual(6, len(instances))
def test_share_snapshot_instance_create(self):
snapshot = db_utils.create_snapshot(with_share=True)
share = snapshot['share']
share_instance = db_utils.create_share_instance(share_id=share['id'])
values = {
'snapshot_id': snapshot['id'],
'share_instance_id': share_instance['id'],
'status': constants.STATUS_MANAGING,
'progress': '88%',
'provider_location': 'whomping_willow',
}
actual_result = db_api.share_snapshot_instance_create(
self.ctxt, snapshot['id'], values)
snapshot = db_api.share_snapshot_get(self.ctxt, snapshot['id'])
self.assertSubDictMatch(values, actual_result.to_dict())
self.assertEqual(2, len(snapshot['instances']))
def test_share_snapshot_instance_update(self):
snapshot = db_utils.create_snapshot(with_share=True)
values = {
'snapshot_id': snapshot['id'],
'status': constants.STATUS_ERROR,
'progress': '18%',
'provider_location': 'godrics_hollow',
}
actual_result = db_api.share_snapshot_instance_update(
self.ctxt, snapshot['instance']['id'], values)
self.assertSubDictMatch(values, actual_result.to_dict())
@ddt.data(2, 1)
def test_share_snapshot_instance_delete(self, instances):
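        """Deleting the last snapshot instance also removes the snapshot."""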
snapshot = db_utils.create_snapshot(with_share=True)
first_instance_id = snapshot['instance']['id']
if instances > 1:
instance = db_utils.create_snapshot_instance(
snapshot['id'],
share_instance_id=snapshot['share']['instance']['id'])
else:
instance = snapshot['instance']
retval = db_api.share_snapshot_instance_delete(
self.ctxt, instance['id'])
self.assertIsNone(retval)
if instances == 1:
self.assertRaises(exception.ShareSnapshotNotFound,
db_api.share_snapshot_get,
self.ctxt, snapshot['id'])
else:
snapshot = db_api.share_snapshot_get(self.ctxt, snapshot['id'])
self.assertEqual(1, len(snapshot['instances']))
self.assertEqual(first_instance_id, snapshot['instance']['id'])
def test_share_snapshot_access_create(self):
values = {
'share_snapshot_id': self.snapshot_1['id'],
}
actual_result = db_api.share_snapshot_access_create(self.ctxt,
values)
self.assertSubDictMatch(values, actual_result.to_dict())
def test_share_snapshot_instance_access_get_all(self):
access = db_utils.create_snapshot_access(
share_snapshot_id=self.snapshot_1['id'])
session = db_api.get_session()
values = {'share_snapshot_instance_id': self.snapshot_instances[0].id,
'access_id': access['id']}
rules = db_api.share_snapshot_instance_access_get_all(
self.ctxt, access['id'], session)
self.assertSubDictMatch(values, rules[0].to_dict())
def test_share_snapshot_access_get(self):
access = db_utils.create_snapshot_access(
share_snapshot_id=self.snapshot_1['id'])
values = {'share_snapshot_id': self.snapshot_1['id']}
actual_value = db_api.share_snapshot_access_get(
self.ctxt, access['id'])
self.assertSubDictMatch(values, actual_value.to_dict())
def test_share_snapshot_access_get_all_for_share_snapshot(self):
access = db_utils.create_snapshot_access(
share_snapshot_id=self.snapshot_1['id'])
values = {'access_type': access['access_type'],
'access_to': access['access_to'],
'share_snapshot_id': self.snapshot_1['id']}
actual_value = db_api.share_snapshot_access_get_all_for_share_snapshot(
self.ctxt, self.snapshot_1['id'], {})
self.assertSubDictMatch(values, actual_value[0].to_dict())
@ddt.data({'existing': {'access_type': 'cephx', 'access_to': 'alice'},
'new': {'access_type': 'user', 'access_to': 'alice'},
'result': False},
{'existing': {'access_type': 'user', 'access_to': 'bob'},
'new': {'access_type': 'user', 'access_to': 'bob'},
'result': True},
{'existing': {'access_type': 'ip', 'access_to': '10.0.0.10/32'},
'new': {'access_type': 'ip', 'access_to': '10.0.0.10'},
'result': True},
{'existing': {'access_type': 'ip', 'access_to': '10.10.0.11'},
'new': {'access_type': 'ip', 'access_to': '10.10.0.11'},
'result': True},
{'existing': {'access_type': 'ip', 'access_to': 'fd21::11'},
'new': {'access_type': 'ip', 'access_to': 'fd21::11'},
'result': True},
{'existing': {'access_type': 'ip', 'access_to': 'fd21::10'},
'new': {'access_type': 'ip', 'access_to': 'fd21::10/128'},
'result': True},
{'existing': {'access_type': 'ip', 'access_to': '10.10.0.0/22'},
'new': {'access_type': 'ip', 'access_to': '10.10.0.0/24'},
'result': False},
{'existing': {'access_type': 'ip', 'access_to': '2620:52::/48'},
'new': {'access_type': 'ip',
'access_to': '2620:52:0:13b8::/64'},
'result': False})
@ddt.unpack
def test_share_snapshot_check_for_existing_access(self, existing, new,
result):
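        """Rules addressing the same client are reported as already existing."""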
db_utils.create_snapshot_access(
share_snapshot_id=self.snapshot_1['id'],
access_type=existing['access_type'],
access_to=existing['access_to'])
rule_exists = db_api.share_snapshot_check_for_existing_access(
self.ctxt, self.snapshot_1['id'], new['access_type'],
new['access_to'])
self.assertEqual(result, rule_exists)
def test_share_snapshot_access_get_all_for_snapshot_instance(self):
access = db_utils.create_snapshot_access(
share_snapshot_id=self.snapshot_1['id'])
values = {'access_type': access['access_type'],
'access_to': access['access_to'],
'share_snapshot_id': self.snapshot_1['id']}
out = db_api.share_snapshot_access_get_all_for_snapshot_instance(
self.ctxt, self.snapshot_instances[0].id)
self.assertSubDictMatch(values, out[0].to_dict())
def test_share_snapshot_instance_access_update_state(self):
access = db_utils.create_snapshot_access(
share_snapshot_id=self.snapshot_1['id'])
values = {'state': constants.STATUS_ACTIVE,
'access_id': access['id'],
'share_snapshot_instance_id': self.snapshot_instances[0].id}
actual_result = db_api.share_snapshot_instance_access_update(
self.ctxt, access['id'], self.snapshot_1.instance['id'],
{'state': constants.STATUS_ACTIVE})
self.assertSubDictMatch(values, actual_result.to_dict())
def test_share_snapshot_instance_access_get(self):
access = db_utils.create_snapshot_access(
share_snapshot_id=self.snapshot_1['id'])
values = {'access_id': access['id'],
'share_snapshot_instance_id': self.snapshot_instances[0].id}
actual_result = db_api.share_snapshot_instance_access_get(
self.ctxt, access['id'], self.snapshot_instances[0].id)
self.assertSubDictMatch(values, actual_result.to_dict())
def test_share_snapshot_instance_access_delete(self):
access = db_utils.create_snapshot_access(
share_snapshot_id=self.snapshot_1['id'])
db_api.share_snapshot_instance_access_delete(
self.ctxt, access['id'], self.snapshot_1.instance['id'])
def test_share_snapshot_instance_export_location_create(self):
values = {
'share_snapshot_instance_id': self.snapshot_instances[0].id,
}
actual_result = db_api.share_snapshot_instance_export_location_create(
self.ctxt, values)
self.assertSubDictMatch(values, actual_result.to_dict())
def test_share_snapshot_export_locations_get(self):
out = db_api.share_snapshot_export_locations_get(
self.ctxt, self.snapshot_1['id'])
keys = ['share_snapshot_instance_id', 'path', 'is_admin_only']
for expected, actual in zip(self.snapshot_instance_export_locations,
out):
            for k in keys:
                self.assertEqual(expected[k], actual[k])
def test_share_snapshot_instance_export_locations_get(self):
out = db_api.share_snapshot_instance_export_locations_get_all(
self.ctxt, self.snapshot_instances[0].id)
keys = ['share_snapshot_instance_id', 'path', 'is_admin_only']
for key in keys:
self.assertEqual(self.snapshot_instance_export_locations[0][key],
out[0][key])
def test_share_snapshot_instance_export_locations_update(self):
snapshot = db_utils.create_snapshot(with_share=True)
initial_locations = ['fake1/1/', 'fake2/2', 'fake3/3']
update_locations = ['fake4/4', 'fake2/2', 'fake3/3']
# add initial locations
db_api.share_snapshot_instance_export_locations_update(
self.ctxt, snapshot.instance['id'], initial_locations, False)
# update locations
db_api.share_snapshot_instance_export_locations_update(
self.ctxt, snapshot.instance['id'], update_locations, True)
get_result = db_api.share_snapshot_instance_export_locations_get_all(
self.ctxt, snapshot.instance['id'])
result_locations = [el['path'] for el in get_result]
self.assertEqual(sorted(result_locations), sorted(update_locations))
def test_share_snapshot_instance_export_locations_update_wrong_type(self):
snapshot = db_utils.create_snapshot(with_share=True)
new_export_locations = [1]
self.assertRaises(
exception.ManilaException,
db_api.share_snapshot_instance_export_locations_update,
self.ctxt, snapshot.instance['id'], new_export_locations, False)


class ShareExportLocationsDatabaseAPITestCase(test.TestCase):
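    """Tests for the share export locations DB API."""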
def setUp(self):
super(ShareExportLocationsDatabaseAPITestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_update_valid_order(self):
share = db_utils.create_share()
initial_locations = ['fake1/1/', 'fake2/2', 'fake3/3']
update_locations = ['fake4/4', 'fake2/2', 'fake3/3']
# add initial locations
db_api.share_export_locations_update(self.ctxt, share.instance['id'],
initial_locations, False)
# update locations
db_api.share_export_locations_update(self.ctxt, share.instance['id'],
update_locations, True)
actual_result = db_api.share_export_locations_get(self.ctxt,
share['id'])
        # The actual result should list the locations in the same order
self.assertEqual(actual_result, update_locations)
def test_update_string(self):
share = db_utils.create_share()
initial_location = 'fake1/1/'
db_api.share_export_locations_update(self.ctxt, share.instance['id'],
initial_location, False)
actual_result = db_api.share_export_locations_get(self.ctxt,
share['id'])
self.assertEqual(actual_result, [initial_location])
def test_get_admin_export_locations(self):
ctxt_user = context.RequestContext(
user_id='fake user', project_id='fake project', is_admin=False)
share = db_utils.create_share()
locations = [
{'path': 'fake1/1/', 'is_admin_only': True},
{'path': 'fake2/2/', 'is_admin_only': True},
{'path': 'fake3/3/', 'is_admin_only': True},
]
db_api.share_export_locations_update(
self.ctxt, share.instance['id'], locations, delete=False)
user_result = db_api.share_export_locations_get(ctxt_user, share['id'])
self.assertEqual([], user_result)
admin_result = db_api.share_export_locations_get(
self.ctxt, share['id'])
self.assertEqual(3, len(admin_result))
for location in locations:
self.assertIn(location['path'], admin_result)
def test_get_user_export_locations(self):
ctxt_user = context.RequestContext(
user_id='fake user', project_id='fake project', is_admin=False)
share = db_utils.create_share()
locations = [
{'path': 'fake1/1/', 'is_admin_only': False},
{'path': 'fake2/2/', 'is_admin_only': False},
{'path': 'fake3/3/', 'is_admin_only': False},
]
db_api.share_export_locations_update(
self.ctxt, share.instance['id'], locations, delete=False)
user_result = db_api.share_export_locations_get(ctxt_user, share['id'])
self.assertEqual(3, len(user_result))
for location in locations:
self.assertIn(location['path'], user_result)
admin_result = db_api.share_export_locations_get(
self.ctxt, share['id'])
self.assertEqual(3, len(admin_result))
for location in locations:
self.assertIn(location['path'], admin_result)
def test_get_user_export_locations_old_view(self):
ctxt_user = context.RequestContext(
user_id='fake user', project_id='fake project', is_admin=False)
share = db_utils.create_share()
locations = ['fake1/1/', 'fake2/2', 'fake3/3']
db_api.share_export_locations_update(
self.ctxt, share.instance['id'], locations, delete=False)
user_result = db_api.share_export_locations_get(ctxt_user, share['id'])
self.assertEqual(locations, user_result)
admin_result = db_api.share_export_locations_get(
self.ctxt, share['id'])
self.assertEqual(locations, admin_result)


@ddt.ddt
class ShareInstanceExportLocationsMetadataDatabaseAPITestCase(test.TestCase):
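    """Tests for the export location metadata DB API."""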
def setUp(self):
clname = ShareInstanceExportLocationsMetadataDatabaseAPITestCase
super(clname, self).setUp()
self.ctxt = context.get_admin_context()
share_id = 'fake_share_id'
instances = [
db_utils.create_share_instance(
share_id=share_id,
status=constants.STATUS_AVAILABLE),
db_utils.create_share_instance(
share_id=share_id,
status=constants.STATUS_MIGRATING),
db_utils.create_share_instance(
share_id=share_id,
status=constants.STATUS_MIGRATING_TO),
]
self.share = db_utils.create_share(
id=share_id,
instances=instances)
self.initial_locations = ['/fake/foo/', '/fake/bar', '/fake/quuz']
self.shown_locations = ['/fake/foo/', '/fake/bar']
for i in range(0, 3):
db_api.share_export_locations_update(
self.ctxt, instances[i]['id'], self.initial_locations[i],
delete=False)
def _get_export_location_uuid_by_path(self, path):
els = db_api.share_export_locations_get_by_share_id(
self.ctxt, self.share.id)
export_location_uuid = None
for el in els:
if el.path == path:
export_location_uuid = el.uuid
self.assertIsNotNone(export_location_uuid)
return export_location_uuid
def test_get_export_locations_by_share_id(self):
els = db_api.share_export_locations_get_by_share_id(
self.ctxt, self.share.id)
self.assertEqual(3, len(els))
for path in self.shown_locations:
self.assertTrue(any([path in el.path for el in els]))
def test_get_export_locations_by_share_id_ignore_migration_dest(self):
els = db_api.share_export_locations_get_by_share_id(
self.ctxt, self.share.id, ignore_migration_destination=True)
self.assertEqual(2, len(els))
for path in self.shown_locations:
self.assertTrue(any([path in el.path for el in els]))
def test_get_export_locations_by_share_instance_id(self):
els = db_api.share_export_locations_get_by_share_instance_id(
self.ctxt, self.share.instance.id)
self.assertEqual(1, len(els))
for path in [self.shown_locations[1]]:
self.assertTrue(any([path in el.path for el in els]))
def test_export_location_metadata_update_delete(self):
export_location_uuid = self._get_export_location_uuid_by_path(
self.initial_locations[0])
metadata = {
'foo_key': 'foo_value',
'bar_key': 'bar_value',
'quuz_key': 'quuz_value',
}
db_api.export_location_metadata_update(
self.ctxt, export_location_uuid, metadata, False)
db_api.export_location_metadata_delete(
self.ctxt, export_location_uuid, list(metadata.keys())[0:-1])
result = db_api.export_location_metadata_get(
self.ctxt, export_location_uuid)
key = list(metadata.keys())[-1]
self.assertEqual({key: metadata[key]}, result)
db_api.export_location_metadata_delete(
self.ctxt, export_location_uuid)
result = db_api.export_location_metadata_get(
self.ctxt, export_location_uuid)
self.assertEqual({}, result)
def test_export_location_metadata_update_get(self):
# Write metadata for target export location
export_location_uuid = self._get_export_location_uuid_by_path(
self.initial_locations[0])
metadata = {'foo_key': 'foo_value', 'bar_key': 'bar_value'}
db_api.export_location_metadata_update(
self.ctxt, export_location_uuid, metadata, False)
        # Write metadata for another export location to verify isolation
other_export_location_uuid = self._get_export_location_uuid_by_path(
self.initial_locations[1])
other_metadata = {'key_from_other_el': 'value_of_key_from_other_el'}
db_api.export_location_metadata_update(
self.ctxt, other_export_location_uuid, other_metadata, False)
result = db_api.export_location_metadata_get(
self.ctxt, export_location_uuid)
self.assertEqual(metadata, result)
updated_metadata = {
'foo_key': metadata['foo_key'],
'quuz_key': 'quuz_value',
}
db_api.export_location_metadata_update(
self.ctxt, export_location_uuid, updated_metadata, True)
result = db_api.export_location_metadata_get(
self.ctxt, export_location_uuid)
self.assertEqual(updated_metadata, result)
@ddt.data(
("k", "v"),
("k" * 256, "v"),
("k", "v" * 1024),
("k" * 256, "v" * 1024),
)
@ddt.unpack
def test_set_metadata_with_different_length(self, key, value):
export_location_uuid = self._get_export_location_uuid_by_path(
self.initial_locations[1])
metadata = {key: value}
db_api.export_location_metadata_update(
self.ctxt, export_location_uuid, metadata, False)
result = db_api.export_location_metadata_get(
self.ctxt, export_location_uuid)
self.assertEqual(metadata, result)


@ddt.ddt
class DriverPrivateDataDatabaseAPITestCase(test.TestCase):
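    """Tests for the driver private data DB API."""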
def setUp(self):
"""Run before each test."""
super(DriverPrivateDataDatabaseAPITestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_driver_test_data(self):
return uuidutils.generate_uuid()
@ddt.data({"details": {"foo": "bar", "tee": "too"},
"valid": {"foo": "bar", "tee": "too"}},
{"details": {"foo": "bar", "tee": ["test"]},
"valid": {"foo": "bar", "tee": str(["test"])}})
@ddt.unpack
def test_update(self, details, valid):
test_id = self._get_driver_test_data()
initial_data = db_api.driver_private_data_get(self.ctxt, test_id)
db_api.driver_private_data_update(self.ctxt, test_id, details)
actual_data = db_api.driver_private_data_get(self.ctxt, test_id)
self.assertEqual({}, initial_data)
self.assertEqual(valid, actual_data)
@ddt.data({'with_deleted': True, 'append': False},
{'with_deleted': True, 'append': True},
{'with_deleted': False, 'append': False},
{'with_deleted': False, 'append': True})
@ddt.unpack
def test_update_with_more_values(self, with_deleted, append):
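        """Updates merge with prior details unless those were deleted."""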
test_id = self._get_driver_test_data()
details = {"tee": "too"}
more_details = {"foo": "bar"}
result = {"tee": "too", "foo": "bar"}
db_api.driver_private_data_update(self.ctxt, test_id, details)
if with_deleted:
db_api.driver_private_data_delete(self.ctxt, test_id)
if append:
more_details.update(details)
if with_deleted and not append:
result.pop("tee")
db_api.driver_private_data_update(self.ctxt, test_id, more_details)
actual_result = db_api.driver_private_data_get(self.ctxt,
test_id)
self.assertEqual(result, actual_result)
@ddt.data(True, False)
def test_update_with_duplicate(self, with_deleted):
test_id = self._get_driver_test_data()
details = {"tee": "too"}
db_api.driver_private_data_update(self.ctxt, test_id, details)
if with_deleted:
db_api.driver_private_data_delete(self.ctxt, test_id)
db_api.driver_private_data_update(self.ctxt, test_id, details)
actual_result = db_api.driver_private_data_get(self.ctxt,
test_id)
self.assertEqual(details, actual_result)
def test_update_with_delete_existing(self):
test_id = self._get_driver_test_data()
details = {"key1": "val1", "key2": "val2", "key3": "val3"}
details_update = {"key1": "val1_upd", "key4": "new_val"}
# Create new details
db_api.driver_private_data_update(self.ctxt, test_id, details)
db_api.driver_private_data_update(self.ctxt, test_id,
details_update, delete_existing=True)
actual_result = db_api.driver_private_data_get(
self.ctxt, test_id)
self.assertEqual(details_update, actual_result)
def test_get(self):
test_id = self._get_driver_test_data()
test_key = "foo"
test_keys = [test_key, "tee"]
details = {test_keys[0]: "val", test_keys[1]: "val", "mee": "foo"}
db_api.driver_private_data_update(self.ctxt, test_id, details)
actual_result_all = db_api.driver_private_data_get(
self.ctxt, test_id)
actual_result_single_key = db_api.driver_private_data_get(
self.ctxt, test_id, test_key)
actual_result_list = db_api.driver_private_data_get(
self.ctxt, test_id, test_keys)
self.assertEqual(details, actual_result_all)
self.assertEqual(details[test_key], actual_result_single_key)
self.assertEqual(dict.fromkeys(test_keys, "val"), actual_result_list)
def test_delete_single(self):
test_id = self._get_driver_test_data()
test_key = "foo"
details = {test_key: "bar", "tee": "too"}
valid_result = {"tee": "too"}
db_api.driver_private_data_update(self.ctxt, test_id, details)
db_api.driver_private_data_delete(self.ctxt, test_id, test_key)
actual_result = db_api.driver_private_data_get(
self.ctxt, test_id)
self.assertEqual(valid_result, actual_result)
def test_delete_all(self):
test_id = self._get_driver_test_data()
details = {"foo": "bar", "tee": "too"}
db_api.driver_private_data_update(self.ctxt, test_id, details)
db_api.driver_private_data_delete(self.ctxt, test_id)
actual_result = db_api.driver_private_data_get(
self.ctxt, test_id)
self.assertEqual({}, actual_result)


@ddt.ddt
class ShareNetworkDatabaseAPITestCase(BaseDatabaseAPITestCase):
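    """Tests for the share network DB API."""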
def __init__(self, *args, **kwargs):
super(ShareNetworkDatabaseAPITestCase, self).__init__(*args, **kwargs)
self.fake_context = context.RequestContext(user_id='fake user',
project_id='fake project',
is_admin=False)
def setUp(self):
super(ShareNetworkDatabaseAPITestCase, self).setUp()
self.share_nw_dict = {'id': 'fake network id',
'project_id': self.fake_context.project_id,
'user_id': 'fake_user_id',
'name': 'whatever',
'description': 'fake description'}
def test_create_one_network(self):
result = db_api.share_network_create(self.fake_context,
self.share_nw_dict)
self._check_fields(expected=self.share_nw_dict, actual=result)
self.assertEqual(0, len(result['share_instances']))
self.assertEqual(0, len(result['security_services']))
def test_create_two_networks_in_different_tenants(self):
share_nw_dict2 = self.share_nw_dict.copy()
share_nw_dict2['id'] = None
share_nw_dict2['project_id'] = 'fake project 2'
result1 = db_api.share_network_create(self.fake_context,
self.share_nw_dict)
result2 = db_api.share_network_create(self.fake_context.elevated(),
share_nw_dict2)
self._check_fields(expected=self.share_nw_dict, actual=result1)
self._check_fields(expected=share_nw_dict2, actual=result2)
def test_create_two_networks_in_one_tenant(self):
share_nw_dict2 = self.share_nw_dict.copy()
share_nw_dict2['id'] += "suffix"
result1 = db_api.share_network_create(self.fake_context,
self.share_nw_dict)
result2 = db_api.share_network_create(self.fake_context,
share_nw_dict2)
self._check_fields(expected=self.share_nw_dict, actual=result1)
self._check_fields(expected=share_nw_dict2, actual=result2)
def test_create_with_duplicated_id(self):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
self.assertRaises(db_exception.DBDuplicateEntry,
db_api.share_network_create,
self.fake_context,
self.share_nw_dict)
def test_get(self):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self._check_fields(expected=self.share_nw_dict, actual=result)
self.assertEqual(0, len(result['share_instances']))
self.assertEqual(0, len(result['security_services']))
def _create_share_network_for_project(self, project_id):
ctx = context.RequestContext(user_id='fake user',
project_id=project_id,
is_admin=False)
share_data = self.share_nw_dict.copy()
share_data['project_id'] = project_id
db_api.share_network_create(ctx, share_data)
return share_data
def test_get_other_tenant_as_admin(self):
expected = self._create_share_network_for_project('fake project 2')
result = db_api.share_network_get(self.fake_context.elevated(),
self.share_nw_dict['id'])
self._check_fields(expected=expected, actual=result)
self.assertEqual(0, len(result['share_instances']))
self.assertEqual(0, len(result['security_services']))
def test_get_other_tenant(self):
self._create_share_network_for_project('fake project 2')
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_get,
self.fake_context,
self.share_nw_dict['id'])
@ddt.data([{'id': 'fake share id1'}],
[{'id': 'fake share id1'}, {'id': 'fake share id2'}],)
def test_get_with_shares(self, shares):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
share_instances = []
for share in shares:
share.update({'share_network_id': self.share_nw_dict['id']})
share_instances.append(
db_api.share_create(self.fake_context, share).instance
)
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(shares), len(result['share_instances']))
for index, share_instance in enumerate(share_instances):
self.assertEqual(
share_instance['share_network_id'],
result['share_instances'][index]['share_network_id']
)
@ddt.data([{'id': 'fake security service id1', 'type': 'fake type'}],
[{'id': 'fake security service id1', 'type': 'fake type'},
{'id': 'fake security service id2', 'type': 'fake type'}])
def test_get_with_security_services(self, security_services):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
for service in security_services:
service.update({'project_id': self.fake_context.project_id})
db_api.security_service_create(self.fake_context, service)
db_api.share_network_add_security_service(
self.fake_context, self.share_nw_dict['id'], service['id'])
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(security_services),
len(result['security_services']))
for index, service in enumerate(security_services):
self._check_fields(expected=service,
actual=result['security_services'][index])
@ddt.data([{'id': 'fake_id_1', 'availability_zone_id': 'None'}],
[{'id': 'fake_id_2', 'availability_zone_id': 'None'},
{'id': 'fake_id_3', 'availability_zone_id': 'fake_az_id'}])
def test_get_with_subnets(self, subnets):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
for subnet in subnets:
subnet['share_network_id'] = self.share_nw_dict['id']
db_api.share_network_subnet_create(self.fake_context, subnet)
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(subnets),
len(result['share_network_subnets']))
for index, subnet in enumerate(subnets):
self._check_fields(expected=subnet,
actual=result['share_network_subnets'][index])
def test_get_not_found(self):
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_get,
self.fake_context,
'fake id')
def test_delete(self):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.share_network_delete(self.fake_context,
self.share_nw_dict['id'])
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_get,
self.fake_context,
self.share_nw_dict['id'])
def test_delete_not_found(self):
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_delete,
self.fake_context,
'fake id')
def test_update(self):
new_name = 'fake_new_name'
db_api.share_network_create(self.fake_context, self.share_nw_dict)
result_update = db_api.share_network_update(self.fake_context,
self.share_nw_dict['id'],
{'name': new_name})
result_get = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(new_name, result_update['name'])
self._check_fields(expected=dict(result_update.items()),
actual=dict(result_get.items()))
def test_update_not_found(self):
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_update,
self.fake_context,
'fake id',
{})
@ddt.data(1, 2)
def test_get_all_one_record(self, records_count):
index = 0
share_networks = []
while index < records_count:
share_network_dict = dict(self.share_nw_dict)
fake_id = 'fake_id%s' % index
share_network_dict.update({'id': fake_id,
'project_id': fake_id})
share_networks.append(share_network_dict)
db_api.share_network_create(self.fake_context.elevated(),
share_network_dict)
index += 1
result = db_api.share_network_get_all(self.fake_context.elevated())
self.assertEqual(len(share_networks), len(result))
for index, net in enumerate(share_networks):
self._check_fields(expected=net, actual=result[index])
def test_get_all_by_project(self):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
share_nw_dict2 = dict(self.share_nw_dict)
share_nw_dict2['id'] = 'fake share nw id2'
share_nw_dict2['project_id'] = 'fake project 2'
new_context = context.RequestContext(user_id='fake user 2',
project_id='fake project 2',
is_admin=False)
db_api.share_network_create(new_context, share_nw_dict2)
result = db_api.share_network_get_all_by_project(
self.fake_context.elevated(),
share_nw_dict2['project_id'])
self.assertEqual(1, len(result))
self._check_fields(expected=share_nw_dict2, actual=result[0])
def test_add_security_service(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
db_api.share_network_add_security_service(self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
result = (db_api.model_query(
self.fake_context,
models.ShareNetworkSecurityServiceAssociation).
filter_by(security_service_id=security_dict1['id']).
filter_by(share_network_id=self.share_nw_dict['id']).
first())
self.assertIsNotNone(result)
def test_add_security_service_not_found_01(self):
security_service_id = 'unknown security service'
db_api.share_network_create(self.fake_context, self.share_nw_dict)
self.assertRaises(exception.SecurityServiceNotFound,
db_api.share_network_add_security_service,
self.fake_context,
self.share_nw_dict['id'],
security_service_id)
def test_add_security_service_not_found_02(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
share_nw_id = 'unknown share network'
db_api.security_service_create(self.fake_context, security_dict1)
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_add_security_service,
self.fake_context,
share_nw_id,
security_dict1['id'])
def test_add_security_service_association_error_already_associated(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
db_api.share_network_add_security_service(self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
self.assertRaises(
exception.ShareNetworkSecurityServiceAssociationError,
db_api.share_network_add_security_service,
self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
def test_remove_security_service(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
db_api.share_network_add_security_service(self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
db_api.share_network_remove_security_service(self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
result = (db_api.model_query(
self.fake_context,
models.ShareNetworkSecurityServiceAssociation).
filter_by(security_service_id=security_dict1['id']).
filter_by(share_network_id=self.share_nw_dict['id']).first())
self.assertIsNone(result)
share_nw_ref = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(0, len(share_nw_ref['security_services']))
def test_remove_security_service_not_found_01(self):
security_service_id = 'unknown security service'
db_api.share_network_create(self.fake_context, self.share_nw_dict)
self.assertRaises(exception.SecurityServiceNotFound,
db_api.share_network_remove_security_service,
self.fake_context,
self.share_nw_dict['id'],
security_service_id)
def test_remove_security_service_not_found_02(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
share_nw_id = 'unknown share network'
db_api.security_service_create(self.fake_context, security_dict1)
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_remove_security_service,
self.fake_context,
share_nw_id,
security_dict1['id'])
def test_remove_security_service_dissociation_error(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
self.assertRaises(
exception.ShareNetworkSecurityServiceDissociationError,
db_api.share_network_remove_security_service,
self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
def test_security_services_relation(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(0, len(result['security_services']))
def test_shares_relation(self):
share_dict = {'id': 'fake share id1'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.share_create(self.fake_context, share_dict)
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(0, len(result['share_instances']))
def test_association_get(self):
network = db_api.share_network_create(
self.fake_context, self.share_nw_dict)
security_service = db_api.security_service_create(
self.fake_context, security_service_dict)
network_id = network['id']
security_service_id = security_service['id']
db_api.share_network_add_security_service(
self.fake_context, network_id, security_service_id)
result = db_api.share_network_security_service_association_get(
self.fake_context, network_id, security_service_id)
self.assertEqual(result['share_network_id'], network_id)
self.assertEqual(result['security_service_id'], security_service_id)
def test_share_network_update_security_service(self):
new_sec_service = copy.copy(security_service_dict)
new_sec_service['id'] = 'fakeid'
share_network_id = self.share_nw_dict['id']
db_api.share_network_create(
self.fake_context, self.share_nw_dict)
db_api.security_service_create(
self.fake_context, security_service_dict)
db_api.security_service_create(self.fake_context, new_sec_service)
db_api.share_network_add_security_service(
self.fake_context, share_network_id,
security_service_dict['id'])
db_api.share_network_update_security_service(
self.fake_context, share_network_id, security_service_dict['id'],
new_sec_service['id'])
association = db_api.share_network_security_service_association_get(
self.fake_context, share_network_id, new_sec_service['id'])
self.assertEqual(association['share_network_id'], share_network_id)
self.assertEqual(
association['security_service_id'], new_sec_service['id'])


@ddt.ddt
class ShareNetworkSubnetDatabaseAPITestCase(BaseDatabaseAPITestCase):
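    """Tests for the share network subnet DB API."""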
def __init__(self, *args, **kwargs):
super(ShareNetworkSubnetDatabaseAPITestCase, self).__init__(
*args, **kwargs)
self.fake_context = context.RequestContext(user_id='fake user',
project_id='fake project',
is_admin=False)
def setUp(self):
super(ShareNetworkSubnetDatabaseAPITestCase, self).setUp()
self.subnet_dict = {'id': 'fake network id',
'neutron_net_id': 'fake net id',
'neutron_subnet_id': 'fake subnet id',
'network_type': 'vlan',
'segmentation_id': 1000,
'share_network_id': 'fake_id',
'cidr': '10.0.0.0/24',
'ip_version': 4,
'availability_zone_id': None}
def test_create(self):
result = db_api.share_network_subnet_create(
self.fake_context, self.subnet_dict)
self._check_fields(expected=self.subnet_dict, actual=result)
def test_create_duplicated_id(self):
db_api.share_network_subnet_create(self.fake_context, self.subnet_dict)
self.assertRaises(db_exception.DBDuplicateEntry,
db_api.share_network_subnet_create,
self.fake_context,
self.subnet_dict)
def test_get(self):
db_api.share_network_subnet_create(self.fake_context, self.subnet_dict)
result = db_api.share_network_subnet_get(self.fake_context,
self.subnet_dict['id'])
self._check_fields(expected=self.subnet_dict, actual=result)
@ddt.data([{'id': 'fake_id_1', 'identifier': 'fake_identifier',
'host': 'fake_host'}],
[{'id': 'fake_id_2', 'identifier': 'fake_identifier',
'host': 'fake_host'},
{'id': 'fake_id_3', 'identifier': 'fake_identifier',
'host': 'fake_host'}])
def test_get_with_share_servers(self, share_servers):
db_api.share_network_subnet_create(self.fake_context,
self.subnet_dict)
for share_server in share_servers:
share_server['share_network_subnet_id'] = self.subnet_dict['id']
db_api.share_server_create(self.fake_context, share_server)
result = db_api.share_network_subnet_get(self.fake_context,
self.subnet_dict['id'])
self.assertEqual(len(share_servers),
len(result['share_servers']))
for index, share_server in enumerate(share_servers):
self._check_fields(expected=share_server,
actual=result['share_servers'][index])
def test_get_not_found(self):
db_api.share_network_subnet_create(self.fake_context, self.subnet_dict)
self.assertRaises(exception.ShareNetworkSubnetNotFound,
db_api.share_network_subnet_get,
self.fake_context,
'fake_id')
def test_delete(self):
db_api.share_network_subnet_create(self.fake_context, self.subnet_dict)
db_api.share_network_subnet_delete(self.fake_context,
self.subnet_dict['id'])
self.assertRaises(exception.ShareNetworkSubnetNotFound,
db_api.share_network_subnet_delete,
self.fake_context,
self.subnet_dict['id'])
def test_delete_not_found(self):
self.assertRaises(exception.ShareNetworkSubnetNotFound,
db_api.share_network_subnet_delete,
self.fake_context,
'fake_id')
def test_update(self):
update_dict = {
'gateway': 'fake_gateway',
'ip_version': 6,
'mtu': ''
}
db_api.share_network_subnet_create(self.fake_context, self.subnet_dict)
db_api.share_network_subnet_update(
self.fake_context, self.subnet_dict['id'], update_dict)
result = db_api.share_network_subnet_get(self.fake_context,
self.subnet_dict['id'])
self._check_fields(expected=update_dict, actual=result)
def test_update_not_found(self):
self.assertRaises(exception.ShareNetworkSubnetNotFound,
db_api.share_network_subnet_update,
self.fake_context,
self.subnet_dict['id'],
{})
@ddt.data([
{
'id': 'sn_id1',
'project_id': 'fake project',
'user_id': 'fake'
}
], [
{
'id': 'fake_id',
'project_id': 'fake project',
'user_id': 'fake'
},
{
'id': 'sn_id2',
'project_id': 'fake project',
'user_id': 'fake'
}
])
def test_get_all_by_share_network(self, share_networks):
for idx, share_network in enumerate(share_networks):
self.subnet_dict['share_network_id'] = share_network['id']
self.subnet_dict['id'] = 'fake_id%s' % idx
db_api.share_network_create(self.fake_context, share_network)
db_api.share_network_subnet_create(self.fake_context,
self.subnet_dict)
for share_network in share_networks:
subnets = db_api.share_network_subnet_get_all_by_share_network(
self.fake_context, share_network['id'])
self.assertEqual(1, len(subnets))
def test_get_by_availability_zone_id(self):
az = db_api.availability_zone_create_if_not_exist(self.fake_context,
'fake_zone_id')
self.subnet_dict['availability_zone_id'] = az['id']
db_api.share_network_subnet_create(self.fake_context, self.subnet_dict)
result = db_api.share_network_subnet_get_by_availability_zone_id(
self.fake_context, self.subnet_dict['share_network_id'], az['id'])
self._check_fields(expected=self.subnet_dict, actual=result)
def test_get_default_subnet(self):
db_api.share_network_subnet_create(self.fake_context, self.subnet_dict)
result = db_api.share_network_subnet_get_default_subnet(
self.fake_context, self.subnet_dict['share_network_id'])
self._check_fields(expected=self.subnet_dict, actual=result)


@ddt.ddt
class SecurityServiceDatabaseAPITestCase(BaseDatabaseAPITestCase):
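    """Tests for the security service DB API."""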
def __init__(self, *args, **kwargs):
super(SecurityServiceDatabaseAPITestCase, self).__init__(*args,
**kwargs)
self.fake_context = context.RequestContext(user_id='fake user',
project_id='fake project',
is_admin=False)
def _check_expected_fields(self, result, expected):
for key in expected:
self.assertEqual(expected[key], result[key])
def test_create(self):
result = db_api.security_service_create(self.fake_context,
security_service_dict)
self._check_expected_fields(result, security_service_dict)
def test_create_with_duplicated_id(self):
db_api.security_service_create(self.fake_context,
security_service_dict)
self.assertRaises(db_exception.DBDuplicateEntry,
db_api.security_service_create,
self.fake_context,
security_service_dict)
def test_get(self):
db_api.security_service_create(self.fake_context,
security_service_dict)
result = db_api.security_service_get(self.fake_context,
security_service_dict['id'])
self._check_expected_fields(result, security_service_dict)
def test_get_not_found(self):
self.assertRaises(exception.SecurityServiceNotFound,
db_api.security_service_get,
self.fake_context,
'wrong id')
def test_delete(self):
db_api.security_service_create(self.fake_context,
security_service_dict)
db_api.security_service_delete(self.fake_context,
security_service_dict['id'])
self.assertRaises(exception.SecurityServiceNotFound,
db_api.security_service_get,
self.fake_context,
security_service_dict['id'])
def test_update(self):
update_dict = {
'dns_ip': 'new dns',
'server': 'new ldap server',
'domain': 'new ldap domain',
'ou': 'new ldap ou',
'user': 'new user',
'password': 'new password',
'name': 'new whatever',
'description': 'new nevermind',
}
db_api.security_service_create(self.fake_context,
security_service_dict)
result = db_api.security_service_update(self.fake_context,
security_service_dict['id'],
update_dict)
self._check_expected_fields(result, update_dict)
def test_update_no_updates(self):
db_api.security_service_create(self.fake_context,
security_service_dict)
result = db_api.security_service_update(self.fake_context,
security_service_dict['id'],
{})
self._check_expected_fields(result, security_service_dict)
def test_update_not_found(self):
self.assertRaises(exception.SecurityServiceNotFound,
db_api.security_service_update,
self.fake_context,
'wrong id',
{})
def test_get_all_no_records(self):
result = db_api.security_service_get_all(self.fake_context)
self.assertEqual(0, len(result))
@ddt.data(1, 2)
def test_get_all(self, records_count):
index = 0
services = []
while index < records_count:
service_dict = dict(security_service_dict)
service_dict.update({'id': 'fake_id%s' % index})
services.append(service_dict)
db_api.security_service_create(self.fake_context, service_dict)
index += 1
result = db_api.security_service_get_all(self.fake_context)
self.assertEqual(len(services), len(result))
for index, service in enumerate(services):
self._check_fields(expected=service, actual=result[index])
def test_get_all_two_records(self):
dict1 = security_service_dict
dict2 = security_service_dict.copy()
dict2['id'] = 'fake id 2'
db_api.security_service_create(self.fake_context,
dict1)
db_api.security_service_create(self.fake_context,
dict2)
result = db_api.security_service_get_all(self.fake_context)
self.assertEqual(2, len(result))
def test_get_all_by_project(self):
dict1 = security_service_dict
dict2 = security_service_dict.copy()
dict2['id'] = 'fake id 2'
dict2['project_id'] = 'fake project 2'
db_api.security_service_create(self.fake_context,
dict1)
db_api.security_service_create(self.fake_context,
dict2)
result1 = db_api.security_service_get_all_by_project(
self.fake_context,
dict1['project_id'])
self.assertEqual(1, len(result1))
self._check_expected_fields(result1[0], dict1)
result2 = db_api.security_service_get_all_by_project(
self.fake_context,
dict2['project_id'])
self.assertEqual(1, len(result2))
self._check_expected_fields(result2[0], dict2)
@ddt.ddt
class ShareServerDatabaseAPITestCase(test.TestCase):
def setUp(self):
super(ShareServerDatabaseAPITestCase, self).setUp()
self.ctxt = context.RequestContext(user_id='user_id',
project_id='project_id',
is_admin=True)
def test_share_server_get(self):
expected = db_utils.create_share_server()
server = db_api.share_server_get(self.ctxt, expected['id'])
self.assertEqual(expected['id'], server['id'])
self.assertEqual(expected.share_network_subnet_id,
server.share_network_subnet_id)
self.assertEqual(expected.host, server.host)
self.assertEqual(expected.status, server.status)
def test_get_not_found(self):
fake_id = 'FAKE_UUID'
self.assertRaises(exception.ShareServerNotFound,
db_api.share_server_get, self.ctxt, fake_id)
def test_create(self):
server = db_utils.create_share_server()
self.assertTrue(server['id'])
self.assertEqual(server.share_network_subnet_id,
server['share_network_subnet_id'])
self.assertEqual(server.host, server['host'])
self.assertEqual(server.status, server['status'])
def test_delete(self):
server = db_utils.create_share_server()
num_records = len(db_api.share_server_get_all(self.ctxt))
db_api.share_server_delete(self.ctxt, server['id'])
self.assertEqual(num_records - 1,
len(db_api.share_server_get_all(self.ctxt)))
def test_delete_not_found(self):
fake_id = 'FAKE_UUID'
self.assertRaises(exception.ShareServerNotFound,
db_api.share_server_delete,
self.ctxt, fake_id)
def test_update(self):
update = {
'share_network_id': 'update_net',
'host': 'update_host',
'status': constants.STATUS_ACTIVE,
}
server = db_utils.create_share_server()
updated_server = db_api.share_server_update(self.ctxt, server['id'],
update)
self.assertEqual(server['id'], updated_server['id'])
self.assertEqual(update['share_network_id'],
updated_server.share_network_id)
self.assertEqual(update['host'], updated_server.host)
self.assertEqual(update['status'], updated_server.status)
def test_update_not_found(self):
fake_id = 'FAKE_UUID'
self.assertRaises(exception.ShareServerNotFound,
db_api.share_server_update,
self.ctxt, fake_id, {})
def test_get_all_by_host_and_share_net_valid(self):
subnet_1 = {
'id': '1',
'share_network_id': '1',
}
subnet_2 = {
'id': '2',
'share_network_id': '2',
}
valid = {
'share_network_subnet_id': '1',
'host': 'host1',
'status': constants.STATUS_ACTIVE,
}
invalid = {
'share_network_subnet_id': '2',
'host': 'host1',
'status': constants.STATUS_ERROR,
}
other = {
'share_network_subnet_id': '1',
'host': 'host2',
'status': constants.STATUS_ACTIVE,
}
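        # Only the first server matches host1 and subnet '1' with ACTIVE
        # status; the other two differ in status or host.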
db_utils.create_share_network_subnet(**subnet_1)
db_utils.create_share_network_subnet(**subnet_2)
valid = db_utils.create_share_server(**valid)
db_utils.create_share_server(**invalid)
db_utils.create_share_server(**other)
servers = db_api.share_server_get_all_by_host_and_share_subnet_valid(
self.ctxt,
host='host1',
share_subnet_id='1')
self.assertEqual(valid['id'], servers[0]['id'])
def test_get_all_by_host_and_share_net_not_found(self):
self.assertRaises(
exception.ShareServerNotFound,
db_api.share_server_get_all_by_host_and_share_subnet_valid,
self.ctxt, host='fake', share_subnet_id='fake'
)
def test_get_all(self):
srv1 = {
'share_network_id': '1',
'host': 'host1',
'status': constants.STATUS_ACTIVE,
}
srv2 = {
'share_network_id': '1',
'host': 'host1',
'status': constants.STATUS_ERROR,
}
srv3 = {
'share_network_id': '2',
'host': 'host2',
'status': constants.STATUS_ACTIVE,
}
servers = db_api.share_server_get_all(self.ctxt)
self.assertEqual(0, len(servers))
to_delete = db_utils.create_share_server(**srv1)
db_utils.create_share_server(**srv2)
db_utils.create_share_server(**srv3)
servers = db_api.share_server_get_all(self.ctxt)
self.assertEqual(3, len(servers))
db_api.share_server_delete(self.ctxt, to_delete['id'])
servers = db_api.share_server_get_all(self.ctxt)
self.assertEqual(2, len(servers))
def test_backend_details_set(self):
details = {
'value1': '1',
'value2': '2',
}
server = db_utils.create_share_server()
db_api.share_server_backend_details_set(self.ctxt, server['id'],
details)
self.assertDictMatch(
details,
db_api.share_server_get(self.ctxt, server['id'])['backend_details']
)
def test_backend_details_set_not_found(self):
fake_id = 'FAKE_UUID'
self.assertRaises(exception.ShareServerNotFound,
db_api.share_server_backend_details_set,
self.ctxt, fake_id, {})
def test_get_with_details(self):
values = {
'share_network_subnet_id': 'fake-share-net-id',
'host': 'hostname',
'status': constants.STATUS_ACTIVE,
}
details = {
'value1': '1',
'value2': '2',
}
srv_id = db_utils.create_share_server(**values)['id']
db_api.share_server_backend_details_set(self.ctxt, srv_id, details)
server = db_api.share_server_get(self.ctxt, srv_id)
self.assertEqual(srv_id, server['id'])
self.assertEqual(values['share_network_subnet_id'],
server.share_network_subnet_id)
self.assertEqual(values['host'], server.host)
self.assertEqual(values['status'], server.status)
self.assertDictMatch(server['backend_details'], details)
self.assertIn('backend_details', server.to_dict())
def test_delete_with_details(self):
server = db_utils.create_share_server(backend_details={
'value1': '1',
'value2': '2',
})
num_records = len(db_api.share_server_get_all(self.ctxt))
db_api.share_server_delete(self.ctxt, server['id'])
self.assertEqual(num_records - 1,
len(db_api.share_server_get_all(self.ctxt)))
@ddt.data('fake', '-fake-', 'foo_some_fake_identifier_bar',
'foo-some-fake-identifier-bar', 'foobar')
def test_share_server_search_by_identifier(self, identifier):
server = {
'share_network_id': 'fake-share-net-id',
'host': 'hostname',
'status': constants.STATUS_ACTIVE,
'is_auto_deletable': True,
'updated_at': datetime.datetime(2018, 5, 1),
'identifier': 'some_fake_identifier',
}
server = db_utils.create_share_server(**server)
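        # 'foobar' neither contains nor is contained in the stored identifier
        # 'some_fake_identifier', so that lookup should raise; every other
        # pattern overlaps with the identifier and should match.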
if identifier == 'foobar':
self.assertRaises(exception.ShareServerNotFound,
db_api.share_server_search_by_identifier,
self.ctxt, identifier)
else:
result = db_api.share_server_search_by_identifier(
self.ctxt, identifier)
self.assertEqual(server['id'], result[0]['id'])
@ddt.data((True, True, True, 3),
(True, True, False, 2),
(True, False, False, 1),
(False, False, False, 0))
@ddt.unpack
def test_share_server_get_all_unused_deletable(self,
server_1_is_auto_deletable,
server_2_is_auto_deletable,
server_3_is_auto_deletable,
expected_len):
server1 = {
'share_network_id': 'fake-share-net-id',
'host': 'hostname',
'status': constants.STATUS_ACTIVE,
'is_auto_deletable': server_1_is_auto_deletable,
'updated_at': datetime.datetime(2018, 5, 1)
}
server2 = {
'share_network_id': 'fake-share-net-id',
'host': 'hostname',
'status': constants.STATUS_ACTIVE,
'is_auto_deletable': server_2_is_auto_deletable,
'updated_at': datetime.datetime(2018, 5, 1)
}
server3 = {
'share_network_id': 'fake-share-net-id',
'host': 'hostname',
'status': constants.STATUS_ACTIVE,
'is_auto_deletable': server_3_is_auto_deletable,
'updated_at': datetime.datetime(2018, 5, 1)
}
db_utils.create_share_server(**server1)
db_utils.create_share_server(**server2)
db_utils.create_share_server(**server3)
host = 'hostname'
updated_before = datetime.datetime(2019, 5, 1)
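        # updated_before is later than every server's updated_at, so the
        # result count depends only on the is_auto_deletable flags.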
unused_deletable = db_api.share_server_get_all_unused_deletable(
self.ctxt, host, updated_before)
self.assertEqual(expected_len, len(unused_deletable))
@ddt.data({'host': 'fakepool@fakehost'},
{'status': constants.STATUS_SERVER_MIGRATING_TO},
{'source_share_server_id': 'fake_ss_id'},
{'share_network_id': uuidutils.generate_uuid()})
def test_share_server_get_all_with_filters(self, filters):
server_data = copy.copy(filters)
share_network_id = server_data.pop('share_network_id', None)
share_network_subnet = {}
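        # A share_network_id filter is matched through the server's subnet,
        # so build the network and subnet chain before creating the server.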
if share_network_id:
db_utils.create_share_network(id=share_network_id)
share_network_subnet = db_utils.create_share_network_subnet(
id=uuidutils.generate_uuid(),
share_network_id=share_network_id)
server_data['share_network_subnet_id'] = share_network_subnet['id']
db_utils.create_share_server(**server_data)
db_utils.create_share_server()
filter_keys = filters.keys()
results = db_api.share_server_get_all_with_filters(self.ctxt, filters)
self.assertEqual(1, len(results))
for result in results:
for key in filter_keys:
if key == 'share_network_id':
self.assertEqual(share_network_subnet['share_network_id'],
filters[key])
self.assertEqual(share_network_subnet['id'],
result['share_network_subnet_id'])
else:
                    self.assertEqual(filters[key], result[key])
@ddt.data('fake@fake', 'host1@backend1')
def test_share_server_get_all_by_host(self, host):
db_utils.create_share_server(host='fake@fake')
db_utils.create_share_server(host='host1@backend1')
share_servers = db_api.share_server_get_all_by_host(self.ctxt, host)
self.assertEqual(1, len(share_servers))
for share_server in share_servers:
self.assertEqual(host, share_server['host'])
def test_share_servers_update(self):
servers = [db_utils.create_share_server()
for __ in range(1, 3)]
server_ids = [server['id'] for server in servers]
values = {'status': constants.STATUS_NETWORK_CHANGE}
db_api.share_servers_update(
self.ctxt, server_ids, values)
share_servers = [
db_api.share_server_get(self.ctxt, server_id)
for server_id in server_ids]
for ss in share_servers:
self.assertEqual(constants.STATUS_NETWORK_CHANGE, ss['status'])
class ServiceDatabaseAPITestCase(test.TestCase):
def setUp(self):
super(ServiceDatabaseAPITestCase, self).setUp()
self.ctxt = context.RequestContext(user_id='user_id',
project_id='project_id',
is_admin=True)
self.service_data = {'host': "fake_host",
'binary': "fake_binary",
'topic': "fake_topic",
'report_count': 0,
'availability_zone': "fake_zone"}
def test_create(self):
service = db_api.service_create(self.ctxt, self.service_data)
az = db_api.availability_zone_get(self.ctxt, "fake_zone")
self.assertEqual(az.id, service.availability_zone_id)
self.assertSubDictMatch(self.service_data, service.to_dict())
def test_update(self):
az_name = 'fake_zone2'
update_data = {"availability_zone": az_name}
service = db_api.service_create(self.ctxt, self.service_data)
db_api.service_update(self.ctxt, service['id'], update_data)
service = db_api.service_get(self.ctxt, service['id'])
az = db_api.availability_zone_get(self.ctxt, az_name)
self.assertEqual(az.id, service.availability_zone_id)
valid_values = self.service_data
valid_values.update(update_data)
self.assertSubDictMatch(valid_values, service.to_dict())
@ddt.ddt
class AvailabilityZonesDatabaseAPITestCase(test.TestCase):
def setUp(self):
super(AvailabilityZonesDatabaseAPITestCase, self).setUp()
self.ctxt = context.RequestContext(user_id='user_id',
project_id='project_id',
is_admin=True)
@ddt.data({'fake': 'fake'}, {}, {'fakeavailability_zone': 'fake'},
{'availability_zone': None}, {'availability_zone': ''})
def test__ensure_availability_zone_exists_invalid(self, test_values):
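        # None of these value dicts carries a usable 'availability_zone'
        # entry, so the helper is expected to raise ValueError.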
session = db_api.get_session()
self.assertRaises(ValueError, db_api._ensure_availability_zone_exists,
self.ctxt, test_values, session)
def test_az_get(self):
az_name = 'test_az'
az = db_api.availability_zone_create_if_not_exist(self.ctxt, az_name)
az_by_id = db_api.availability_zone_get(self.ctxt, az['id'])
az_by_name = db_api.availability_zone_get(self.ctxt, az_name)
self.assertEqual(az_name, az_by_id['name'])
self.assertEqual(az_name, az_by_name['name'])
self.assertEqual(az['id'], az_by_id['id'])
self.assertEqual(az['id'], az_by_name['id'])
def test_az_get_all(self):
db_api.availability_zone_create_if_not_exist(self.ctxt, 'test1')
db_api.availability_zone_create_if_not_exist(self.ctxt, 'test2')
db_api.availability_zone_create_if_not_exist(self.ctxt, 'test3')
db_api.service_create(self.ctxt, {'availability_zone': 'test2'})
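        # Only availability zones referenced by a service should be returned,
        # so 'test2' is the single expected result.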
actual_result = db_api.availability_zone_get_all(self.ctxt)
self.assertEqual(1, len(actual_result))
self.assertEqual('test2', actual_result[0]['name'])
@ddt.ddt
class NetworkAllocationsDatabaseAPITestCase(test.TestCase):
def setUp(self):
super(NetworkAllocationsDatabaseAPITestCase, self).setUp()
self.user_id = 'user_id'
self.project_id = 'project_id'
self.share_server_id = 'foo_share_server_id'
self.ctxt = context.RequestContext(
user_id=self.user_id, project_id=self.project_id, is_admin=True)
self.user_network_allocations = [
{'share_server_id': self.share_server_id,
'ip_address': '1.1.1.1',
'status': constants.STATUS_ACTIVE,
'label': None},
{'share_server_id': self.share_server_id,
'ip_address': '2.2.2.2',
'status': constants.STATUS_ACTIVE,
'label': 'user'},
]
self.admin_network_allocations = [
{'share_server_id': self.share_server_id,
'ip_address': '3.3.3.3',
'status': constants.STATUS_ACTIVE,
'label': 'admin'},
{'share_server_id': self.share_server_id,
'ip_address': '4.4.4.4',
'status': constants.STATUS_ACTIVE,
'label': 'admin'},
]
def _setup_network_allocations_get_for_share_server(self):
# Create share network
share_network_data = {
'id': 'foo_share_network_id',
'user_id': self.user_id,
'project_id': self.project_id,
}
db_api.share_network_create(self.ctxt, share_network_data)
# Create share server
share_server_data = {
'id': self.share_server_id,
'share_network_id': share_network_data['id'],
'host': 'fake_host',
'status': 'active',
}
db_api.share_server_create(self.ctxt, share_server_data)
# Create user network allocations
for user_network_allocation in self.user_network_allocations:
db_api.network_allocation_create(
self.ctxt, user_network_allocation)
# Create admin network allocations
for admin_network_allocation in self.admin_network_allocations:
db_api.network_allocation_create(
self.ctxt, admin_network_allocation)
def test_get_only_user_network_allocations(self):
self._setup_network_allocations_get_for_share_server()
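        # Allocations labelled 'user' and unlabelled allocations should both
        # be returned when querying with label='user'.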
result = db_api.network_allocations_get_for_share_server(
self.ctxt, self.share_server_id, label='user')
self.assertEqual(
len(self.user_network_allocations), len(result))
for na in result:
self.assertIn(na.label, (None, 'user'))
def test_get_only_admin_network_allocations(self):
self._setup_network_allocations_get_for_share_server()
result = db_api.network_allocations_get_for_share_server(
self.ctxt, self.share_server_id, label='admin')
self.assertEqual(
len(self.admin_network_allocations), len(result))
for na in result:
self.assertEqual(na.label, 'admin')
def test_get_all_network_allocations(self):
self._setup_network_allocations_get_for_share_server()
result = db_api.network_allocations_get_for_share_server(
self.ctxt, self.share_server_id, label=None)
self.assertEqual(
len(self.user_network_allocations +
self.admin_network_allocations),
len(result)
)
for na in result:
self.assertIn(na.label, ('admin', 'user', None))
def test_network_allocation_get(self):
self._setup_network_allocations_get_for_share_server()
for allocation in self.admin_network_allocations:
result = db_api.network_allocation_get(self.ctxt, allocation['id'])
self.assertIsInstance(result, models.NetworkAllocation)
self.assertEqual(allocation['id'], result.id)
for allocation in self.user_network_allocations:
result = db_api.network_allocation_get(self.ctxt, allocation['id'])
self.assertIsInstance(result, models.NetworkAllocation)
self.assertEqual(allocation['id'], result.id)
def test_network_allocation_get_no_result(self):
self._setup_network_allocations_get_for_share_server()
self.assertRaises(exception.NotFound,
db_api.network_allocation_get,
self.ctxt,
id='fake')
@ddt.data(True, False)
def test_network_allocation_get_read_deleted(self, read_deleted):
self._setup_network_allocations_get_for_share_server()
deleted_allocation = {
'share_server_id': self.share_server_id,
'ip_address': '1.1.1.1',
'status': constants.STATUS_ACTIVE,
'label': None,
'deleted': True,
}
new_obj = db_api.network_allocation_create(self.ctxt,
deleted_allocation)
if read_deleted:
result = db_api.network_allocation_get(self.ctxt, new_obj.id,
read_deleted=read_deleted)
self.assertIsInstance(result, models.NetworkAllocation)
self.assertEqual(new_obj.id, result.id)
else:
self.assertRaises(exception.NotFound,
db_api.network_allocation_get,
self.ctxt,
id=self.share_server_id)
def test_network_allocation_update(self):
self._setup_network_allocations_get_for_share_server()
for allocation in self.admin_network_allocations:
old_obj = db_api.network_allocation_get(self.ctxt,
allocation['id'])
self.assertEqual('False', old_obj.deleted)
updated_object = db_api.network_allocation_update(
self.ctxt, allocation['id'], {'deleted': 'True'})
self.assertEqual('True', updated_object.deleted)
@ddt.data(True, False)
def test_network_allocation_update_read_deleted(self, read_deleted):
self._setup_network_allocations_get_for_share_server()
db_api.network_allocation_update(
self.ctxt,
self.admin_network_allocations[0]['id'],
{'deleted': 'True'}
)
if read_deleted:
updated_object = db_api.network_allocation_update(
self.ctxt, self.admin_network_allocations[0]['id'],
{'deleted': 'False'}, read_deleted=read_deleted
)
self.assertEqual('False', updated_object.deleted)
else:
self.assertRaises(exception.NotFound,
db_api.network_allocation_update,
self.ctxt,
id=self.share_server_id,
values={'deleted': read_deleted},
read_deleted=read_deleted)
class ReservationDatabaseAPITest(test.TestCase):
def setUp(self):
super(ReservationDatabaseAPITest, self).setUp()
self.context = context.get_admin_context()
def test_reservation_expire(self):
quota_usage = db_api.quota_usage_create(self.context, 'fake_project',
'fake_user', 'fake_resource',
0, 12, until_refresh=None)
session = db_api.get_session()
for time_s in (-1, 1):
reservation = db_api._reservation_create(
self.context, 'fake_uuid',
quota_usage, 'fake_project',
'fake_user', 'fake_resource', 10,
timeutils.utcnow() +
datetime.timedelta(days=time_s),
session=session)
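        # Expiring reservations should purge only the one created with an
        # expiry in the past and roll its delta out of the reserved count.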
db_api.reservation_expire(self.context)
reservations = db_api._quota_reservations_query(session, self.context,
['fake_uuid']).all()
quota_usage = db_api.quota_usage_get(self.context, 'fake_project',
'fake_resource')
self.assertEqual(1, len(reservations))
self.assertEqual(reservation['id'], reservations[0]['id'])
self.assertEqual(2, quota_usage['reserved'])
@ddt.ddt
class PurgeDeletedTest(test.TestCase):
def setUp(self):
super(PurgeDeletedTest, self).setUp()
self.context = context.get_admin_context()
def _days_ago(self, begin, end):
return timeutils.utcnow() - datetime.timedelta(
days=random.randint(begin, end))
def _turn_on_foreign_key(self):
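        # The PRAGMA enables foreign key enforcement on SQLite, which the
        # constraint-related purge test below relies on.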
engine = db_api.get_engine()
connection = engine.raw_connection()
try:
cursor = connection.cursor()
cursor.execute("PRAGMA foreign_keys = ON")
finally:
connection.close()
@ddt.data({"del_days": 0, "num_left": 0},
{"del_days": 10, "num_left": 2},
{"del_days": 20, "num_left": 4})
@ddt.unpack
def test_purge_records_with_del_days(self, del_days, num_left):
fake_now = timeutils.utcnow()
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=fake_now)):
# create resources soft-deleted in 0~9, 10~19 days ago
for start, end in ((0, 9), (10, 19)):
for unused in range(2):
# share type
db_utils.create_share_type(id=uuidutils.generate_uuid(),
deleted_at=self._days_ago(start,
end))
# share
share = db_utils.create_share_without_instance(
metadata={},
deleted_at=self._days_ago(start, end))
# create share network
network = db_utils.create_share_network(
id=uuidutils.generate_uuid(),
deleted_at=self._days_ago(start, end))
# create security service
db_utils.create_security_service(
id=uuidutils.generate_uuid(),
share_network_id=network.id,
deleted_at=self._days_ago(start, end))
# create share instance
s_instance = db_utils.create_share_instance(
id=uuidutils.generate_uuid(),
share_network_id=network.id,
share_id=share.id)
# share access
db_utils.create_share_access(
id=uuidutils.generate_uuid(),
share_id=share['id'],
deleted_at=self._days_ago(start, end))
# create share server
db_utils.create_share_server(
id=uuidutils.generate_uuid(),
deleted_at=self._days_ago(start, end),
share_network_id=network.id)
# create snapshot
db_api.share_snapshot_create(
self.context, {'share_id': share['id'],
'deleted_at': self._days_ago(start,
end)},
create_snapshot_instance=False)
# update share instance
db_api.share_instance_update(
self.context,
s_instance.id,
{'deleted_at': self._days_ago(start, end)})
db_api.purge_deleted_records(self.context, age_in_days=del_days)
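        # Only rows soft-deleted more recently than the cutoff should survive.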
for model in [models.ShareTypes, models.Share,
models.ShareNetwork, models.ShareAccessMapping,
models.ShareInstance, models.ShareServer,
models.ShareSnapshot, models.SecurityService]:
rows = db_api.model_query(self.context, model).count()
self.assertEqual(num_left, rows)
def test_purge_records_with_illegal_args(self):
self.assertRaises(TypeError, db_api.purge_deleted_records,
self.context)
self.assertRaises(exception.InvalidParameterValue,
db_api.purge_deleted_records,
self.context,
age_in_days=-1)
def test_purge_records_with_constraint(self):
self._turn_on_foreign_key()
type_id = uuidutils.generate_uuid()
# create share type1
db_utils.create_share_type(id=type_id,
deleted_at=self._days_ago(1, 1))
# create share type2
db_utils.create_share_type(id=uuidutils.generate_uuid(),
deleted_at=self._days_ago(1, 1))
# create share
share = db_utils.create_share(share_type_id=type_id)
db_api.purge_deleted_records(self.context, age_in_days=0)
type_row = db_api.model_query(self.context,
models.ShareTypes).count()
# share type1 should not be deleted
self.assertEqual(1, type_row)
db_api.model_query(self.context, models.ShareInstance).delete()
db_api.share_delete(self.context, share['id'])
db_api.purge_deleted_records(self.context, age_in_days=0)
s_row = db_api.model_query(self.context, models.Share).count()
type_row = db_api.model_query(self.context,
models.ShareTypes).count()
self.assertEqual(0, s_row + type_row)
@ddt.ddt
class ShareTypeAPITestCase(test.TestCase):
def setUp(self):
super(ShareTypeAPITestCase, self).setUp()
self.ctxt = context.RequestContext(
user_id='user_id', project_id='project_id', is_admin=True)
@ddt.data({'used_by_shares': True, 'used_by_group_types': False},
{'used_by_shares': False, 'used_by_group_types': True},
{'used_by_shares': True, 'used_by_group_types': True})
@ddt.unpack
def test_share_type_destroy_in_use(self, used_by_shares,
used_by_group_types):
share_type_1 = db_utils.create_share_type(
name='orange', extra_specs={'somekey': 'someval'},
is_public=False, override_defaults=True)
share_type_2 = db_utils.create_share_type(
name='regalia', override_defaults=True)
db_api.share_type_access_add(self.ctxt,
share_type_1['id'],
"2018ndaetfigovnsaslcahfavmrpions")
db_api.share_type_access_add(self.ctxt,
share_type_1['id'],
"2016ndaetfigovnsaslcahfavmrpions")
if used_by_shares:
share_1 = db_utils.create_share(share_type_id=share_type_1['id'])
db_utils.create_share(share_type_id=share_type_2['id'])
if used_by_group_types:
group_type_1 = db_utils.create_share_group_type(
name='crimson', share_types=[share_type_1['id']])
db_utils.create_share_group_type(
name='tide', share_types=[share_type_2['id']])
share_group_1 = db_utils.create_share_group(
share_group_type_id=group_type_1['id'],
share_types=[share_type_1['id']])
self.assertRaises(exception.ShareTypeInUse,
db_api.share_type_destroy,
self.ctxt, share_type_1['id'])
self.assertRaises(exception.ShareTypeInUse,
db_api.share_type_destroy,
self.ctxt, share_type_2['id'])
        # Let's clean up share_type_1 and verify it is gone
if used_by_shares:
db_api.share_instance_delete(self.ctxt, share_1.instance.id)
if used_by_group_types:
db_api.share_group_destroy(self.ctxt, share_group_1['id'])
db_api.share_group_type_destroy(self.ctxt,
group_type_1['id'])
self.assertIsNone(
db_api.share_type_destroy(self.ctxt, share_type_1['id']))
self.assertDictMatch(
{}, db_api.share_type_extra_specs_get(
self.ctxt, share_type_1['id']))
self.assertRaises(exception.ShareTypeNotFound,
db_api.share_type_access_get_all,
self.ctxt, share_type_1['id'])
self.assertRaises(exception.ShareTypeNotFound,
db_api.share_type_get,
self.ctxt, share_type_1['id'])
# share_type_2 must still be around
self.assertEqual(
share_type_2['id'],
db_api.share_type_get(self.ctxt, share_type_2['id'])['id'])
@ddt.data({'usages': False, 'reservations': False},
{'usages': False, 'reservations': True},
{'usages': True, 'reservations': False})
@ddt.unpack
def test_share_type_destroy_quotas_and_reservations(self, usages,
reservations):
share_type = db_utils.create_share_type(name='clemsontigers')
shares_quota = db_api.quota_create(
self.ctxt, "fake-project-id", 'shares', 10,
share_type_id=share_type['id'])
snapshots_quota = db_api.quota_create(
self.ctxt, "fake-project-id", 'snapshots', 30,
share_type_id=share_type['id'])
if reservations:
resources = {
'shares': quota.ReservableResource('shares', '_sync_shares'),
'snapshots': quota.ReservableResource(
'snapshots', '_sync_snapshots'),
}
project_quotas = {
'shares': shares_quota.hard_limit,
'snapshots': snapshots_quota.hard_limit,
}
user_quotas = {
'shares': shares_quota.hard_limit,
'snapshots': snapshots_quota.hard_limit,
}
deltas = {'shares': 1, 'snapshots': 3}
expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
reservation_uuids = db_api.quota_reserve(
self.ctxt, resources, project_quotas, user_quotas,
project_quotas, deltas, expire, False, 30,
project_id='fake-project-id', share_type_id=share_type['id'])
db_session = db_api.get_session()
q_reservations = db_api._quota_reservations_query(
db_session, self.ctxt, reservation_uuids).all()
# There should be 2 "user" reservations and 2 "share-type"
# quota reservations
self.assertEqual(4, len(q_reservations))
q_share_type_reservations = [qr for qr in q_reservations
if qr['share_type_id'] is not None]
# There should be exactly two "share type" quota reservations
self.assertEqual(2, len(q_share_type_reservations))
for q_reservation in q_share_type_reservations:
self.assertEqual(q_reservation['share_type_id'],
share_type['id'])
if usages:
db_api.quota_usage_create(self.ctxt, 'fake-project-id',
'fake-user-id', 'shares', 3, 2, False,
share_type_id=share_type['id'])
db_api.quota_usage_create(self.ctxt, 'fake-project-id',
'fake-user-id', 'snapshots', 2, 2, False,
share_type_id=share_type['id'])
q_usages = db_api.quota_usage_get_all_by_project_and_share_type(
self.ctxt, 'fake-project-id', share_type['id'])
self.assertEqual(3, q_usages['shares']['in_use'])
self.assertEqual(2, q_usages['shares']['reserved'])
self.assertEqual(2, q_usages['snapshots']['in_use'])
self.assertEqual(2, q_usages['snapshots']['reserved'])
# Validate that quotas exist
share_type_quotas = db_api.quota_get_all_by_project_and_share_type(
self.ctxt, 'fake-project-id', share_type['id'])
expected_quotas = {
'project_id': 'fake-project-id',
'share_type_id': share_type['id'],
'shares': 10,
'snapshots': 30,
}
self.assertDictMatch(expected_quotas, share_type_quotas)
db_api.share_type_destroy(self.ctxt, share_type['id'])
self.assertRaises(exception.ShareTypeNotFound,
db_api.share_type_get,
self.ctxt, share_type['id'])
# Quotas must be gone
share_type_quotas = db_api.quota_get_all_by_project_and_share_type(
self.ctxt, 'fake-project-id', share_type['id'])
self.assertEqual({'project_id': 'fake-project-id',
'share_type_id': share_type['id']},
share_type_quotas)
# Check usages and reservations
if usages:
q_usages = db_api.quota_usage_get_all_by_project_and_share_type(
self.ctxt, 'fake-project-id', share_type['id'])
expected_q_usages = {'project_id': 'fake-project-id',
'share_type_id': share_type['id']}
self.assertDictMatch(expected_q_usages, q_usages)
if reservations:
q_reservations = db_api._quota_reservations_query(
db_session, self.ctxt, reservation_uuids).all()
# just "user" quota reservations should be left, since we didn't
# clean them up.
self.assertEqual(2, len(q_reservations))
for q_reservation in q_reservations:
self.assertIsNone(q_reservation['share_type_id'])
@ddt.data(
(None, None, 5),
('fake2', None, 2),
(None, 'fake', 3),
)
@ddt.unpack
def test_share_replica_data_get_for_project(
self, user_id, share_type_id, expected_result):
kwargs = {}
if share_type_id:
kwargs.update({'id': share_type_id})
share_type_1 = db_utils.create_share_type(**kwargs)
share_type_2 = db_utils.create_share_type()
share_1 = db_utils.create_share(size=1, user_id='fake',
share_type_id=share_type_1['id'])
share_2 = db_utils.create_share(size=1, user_id='fake2',
share_type_id=share_type_2['id'])
project_id = share_1['project_id']
db_utils.create_share_replica(
replica_state=constants.REPLICA_STATE_ACTIVE,
share_id=share_1['id'], share_type_id=share_type_1['id'])
db_utils.create_share_replica(
replica_state=constants.REPLICA_STATE_IN_SYNC,
share_id=share_1['id'], share_type_id=share_type_1['id'])
db_utils.create_share_replica(
replica_state=constants.REPLICA_STATE_IN_SYNC,
share_id=share_1['id'], share_type_id=share_type_1['id'])
db_utils.create_share_replica(
replica_state=constants.REPLICA_STATE_ACTIVE,
share_id=share_2['id'], share_type_id=share_type_2['id'])
db_utils.create_share_replica(
replica_state=constants.REPLICA_STATE_IN_SYNC,
share_id=share_2['id'], share_type_id=share_type_2['id'])
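        # share_1 (user 'fake', share_type_1) now has three replicas and
        # share_2 (user 'fake2', share_type_2) has two, matching the expected
        # counts in the ddt data above.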
kwargs = {}
if user_id:
kwargs.update({'user_id': user_id})
if share_type_id:
kwargs.update({'share_type_id': share_type_id})
total_amount, total_size = db_api.share_replica_data_get_for_project(
self.ctxt, project_id, **kwargs)
self.assertEqual(expected_result, total_amount)
self.assertEqual(expected_result, total_size)
def test_share_type_get_by_name_or_id_found_by_id(self):
share_type = db_utils.create_share_type()
result = db_api.share_type_get_by_name_or_id(
self.ctxt, share_type['id'])
self.assertIsNotNone(result)
self.assertEqual(share_type['id'], result['id'])
def test_share_type_get_by_name_or_id_found_by_name(self):
name = uuidutils.generate_uuid()
db_utils.create_share_type(name=name)
result = db_api.share_type_get_by_name_or_id(self.ctxt, name)
self.assertIsNotNone(result)
self.assertEqual(name, result['name'])
self.assertNotEqual(name, result['id'])
def test_share_type_get_by_name_or_id_when_does_not_exist(self):
fake_id = uuidutils.generate_uuid()
result = db_api.share_type_get_by_name_or_id(self.ctxt, fake_id)
self.assertIsNone(result)
def test_share_type_get_with_none_id(self):
self.assertRaises(exception.DefaultShareTypeNotConfigured,
db_api.share_type_get, self.ctxt, None)
@ddt.data(
{'name': 'st_1', 'description': 'des_1', 'is_public': True},
{'name': 'st_2', 'description': 'des_2', 'is_public': None},
{'name': 'st_3', 'description': None, 'is_public': False},
{'name': None, 'description': 'des_4', 'is_public': True},
)
@ddt.unpack
def test_share_type_update(self, name, description, is_public):
values = {}
if name:
values.update({'name': name})
if description:
values.update({'description': description})
if is_public is not None:
values.update({'is_public': is_public})
share_type = db_utils.create_share_type(name='st_name')
db_api.share_type_update(self.ctxt, share_type['id'], values)
updated_st = db_api.share_type_get_by_name_or_id(self.ctxt,
share_type['id'])
if name:
self.assertEqual(name, updated_st['name'])
if description:
self.assertEqual(description, updated_st['description'])
if is_public is not None:
self.assertEqual(is_public, updated_st['is_public'])
def test_share_type_update_not_found(self):
share_type = db_utils.create_share_type(name='st_update_test')
db_api.share_type_destroy(self.ctxt, share_type['id'])
values = {"name": "not_exist"}
self.assertRaises(exception.ShareTypeNotFound,
db_api.share_type_update,
self.ctxt, share_type['id'], values)
class MessagesDatabaseAPITestCase(test.TestCase):
def setUp(self):
super(MessagesDatabaseAPITestCase, self).setUp()
self.user_id = uuidutils.generate_uuid()
self.project_id = uuidutils.generate_uuid()
self.ctxt = context.RequestContext(
user_id=self.user_id, project_id=self.project_id, is_admin=False)
def test_message_create(self):
result = db_utils.create_message(project_id=self.project_id,
action_id='001')
self.assertIsNotNone(result['id'])
def test_message_delete(self):
result = db_utils.create_message(project_id=self.project_id,
action_id='001')
db_api.message_destroy(self.ctxt, result)
self.assertRaises(exception.NotFound, db_api.message_get,
self.ctxt, result['id'])
def test_message_get(self):
message = db_utils.create_message(project_id=self.project_id,
action_id='001')
result = db_api.message_get(self.ctxt, message['id'])
self.assertEqual(message['id'], result['id'])
self.assertEqual(message['action_id'], result['action_id'])
self.assertEqual(message['detail_id'], result['detail_id'])
self.assertEqual(message['project_id'], result['project_id'])
self.assertEqual(message['message_level'], result['message_level'])
def test_message_get_not_found(self):
self.assertRaises(exception.MessageNotFound, db_api.message_get,
self.ctxt, 'fake_id')
def test_message_get_different_project(self):
message = db_utils.create_message(project_id='another-project',
action_id='001')
self.assertRaises(exception.MessageNotFound, db_api.message_get,
self.ctxt, message['id'])
def test_message_get_all(self):
db_utils.create_message(project_id=self.project_id, action_id='001')
db_utils.create_message(project_id=self.project_id, action_id='001')
db_utils.create_message(project_id='another-project', action_id='001')
result = db_api.message_get_all(self.ctxt)
self.assertEqual(2, len(result))
def test_message_get_all_as_admin(self):
db_utils.create_message(project_id=self.project_id, action_id='001')
db_utils.create_message(project_id=self.project_id, action_id='001')
db_utils.create_message(project_id='another-project', action_id='001')
result = db_api.message_get_all(self.ctxt.elevated())
self.assertEqual(3, len(result))
def test_message_get_all_with_filter(self):
for i in ['001', '002', '002']:
db_utils.create_message(project_id=self.project_id, action_id=i)
result = db_api.message_get_all(self.ctxt,
filters={'action_id': '002'})
self.assertEqual(2, len(result))
def test_message_get_all_with_created_since_or_before_filter(self):
now = timeutils.utcnow()
db_utils.create_message(project_id=self.project_id,
action_id='001',
created_at=now - datetime.timedelta(seconds=1))
db_utils.create_message(project_id=self.project_id,
action_id='001',
created_at=now + datetime.timedelta(seconds=1))
db_utils.create_message(project_id=self.project_id,
action_id='001',
created_at=now + datetime.timedelta(seconds=2))
result1 = db_api.message_get_all(self.ctxt,
filters={'created_before': now})
result2 = db_api.message_get_all(self.ctxt,
filters={'created_since': now})
self.assertEqual(1, len(result1))
self.assertEqual(2, len(result2))
def test_message_get_all_with_invalid_sort_key(self):
self.assertRaises(exception.InvalidInput, db_api.message_get_all,
self.ctxt, sort_key='invalid_key')
def test_message_get_all_sorted_asc(self):
ids = []
for i in ['001', '002', '003']:
msg = db_utils.create_message(project_id=self.project_id,
action_id=i)
ids.append(msg.id)
result = db_api.message_get_all(self.ctxt,
sort_key='action_id',
sort_dir='asc')
result_ids = [r.id for r in result]
self.assertEqual(result_ids, ids)
def test_message_get_all_with_limit_and_offset(self):
for i in ['001', '002']:
db_utils.create_message(project_id=self.project_id,
action_id=i)
result = db_api.message_get_all(self.ctxt, limit=1, offset=1)
self.assertEqual(1, len(result))
def test_message_get_all_sorted(self):
ids = []
for i in ['003', '002', '001']:
msg = db_utils.create_message(project_id=self.project_id,
action_id=i)
ids.append(msg.id)
# Default the sort direction to descending
result = db_api.message_get_all(self.ctxt, sort_key='action_id')
result_ids = [r.id for r in result]
self.assertEqual(result_ids, ids)
def test_cleanup_expired_messages(self):
adm_context = self.ctxt.elevated()
now = timeutils.utcnow()
db_utils.create_message(project_id=self.project_id,
action_id='001',
expires_at=now)
db_utils.create_message(project_id=self.project_id,
action_id='001',
expires_at=now - datetime.timedelta(days=1))
db_utils.create_message(project_id=self.project_id,
action_id='001',
expires_at=now + datetime.timedelta(days=1))
with mock.patch.object(timeutils, 'utcnow') as mock_time_now:
mock_time_now.return_value = now
db_api.cleanup_expired_messages(adm_context)
messages = db_api.message_get_all(adm_context)
self.assertEqual(2, len(messages))
class BackendInfoDatabaseAPITestCase(test.TestCase):
def setUp(self):
"""Run before each test."""
super(BackendInfoDatabaseAPITestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_create(self):
host = "fake_host"
value = "fake_hash_value"
initial_data = db_api.backend_info_get(self.ctxt, host)
db_api.backend_info_update(self.ctxt, host, value)
actual_data = db_api.backend_info_get(self.ctxt, host)
self.assertIsNone(initial_data)
self.assertEqual(value, actual_data['info_hash'])
self.assertEqual(host, actual_data['host'])
def test_get(self):
host = "fake_host"
value = "fake_hash_value"
db_api.backend_info_update(self.ctxt, host, value, False)
actual_result = db_api.backend_info_get(self.ctxt, host)
self.assertEqual(value, actual_result['info_hash'])
self.assertEqual(host, actual_result['host'])
def test_delete(self):
host = "fake_host"
value = "fake_hash_value"
db_api.backend_info_update(self.ctxt, host, value)
initial_data = db_api.backend_info_get(self.ctxt, host)
db_api.backend_info_update(self.ctxt, host, delete_existing=True)
actual_data = db_api.backend_info_get(self.ctxt, host)
self.assertEqual(value, initial_data['info_hash'])
self.assertEqual(host, initial_data['host'])
self.assertIsNone(actual_data)
def test_double_update(self):
host = "fake_host"
value_1 = "fake_hash_value_1"
value_2 = "fake_hash_value_2"
initial_data = db_api.backend_info_get(self.ctxt, host)
db_api.backend_info_update(self.ctxt, host, value_1)
db_api.backend_info_update(self.ctxt, host, value_2)
actual_data = db_api.backend_info_get(self.ctxt, host)
self.assertIsNone(initial_data)
self.assertEqual(value_2, actual_data['info_hash'])
self.assertEqual(host, actual_data['host'])
@ddt.ddt
class ShareResourcesAPITestCase(test.TestCase):
def setUp(self):
super(ShareResourcesAPITestCase, self).setUp()
self.context = context.get_admin_context()
@ddt.data('controller-100', 'controller-0@otherstore03',
'controller-0@otherstore01#pool200')
def test_share_resources_host_update_no_matches(self, current_host):
share_id = uuidutils.generate_uuid()
share_network_id = uuidutils.generate_uuid()
share_network_subnet_id = uuidutils.generate_uuid()
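        # Pick a replacement host with the same granularity (host,
        # host@backend, or host@backend#pool) as the current host.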
if '@' in current_host:
if '#' in current_host:
new_host = 'new-controller-X@backendX#poolX'
else:
new_host = 'new-controller-X@backendX'
else:
new_host = 'new-controller-X'
resources = [ # noqa
# share instances
db_utils.create_share_instance(
share_id=share_id,
host='controller-0@fancystore01#pool100',
status=constants.STATUS_AVAILABLE),
db_utils.create_share_instance(
share_id=share_id,
host='controller-0@otherstore02#pool100',
status=constants.STATUS_ERROR),
db_utils.create_share_instance(
share_id=share_id,
host='controller-2@beststore07#pool200',
status=constants.STATUS_DELETING),
# share groups
db_utils.create_share_group(
share_network_id=share_network_id,
host='controller-0@fancystore01#pool200',
status=constants.STATUS_AVAILABLE),
db_utils.create_share_group(
share_network_id=share_network_id,
host='controller-0@otherstore02#pool100',
status=constants.STATUS_ERROR),
db_utils.create_share_group(
share_network_id=share_network_id,
host='controller-2@beststore07#pool100',
status=constants.STATUS_DELETING),
# share servers
db_utils.create_share_server(
share_network_subnet_id=share_network_subnet_id,
host='controller-0@fancystore01',
status=constants.STATUS_ACTIVE),
db_utils.create_share_server(
share_network_subnet_id=share_network_subnet_id,
host='controller-0@otherstore02#pool100',
status=constants.STATUS_ERROR),
db_utils.create_share_server(
share_network_subnet_id=share_network_subnet_id,
host='controller-2@beststore07',
status=constants.STATUS_DELETING),
]
updates = db_api.share_resources_host_update(self.context,
current_host,
new_host)
expected_updates = {'instances': 0, 'servers': 0, 'groups': 0}
self.assertDictMatch(expected_updates, updates)
# validate that resources are unmodified:
share_instances = db_api.share_instances_get_all(
self.context, filters={'share_id': share_id})
share_groups = db_api.share_group_get_all(
self.context, filters={'share_network_id': share_network_id})
share_servers = db_api._server_get_query(self.context).filter_by(
share_network_subnet_id=share_network_subnet_id).all()
self.assertEqual(3, len(share_instances))
self.assertEqual(3, len(share_groups))
self.assertEqual(3, len(share_servers))
        for share_instance in share_instances:
            self.assertFalse(share_instance['host'].startswith(new_host))
        for share_group in share_groups:
            self.assertFalse(share_group['host'].startswith(new_host))
        for share_server in share_servers:
            self.assertFalse(share_server['host'].startswith(new_host))
@ddt.data(
{'current_host': 'controller-2',
'expected_updates': {'instances': 1, 'servers': 2, 'groups': 1}},
{'current_host': 'controller-0@fancystore01',
'expected_updates': {'instances': 2, 'servers': 1, 'groups': 2}},
{'current_host': 'controller-0@fancystore01#pool100',
'expected_updates': {'instances': 1, 'servers': 1, 'groups': 0}})
@ddt.unpack
def test_share_resources_host_update_partial_matches(self, current_host,
expected_updates):
share_id = uuidutils.generate_uuid()
share_network_id = uuidutils.generate_uuid()
share_network_subnet_id = uuidutils.generate_uuid()
if '@' in current_host:
if '#' in current_host:
new_host = 'new-controller-X@backendX#poolX'
else:
new_host = 'new-controller-X@backendX'
else:
new_host = 'new-controller-X'
total_updates_expected = (expected_updates['instances']
+ expected_updates['groups']
+ expected_updates['servers'])
resources = [ # noqa
# share instances
db_utils.create_share_instance(
share_id=share_id,
host='controller-0@fancystore01#pool100',
status=constants.STATUS_AVAILABLE),
db_utils.create_share_instance(
share_id=share_id,
host='controller-0@fancystore01#pool200',
status=constants.STATUS_ERROR),
db_utils.create_share_instance(
share_id=share_id,
host='controller-2@beststore07#pool200',
status=constants.STATUS_DELETING),
# share groups
db_utils.create_share_group(
share_network_id=share_network_id,
host='controller-0@fancystore01#pool101',
status=constants.STATUS_ACTIVE),
db_utils.create_share_group(
share_network_id=share_network_id,
host='controller-0@fancystore01#pool101',
status=constants.STATUS_ERROR),
db_utils.create_share_group(
share_network_id=share_network_id,
host='controller-2@beststore07#pool200',
status=constants.STATUS_DELETING),
# share servers
db_utils.create_share_server(
share_network_subnet_id=share_network_subnet_id,
host='controller-0@fancystore01#pool100',
status=constants.STATUS_ACTIVE),
db_utils.create_share_server(
share_network_subnet_id=share_network_subnet_id,
host='controller-2@fancystore01',
status=constants.STATUS_ERROR),
db_utils.create_share_server(
share_network_subnet_id=share_network_subnet_id,
host='controller-2@beststore07#pool200',
status=constants.STATUS_DELETING),
]
actual_updates = db_api.share_resources_host_update(
self.context, current_host, new_host)
share_instances = db_api.share_instances_get_all(
self.context, filters={'share_id': share_id})
share_groups = db_api.share_group_get_all(
self.context, filters={'share_network_id': share_network_id})
share_servers = db_api._server_get_query(self.context).filter_by(
share_network_subnet_id=share_network_subnet_id).all()
updated_resources = [
res for res in share_instances + share_groups + share_servers
if res['host'].startswith(new_host)
]
self.assertEqual(expected_updates, actual_updates)
self.assertEqual(total_updates_expected, len(updated_resources))
def test_share_instances_status_update(self):
for i in range(1, 3):
instances = [
db_utils.create_share_instance(
status=constants.STATUS_SERVER_MIGRATING, share_id='fake')
for __ in range(1, 3)]
share_instance_ids = [instance['id'] for instance in instances]
values = {'status': constants.STATUS_AVAILABLE}
db_api.share_instances_status_update(
self.context, share_instance_ids, values)
instances = [
db_api.share_instance_get(self.context, instance_id)
for instance_id in share_instance_ids]
for instance in instances:
self.assertEqual(constants.STATUS_AVAILABLE, instance['status'])
def test_share_snapshot_instances_status_update(self):
share_instance = db_utils.create_share_instance(
status=constants.STATUS_AVAILABLE, share_id='fake')
instances = [
db_utils.create_snapshot_instance(
'fake_snapshot_id_1', status=constants.STATUS_CREATING,
share_instance_id=share_instance['id'])
for __ in range(1, 3)]
snapshot_instance_ids = [instance['id'] for instance in instances]
values = {'status': constants.STATUS_AVAILABLE}
db_api.share_snapshot_instances_status_update(
self.context, snapshot_instance_ids, values)
instances = [
db_api.share_snapshot_instance_get(self.context, instance_id)
for instance_id in snapshot_instance_ids]
for instance in instances:
self.assertEqual(constants.STATUS_AVAILABLE, instance['status'])
def test_share_and_snapshot_instances_status_update(self):
share_instance = db_utils.create_share_instance(
status=constants.STATUS_AVAILABLE, share_id='fake')
share_instance_ids = [share_instance['id']]
fake_session = db_api.get_session()
snap_instances = [
db_utils.create_snapshot_instance(
'fake_snapshot_id_1', status=constants.STATUS_CREATING,
share_instance_id=share_instance['id'])
for __ in range(1, 3)]
snapshot_instance_ids = [instance['id'] for instance in snap_instances]
values = {'status': constants.STATUS_AVAILABLE}
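        # Mock both update helpers so we can verify each is called once with
        # the shared session.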
mock_update_share_instances = self.mock_object(
db_api, 'share_instances_status_update',
mock.Mock(return_value=[share_instance]))
mock_update_snap_instances = self.mock_object(
db_api, 'share_snapshot_instances_status_update',
mock.Mock(return_value=snap_instances))
mock_get_session = self.mock_object(
db_api, 'get_session', mock.Mock(return_value=fake_session))
updated_share_instances, updated_snap_instances = (
db_api.share_and_snapshot_instances_status_update(
self.context, values, share_instance_ids=share_instance_ids,
snapshot_instance_ids=snapshot_instance_ids))
mock_get_session.assert_called()
mock_update_share_instances.assert_called_once_with(
self.context, share_instance_ids, values, session=fake_session)
mock_update_snap_instances.assert_called_once_with(
self.context, snapshot_instance_ids, values, session=fake_session)
self.assertEqual(updated_share_instances, [share_instance])
self.assertEqual(updated_snap_instances, snap_instances)
@ddt.data(
{
'share_instance_status': constants.STATUS_ERROR,
'snap_instance_status': constants.STATUS_AVAILABLE,
'expected_exc': exception.InvalidShareInstance
},
{
'share_instance_status': constants.STATUS_AVAILABLE,
'snap_instance_status': constants.STATUS_ERROR,
'expected_exc': exception.InvalidShareSnapshotInstance
}
)
@ddt.unpack
def test_share_and_snapshot_instances_status_update_invalid_status(
self, share_instance_status, snap_instance_status, expected_exc):
share_instance = db_utils.create_share_instance(
status=share_instance_status, share_id='fake')
share_snapshot_instance = db_utils.create_snapshot_instance(
'fake_snapshot_id_1', status=snap_instance_status,
share_instance_id=share_instance['id'])
share_instance_ids = [share_instance['id']]
snap_instance_ids = [share_snapshot_instance['id']]
values = {'status': constants.STATUS_AVAILABLE}
fake_session = db_api.get_session()
mock_get_session = self.mock_object(
db_api, 'get_session', mock.Mock(return_value=fake_session))
mock_instances_get_all = self.mock_object(
db_api, 'share_instances_get_all',
mock.Mock(return_value=[share_instance]))
mock_snap_instances_get_all = self.mock_object(
db_api, 'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=[share_snapshot_instance]))
self.assertRaises(expected_exc,
db_api.share_and_snapshot_instances_status_update,
self.context,
values,
share_instance_ids=share_instance_ids,
snapshot_instance_ids=snap_instance_ids,
current_expected_status=constants.STATUS_AVAILABLE)
mock_get_session.assert_called()
mock_instances_get_all.assert_called_once_with(
self.context, filters={'instance_ids': share_instance_ids},
session=fake_session)
if snap_instance_status == constants.STATUS_ERROR:
mock_snap_instances_get_all.assert_called_once_with(
self.context, {'instance_ids': snap_instance_ids},
session=fake_session)
@ddt.ddt
class AsyncOperationDatabaseAPITestCase(test.TestCase):
def setUp(self):
"""Run before each test."""
super(AsyncOperationDatabaseAPITestCase, self).setUp()
self.user_id = uuidutils.generate_uuid()
self.project_id = uuidutils.generate_uuid()
self.ctxt = context.RequestContext(
user_id=self.user_id, project_id=self.project_id, is_admin=False)
def _get_async_operation_test_data(self):
return uuidutils.generate_uuid()
@ddt.data({"details": {"foo": "bar", "tee": "too"},
"valid": {"foo": "bar", "tee": "too"}},
{"details": {"foo": "bar", "tee": ["test"]},
"valid": {"foo": "bar", "tee": str(["test"])}})
@ddt.unpack
def test_update(self, details, valid):
entity_id = self._get_async_operation_test_data()
initial_data = db_api.async_operation_data_get(self.ctxt, entity_id)
db_api.async_operation_data_update(self.ctxt, entity_id, details)
actual_data = db_api.async_operation_data_get(self.ctxt, entity_id)
self.assertEqual({}, initial_data)
self.assertEqual(valid, actual_data)
@ddt.data({'with_deleted': True, 'append': False},
{'with_deleted': True, 'append': True},
{'with_deleted': False, 'append': False},
{'with_deleted': False, 'append': True})
@ddt.unpack
def test_update_with_more_values(self, with_deleted, append):
entity_id = self._get_async_operation_test_data()
details = {"tee": "too"}
more_details = {"foo": "bar"}
result = {"tee": "too", "foo": "bar"}
db_api.async_operation_data_update(self.ctxt, entity_id, details)
if with_deleted:
db_api.async_operation_data_delete(self.ctxt, entity_id)
if append:
more_details.update(details)
if with_deleted and not append:
result.pop("tee")
db_api.async_operation_data_update(self.ctxt, entity_id, more_details)
actual_result = db_api.async_operation_data_get(self.ctxt, entity_id)
self.assertEqual(result, actual_result)
@ddt.data(True, False)
def test_update_with_duplicate(self, with_deleted):
entity_id = self._get_async_operation_test_data()
details = {"tee": "too"}
db_api.async_operation_data_update(self.ctxt, entity_id, details)
if with_deleted:
db_api.async_operation_data_delete(self.ctxt, entity_id)
db_api.async_operation_data_update(self.ctxt, entity_id, details)
actual_result = db_api.async_operation_data_get(self.ctxt,
entity_id)
self.assertEqual(details, actual_result)
def test_update_with_delete_existing(self):
resource_id = self._get_async_operation_test_data()
details = {"key1": "val1", "key2": "val2", "key3": "val3"}
details_update = {"key1": "val1_upd", "key4": "new_val"}
# Create new details
db_api.async_operation_data_update(self.ctxt, resource_id, details)
db_api.async_operation_data_update(self.ctxt, resource_id,
details_update,
delete_existing=True)
actual_result = db_api.async_operation_data_get(self.ctxt, resource_id)
self.assertEqual(details_update, actual_result)
def test_get(self):
resource_id = self._get_async_operation_test_data()
test_key = "foo"
test_keys = [test_key, "tee"]
details = {test_keys[0]: "val", test_keys[1]: "val", "mee": "foo"}
db_api.async_operation_data_update(self.ctxt, resource_id, details)
actual_result_all = db_api.async_operation_data_get(
self.ctxt, resource_id)
actual_result_single_key = db_api.async_operation_data_get(
self.ctxt, resource_id, test_key)
actual_result_list = db_api.async_operation_data_get(
self.ctxt, resource_id, test_keys)
self.assertEqual(details, actual_result_all)
self.assertEqual(details[test_key], actual_result_single_key)
self.assertEqual(dict.fromkeys(test_keys, "val"), actual_result_list)
def test_delete_single(self):
test_id = self._get_async_operation_test_data()
test_key = "foo"
details = {test_key: "bar", "tee": "too"}
valid_result = {"tee": "too"}
db_api.async_operation_data_update(self.ctxt, test_id, details)
db_api.async_operation_data_delete(self.ctxt, test_id, test_key)
actual_result = db_api.async_operation_data_get(
self.ctxt, test_id)
self.assertEqual(valid_result, actual_result)
def test_delete_all(self):
test_id = self._get_async_operation_test_data()
details = {"foo": "bar", "tee": "too"}
db_api.async_operation_data_update(self.ctxt, test_id, details)
db_api.async_operation_data_delete(self.ctxt, test_id)
actual_result = db_api.async_operation_data_get(
self.ctxt, test_id)
self.assertEqual({}, actual_result)
| apache-2.0 | 6,511,009,015,598,716,000 | 41.306273 | 79 | 0.566753 | false |
johnsonc/OTM2 | opentreemap/treemap/migrations/0038_auto__add_field_instance_itree_region_default.py | 3 | 19000 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Instance.itree_region_default'
db.add_column(u'treemap_instance', 'itree_region_default',
self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Instance.itree_region_default'
db.delete_column(u'treemap_instance', 'itree_region_default')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'treemap.audit': {
'Meta': {'object_name': 'Audit'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'model_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'previous_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'ref': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Audit']", 'null': 'True'}),
'requires_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.benefitcurrencyconversion': {
'Meta': {'object_name': 'BenefitCurrencyConversion'},
'airquality_aggregate_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'carbon_dioxide_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'currency_symbol': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kwh_to_currency': ('django.db.models.fields.FloatField', [], {}),
'stormwater_gal_to_currency': ('django.db.models.fields.FloatField', [], {})
},
u'treemap.boundary': {
'Meta': {'object_name': 'Boundary'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.fieldpermission': {
'Meta': {'object_name': 'FieldPermission'},
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'permission_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"})
},
u'treemap.importevent': {
'Meta': {'object_name': 'ImportEvent'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"}),
'imported_on': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'treemap.instance': {
'Meta': {'object_name': 'Instance'},
'basemap_data': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'basemap_type': ('django.db.models.fields.CharField', [], {'default': "'google'", 'max_length': '255'}),
'boundaries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.Boundary']", 'null': 'True', 'blank': 'True'}),
'bounds': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
'config': ('treemap.json_field.JSONField', [], {'blank': 'True'}),
'default_role': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'default_role'", 'to': u"orm['treemap.Role']"}),
'eco_benefits_conversion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.BenefitCurrencyConversion']", 'null': 'True', 'blank': 'True'}),
'geo_rev': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'itree_region_default': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'url_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.User']", 'null': 'True', 'through': u"orm['treemap.InstanceUser']", 'blank': 'True'})
},
u'treemap.instancespecies': {
'Meta': {'object_name': 'InstanceSpecies'},
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']"})
},
u'treemap.instanceuser': {
'Meta': {'object_name': 'InstanceUser'},
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.plot': {
'Meta': {'object_name': 'Plot'},
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.ImportEvent']", 'null': 'True', 'blank': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'length': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'owner_orig_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'udfs': ('treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'treemap.reputationmetric': {
'Meta': {'object_name': 'ReputationMetric'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'approval_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'denial_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'direct_write_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'treemap.role': {
'Meta': {'object_name': 'Role'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rep_thresh': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.species': {
'Meta': {'object_name': 'Species'},
'bloom_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cultivar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fact_sheet': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fall_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'flower_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fruit_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'itree_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'max_dbh': ('django.db.models.fields.IntegerField', [], {'default': '200'}),
'max_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
'native_status': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'palatable_human': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'plant_guide': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'wildlife_value': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'treemap.tree': {
'Meta': {'object_name': 'Tree'},
'canopy_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'date_planted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_removed': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'diameter': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.ImportEvent']", 'null': 'True', 'blank': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'plot': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Plot']"}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']", 'null': 'True', 'blank': 'True'}),
'udfs': ('treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'})
},
u'treemap.treephoto': {
'Meta': {'object_name': 'TreePhoto'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'tree': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Tree']"})
},
u'treemap.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'treemap.userdefinedcollectionvalue': {
'Meta': {'object_name': 'UserDefinedCollectionValue'},
'data': ('django_hstore.fields.DictionaryField', [], {}),
'field_definition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.UserDefinedFieldDefinition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model_id': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.userdefinedfielddefinition': {
'Meta': {'object_name': 'UserDefinedFieldDefinition'},
'datatype': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'iscollection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['treemap'] | gpl-3.0 | -8,868,243,839,038,990,000 | 79.172996 | 208 | 0.550737 | false |
pjialin/py12306 | py12306/helpers/auth_code.py | 1 | 2835 | import random
import time
from requests.exceptions import SSLError
from py12306.config import Config
from py12306.helpers.OCR import OCR
from py12306.helpers.api import *
from py12306.helpers.request import Request
from py12306.helpers.func import *
from py12306.log.common_log import CommonLog
from py12306.log.user_log import UserLog
class AuthCode:
"""
    Captcha (verification code) helper class.
"""
session = None
data_path = None
retry_time = 5
def __init__(self, session):
self.data_path = Config().RUNTIME_DIR
self.session = session
@classmethod
def get_auth_code(cls, session):
self = cls(session)
img = self.download_code()
position = OCR.get_img_position(img)
        if not position:  # captcha recognition failed
return self.retry_get_auth_code()
answer = ','.join(map(str, position))
if not self.check_code(answer):
return self.retry_get_auth_code()
return position
    def retry_get_auth_code(self):  # TODO: add a retry-count safety limit
CommonLog.add_quick_log(CommonLog.MESSAGE_RETRY_AUTH_CODE.format(self.retry_time)).flush()
time.sleep(self.retry_time)
return self.get_auth_code(self.session)
def download_code(self):
url = API_AUTH_CODE_BASE64_DOWNLOAD.format(random=random.random())
# code_path = self.data_path + 'code.png'
try:
self.session.cookies.clear_session_cookies()
UserLog.add_quick_log(UserLog.MESSAGE_DOWNLAODING_THE_CODE).flush()
            # response = self.session.save_to_file(url, code_path)  # TODO: handle error responses
response = self.session.get(url)
result = response.json()
if result.get('image'):
return result.get('image')
            raise SSLError('Empty response data')
except SSLError as e:
UserLog.add_quick_log(
UserLog.MESSAGE_DOWNLAOD_AUTH_CODE_FAIL.format(e, self.retry_time)).flush()
time.sleep(self.retry_time)
return self.download_code()
def check_code(self, answer):
"""
        Verify the captcha answer against the server.
:return:
"""
url = API_AUTH_CODE_CHECK.get('url').format(answer=answer, random=time_int())
response = self.session.get(url)
result = response.json()
if result.get('result_code') == '4':
UserLog.add_quick_log(UserLog.MESSAGE_CODE_AUTH_SUCCESS).flush()
return True
else:
            # failure response looks like: {'result_message': '验证码校验失败' (captcha check failed), 'result_code': '5'}
UserLog.add_quick_log(
UserLog.MESSAGE_CODE_AUTH_FAIL.format(result.get('result_message'))).flush()
self.session.cookies.clear_session_cookies()
return False
if __name__ == '__main__':
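    # get_auth_code is a classmethod that expects a session object (see its
    # signature above); a configured session/Request instance must be supplied
    # here for this quick test to work.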
code_result = AuthCode.get_auth_code()
| apache-2.0 | -3,871,019,704,660,593,700 | 31.458824 | 98 | 0.607829 | false |
abretaud/tools-iuc | data_managers/data_manager_mitos/data_manager/data_manager.py | 2 | 2748 | import argparse
import json
import os
import shutil
import tarfile
try:
# For Python 3.0 and later
from urllib.request import Request, urlopen
except ImportError:
# Fall back to Python 2 imports
from urllib2 import Request, urlopen
ZENODO = {
"mitos": "2683856",
"mitos2": "3685310"
}
NAMES = {
"mitos1-refdata": "RefSeq39 + MiTFi tRNA models",
"refseq39": "RefSeq39 (equivalent to MITOS1 data)",
"refseq63m": "RefSeq63 Metazoa",
"refseq63f": "RefSeq63 Fungi",
"refseq63o": "RefSeq63 Opisthokonta",
"refseq89m": "RefSeq89 Metazoa",
"refseq89f": "RefSeq89 Fungi",
"refseq89o": "RefSeq89 Opisthokonta"
}
def url_download(tpe, db, workdir):
"""
    Download the {db}.tar.bz2 archive for the requested database from Zenodo
    (record selected by the MITOS version) into workdir, extract it, and
    return the name of the top-level directory it contains.
"""
tarfname = os.path.join(workdir, db + ".tar.bz")
if not os.path.exists(workdir):
os.makedirs(workdir)
src = None
dst = None
try:
req = Request("https://zenodo.org/record/{zenodoid}/files/{db}.tar.bz2?download=1".format(zenodoid=ZENODO[tpe], db=db))
src = urlopen(req)
with open(tarfname, 'wb') as dst:
while True:
chunk = src.read(2**10)
if chunk:
dst.write(chunk)
else:
break
finally:
if src:
src.close()
with tarfile.open(tarfname, "r:bz2") as tar:
dirname = tar.getnames()[0]
tar.extractall(workdir)
os.remove(tarfname)
return dirname
def main(tpe, db, outjson):
workdir = os.getcwd()
path = url_download(tpe, db, workdir)
data_manager_entry = {}
data_manager_entry['value'] = db
data_manager_entry['name'] = NAMES[db]
data_manager_entry['type'] = tpe
data_manager_entry['path'] = path
data_manager_json = dict(data_tables=dict(mitos=data_manager_entry))
with open(outjson) as f:
params = json.loads(f.read())
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
# output_path = os.path.abspath(os.path.join(os.getcwd(), 'mitos'))
shutil.move(os.path.join(workdir, path), target_directory)
with open(outjson, 'w') as fh:
fh.write(json.dumps(data_manager_json, sort_keys=True))
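# For reference, the JSON written above has the following shape (illustrative
# values for "--type mitos2 --db refseq89m"; "path" is whatever top-level
# directory the Zenodo tarball contains):
#   {"data_tables": {"mitos": {"name": "RefSeq89 Metazoa", "path": "<extracted dir>",
#                              "type": "mitos2", "value": "refseq89m"}}}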
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create data manager json.')
parser.add_argument('--out', action='store', help='JSON filename')
parser.add_argument('--type', action='store', help='mitos version')
parser.add_argument('--db', action='store', help='db name')
args = parser.parse_args()
main(args.type, args.db, args.out)
| mit | 8,881,372,008,880,541,000 | 29.197802 | 127 | 0.621179 | false |
ProjexSoftware/projexui | projexui/widgets/xrolloutwidget.py | 2 | 8758 | #!/usr/bin/python
"""
The rollout widget allows for multiple collapsible views to be open at once.
"""
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software'
__license__ = 'LGPL'
# maintenance information
__maintainer__ = 'Projex Software'
__email__ = '[email protected]'
#------------------------------------------------------------------------------
import datetime
from projex.text import nativestring
from projexui.xpainter import XPainter
from projexui.qt import Signal
from projexui.qt.QtCore import Qt
from projexui.qt.QtGui import QFrame,\
QHBoxLayout,\
QLabel,\
QPalette,\
QScrollArea,\
QSizePolicy,\
QToolButton,\
QVBoxLayout,\
QWidget,\
QPushButton,\
QIcon,\
QPen
from projexui.xpainter import XPainter
import projexui.resources
TITLE_STYLESHEET = """\
QPushButton {
text-align: left;
border: 1px solid palette(midlight);
border-radius: 8px;
}
QPushButton:hover {
background-color: palette(button);
}
"""
class XRolloutItem(QWidget):
def __init__( self, rolloutWidget, widget, title = 'Rollout', expanded = False ):
super(XRolloutItem, self).__init__(rolloutWidget)
# initialize the interface
self._rolloutWidget = rolloutWidget
self._widget = widget
self._expanded = expanded
self._titleButton = QPushButton(self)
self._titleButton.setFlat(True)
self._titleButton.setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Minimum)
self._titleButton.setFixedHeight(20)
self._titleButton.setText(title)
self._titleButton.setStyleSheet(TITLE_STYLESHEET)
layout = QVBoxLayout()
layout.setContentsMargins(6, 6, 6, 9)
layout.setSpacing(2)
layout.addWidget(self._titleButton)
layout.addWidget(widget)
self.setLayout(layout)
# initialize the expanded state
self.setExpanded(expanded)
# create connections
self._titleButton.clicked.connect( self.toggleExpanded )
def collapse( self ):
"""
Collapses this rollout item.
"""
self.setExpanded(False)
def expand( self ):
"""
Expands this rollout item.
"""
self.setExpanded(True)
def isCollapsed( self ):
"""
Returns whether or not this rollout is in the collapsed state.
:return <bool>
"""
return not self._expanded
def paintEvent( self, event ):
"""
Overloads the paint event to draw rounded edges on this widget.
:param event | <QPaintEvent>
"""
super(XRolloutItem, self).paintEvent(event)
with XPainter(self) as painter:
w = self.width() - 3
h = self.height() - 3
color = self.palette().color(QPalette.Midlight)
color = color.darker(180)
pen = QPen(color)
pen.setWidthF(0.5)
painter.setPen(pen)
painter.setBrush(self.palette().color(QPalette.Midlight))
painter.setRenderHint(XPainter.Antialiasing)
painter.drawRoundedRect(1, 1, w, h, 10, 10)
def isExpanded( self ):
"""
Returns whether or not this rollout is in the expanded state.
:return <bool>
"""
return self._expanded
def rolloutWidget( self ):
"""
Returns the rollout widget that this item is associated with.
:return <XRolloutWidget>
"""
return self._rolloutWidget
def setCollapsed( self, state ):
"""
Sets whether or not this rollout is in the collapsed state.
:param state | <bool>
"""
return self.setExpanded(not state)
def setExpanded( self, state ):
"""
Sets whether or not this rollout is in the expanded state.
:param state | <bool>
"""
self._expanded = state
self._widget.setVisible(state)
if ( state ):
ico = projexui.resources.find('img/treeview/triangle_down.png')
else:
ico = projexui.resources.find('img/treeview/triangle_right.png')
self._titleButton.setIcon(QIcon(ico))
# emit the signals for this widget
rollout = self.rolloutWidget()
if ( not rollout.signalsBlocked() ):
index = rollout.widget().layout().indexOf(self)
            if state:
                rollout.itemExpanded.emit(index)
            else:
                rollout.itemCollapsed.emit(index)
def setTitle( self, title ):
"""
Sets the title for this item to the inputed title text.
:param title | <str>
"""
        self._titleButton.setText(title)
def title( self ):
"""
Returns the title for this rollout.
:return <str>
"""
        return nativestring(self._titleButton.text())
def toggleExpanded( self ):
"""
Toggles whether or not this rollout is in the expanded state.
"""
self.setExpanded(not self.isExpanded())
def widget( self ):
"""
Returns the widget that is associated with this rollout item.
:return <QWidget>
"""
return self._widget
#------------------------------------------------------------------------------
class XRolloutWidget(QScrollArea):
""" """
itemCollapsed = Signal(int)
itemExpanded = Signal(int)
def __init__( self, parent = None ):
super(XRolloutWidget, self).__init__( parent )
# define custom properties
self.setWidgetResizable(True)
# set default properties
widget = QWidget(self)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(3)
layout.addStretch(1)
widget.setLayout(layout)
self.setWidget(widget)
def clear( self ):
"""
Clears out all of the rollout items from the widget.
"""
self.blockSignals(True)
self.setUpdatesEnabled(False)
for child in self.findChildren(XRolloutItem):
child.setParent(None)
child.deleteLater()
self.setUpdatesEnabled(True)
self.blockSignals(False)
def addRollout( self, widget, title, expanded = False ):
"""
Adds a new widget to the rollout system.
:param widget | <QWidget>
title | <str>
expanded | <bool>
:return <XRolloutItem>
"""
layout = self.widget().layout()
item = XRolloutItem(self, widget, title, expanded)
layout.insertWidget(layout.count() - 1, item)
return item
def count( self ):
"""
Returns the number of items that are associated with this rollout.
:return <int>
"""
return self.widget().layout().count() - 1
def itemAt( self, index ):
"""
Returns the rollout item at the inputed index.
:return <XRolloutItem> || None
"""
layout = self.widget().layout()
if ( 0 <= index and index < (layout.count() - 1) ):
return layout.itemAt(index).widget()
return None
def items( self ):
"""
Returns all the rollout items for this widget.
:return [<XRolloutItem>, ..]
"""
layout = self.widget().layout()
return [layout.itemAt(i).widget() for i in range(layout.count()-1)]
def takeAt( self, index ):
"""
Removes the widget from the rollout at the inputed index.
:param index | <int>
:return <QWidget> || None
"""
layout = self.widget().layout()
item = layout.takeAt(index)
if ( not item ):
return None
return item.widget().widget()
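# Rough usage sketch (illustrative only; assumes a running QApplication and an
# arbitrary content widget such as a QLabel):
#
#   rollout = XRolloutWidget(parent)
#   item = rollout.addRollout(QLabel('content'), 'Section 1', expanded=True)
#   item.collapse()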
__designer_plugins__ = [XRolloutWidget] | lgpl-3.0 | 2,328,493,244,918,064,600 | 28.183946 | 85 | 0.517698 | false |
caioserra/apiAdwords | examples/adspygoogle/dfp/v201302/get_users_by_statement.py | 3 | 1818 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all users sorted by name. The statement retrieves up
to the maximum page size limit of 500. To create new users,
run create_users.py."""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
user_service = client.GetService('UserService', version='v201302')
# Create statement object to get all users stored by name.
filter_statement = {'query': 'ORDER BY name LIMIT 500'}
# Get users by statement.
response = user_service.GetUsersByStatement(filter_statement)[0]
users = []
if 'results' in response:
users = response['results']
# Display results.
for user in users:
print ('User with id \'%s\', email \'%s\', and role \'%s\' was found.'
% (user['id'], user['email'], user['roleName']))
print
print 'Number of results found: %s' % len(users)
| apache-2.0 | 9,092,832,380,309,353,000 | 32.666667 | 80 | 0.713971 | false |
bosmanoglu/adore-doris | lib/python/snaphu.py | 2 | 9223 | # snaphu.py
import _snaphu
import sys, re
import csv
import numpy as np
import adore
import insar
def dict2obj(d):
#Modified from
#Ygor Lemos: parand.com/say/index.php/2008/10/13/access-python-dictionary-keys-as-properties/
class DictObj:
def __init__(self, **entries):
for e in entries:
                # Replace spaces and strip dots so the key is a valid attribute name
et="_".join(e.split())
et=et.replace('.','')
if isinstance(d[e], dict):
self.__dict__[et]=dict2obj(d[e])
else:
self.__dict__[et]=d[e]
return DictObj(**d)
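# Illustrative example: dict2obj({'mst': 1, 'mcf': 2}).mst == 1 -- this is how
# the module-level `format`, `cost`, `method` and `transmit` namespaces below
# are built from the corresponding underscore-prefixed dicts.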
_format =dict(complex_data=1, alt_line_data=3, alt_sample_data=4, float_data=2,
COMPLEX_DATA=1, ALT_LINE_DATA=3, ALT_SAMPLE_DATA=4, FLOAT_DATA=2)
_cost =dict(nostatcosts=0, topo=1, defo=2, smooth=3, NOSTATCOSTS=0, TOPO=1, DEFO=2, SMOOTH=3)
_method =dict(mst=1, mcf=2, MST=1, MCF=2)
_transmit=dict(singleanttransmit=1, pingpong=2, repeatpass=1,
SINGLEANTTRANSMIT=1, PINGPONG=2, REPEATPASS=1)
_bool =dict(true=1, false=0, TRUE=1, FALSE=0)
bool =dict2obj(_bool)
format =dict2obj(_format)
cost =dict2obj(_cost)
method =dict2obj(_method)
transmit=dict2obj(_transmit)
class _CommentedFile:
def __init__(self, f, commentstring="#"):
self.f = f
self.commentstring = commentstring
def next(self):
line = self.f.next()
while ((not line) or (line=="\n") or line.startswith(self.commentstring)):
line = self.f.next()
#print "." + line + "."
return line
def __iter__(self):
return self
class Snaphu(_snaphu._Snaphu):
def __init__(self):
self._keyword=dict(INFILE=lambda x: self._infile_set(x),
LINELENGTH=lambda x: self._lines_set(x),
OUTFILE=lambda x: self._outfile_set(x),
WEIGHTFILE=lambda x: self._weightfile_set(x),
STATCOSTMODE=lambda x: self._costmode_set(_cost[x]),
CORRFILE=lambda x: self._corrfile_set(x),
LOGFILE=lambda x: self._logfile_set(x),
INITMETHOD=lambda x: self._initmethod_set(_method[x]),
VERBOSE=lambda x: self._verbose_set(_bool[x]),
INFILEFORMAT=lambda x: self._infileformat_set(_format[x]),
CORRFILEFORMAT=lambda x: self._corrfileformat_set(_format[x]),
OUTFILEFORMAT=lambda x: self._outfileformat_set(_format[x]),
ORBITRADIUS=lambda x: self._orbitradius_set(float(x)),
EARTHRADIUS=lambda x: self._earthradius_set(float(x)),
BASELINE=lambda x: self._baseline_set(float(x)),
BASELINEANGLE_DEG=lambda x: self._baselineangle_set(np.deg2rad(float(x))),
TRANSMITMODE=lambda x: self._transmitmode_set(_transmit[x]),
NEARRANGE=lambda x: self._nearrange_set(float(x)),
DR=lambda x: self._dr_set(float(x)),
DA=lambda x: self._da_set(float(x)),
RANGERES=lambda x: self._rangeres_set(float(x)),
AZRES=lambda x: self._azres_set(float(x)),
LAMBDA=lambda x: self._wavelength_set(float(x)),
NLOOKSRANGE=lambda x: self._nlooksrange_set(int(x)),
NLOOKSAZ=lambda x: self._nlooksaz_set(int(x)),
NCORRLOOKS=lambda x: self._ncorrlooks_set(float(x)),
NCORRLOOKSRANGE=lambda x: self._ncorrlooksrange_set(int(x)),
NCORRLOOKSAZ=lambda x: self._ncorrlooksaz_set(int(x)),
NTILEROW=lambda x: self._ntilerow_set(int(x)),
NTILECOL=lambda x: self._ntilecol_set(int(x)),
ROWOVRLP=lambda x: self._rowovrlp_set(int(x)),
COLOVRLP=lambda x: self._colovrlp_set(int(x)),
NPROC=lambda x: self._nthreads_set(int(x)),
TILECOSTTHRESH=lambda x: self._tilecostthresh_set(int(x)),)
# output file parameters
self.outfileformat= format.float_data
# input file parameters
self.infileformat = format.complex_data
self.unwrappedinfileformat = format.float_data
self.magfileformat = format.float_data
self.corrfileformat = format.float_data
self.ampfileformat = format.float_data
self.estfileformat = format.float_data
# Scattering model parameters #
self.layminei = 1.25
self.kds = 0.02
self.specularexp = 8.0
self.dzrcritfactor = 2.0
self.shadow = False #not yet enabled.
self.dzeimin = -4.0
self.laywidth = 16
self.layminei = 1.25
self.sloperatiofactor = 1.18
self.sigsqei = 100.0
# Decorrelation model parameters #
self.drho = 0.005
self.rhosconst1 = 1.3
self.rhosconst2 = 0.14
self.cstd1 = 0.4
self.cstd2 = 0.35
self.cstd3 = 0.06
self.defaultcorr = 0.01
self.rhominfactor = 1.3
# PDF model parameters #
self.dzlaypeak = -2.0
self.azdzfactor = 0.99
self.dzeifactor = 4.0
self.dzeiweight = 0.5
self.dzlayfactor = 1.0
self.layconst = 0.9
self.layfalloffconst = 2.0
self.sigsqshortmin = 1
self.sigsqlayfactor = 0.1
# Deformation mode parameters #
self.defoazdzfactor = 1.0
self.defothreshfactor = 1.2
self.defomax = 7.5398
self.sigsqcorr = 0.05
self.defolayconst = 0.9
# Algorithm parameters #
self.eval = False
self.unwrapped = False
self.regrowconncomps = False
self.initonly = False
self.initmethod = method.mst
self.costmode = cost.topo
self.dumpall = False
self.verbose = True
self.amplitude = True
self.havemagnitude = False
self.flipphasesign = False
self.initmaxflow = 9999
self.arcmaxflowconst =3
self.maxflow = 4
self.krowei = 65
self.kcolei = 257
self.kpardpsi = 7
self.kperpdpsi = 7
self.threshold = 0.001
self.initdzr = 2048.0
self.initdzstep = 100.0
self.maxcost = 1000.0
self.costscale = 100.0
self.costscaleambight = 80.0
self.dnomincangle = 0.01
self.srcrow = 0
self.srccol = 0
self.p = 0
self.nshortcycle = 200
self.maxnewnodeconst = 0.0008
self.maxnflowcycles = 10
self.maxcyclefraction =0.00001
self.sourcemode = 0
self.cs2scalefactor = 8
self.transmitmode = transmit.singleanttransmit
self.nlooksother = 1
# tiling parameters
self.ntilerow = 1
self.ntilecol = 1
self.rowovrlp = 0
self.colovrlp = 0
self.piecefirstrow = 1
self.piecefirstcol = 1
self.piecenrow = 0
self.piecencol = 0
self.tilecostthresh = 500
self.minregionsize = 100
self.nthreads = 1
self.scndryarcflowmax = 8
self.tileedgeweight = 2.5
self.assembleonly = False
self.rmtmptile = False
self.tiledir = 'tiles'
# connected component parameters
self.minconncompfrac = 0.01
self.conncompthresh = 300
self.maxncomps = 32
def read_config(self, configfile):
tsv_file = csv.reader(_CommentedFile(open(configfile, "rb")), delimiter=' ', skipinitialspace=True)
for row in tsv_file:
#print row # prints column 3 of each line
try:
keyword=row[0]
parameter=row[1]
except:
continue
self._keyword[keyword](parameter)
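    # read_config() accepts whitespace-separated "KEYWORD VALUE" pairs, one per
    # line, with '#' comments; keywords must appear in self._keyword above.
    # A minimal illustrative config:
    #
    #   INFILE       wrapped.int
    #   LINELENGTH   1000
    #   OUTFILE      unwrapped.unw
    #   STATCOSTMODE DEFO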
def update(self):
if not self.infile:
value = raw_input('What is the input file:')
self._infile_set(value)
if not self.width>0:
value = raw_input('What is the file width (i.e. number of pixels in a row):')
self._width_set(int(value))
if not self.lines>0:
value = raw_input('What is the file length (i.e. number of lines):')
self._lines_set(int(value))
if (self.ncol<=0 or self.ncol>self.width):
self.ncol=self.width
if (self.nrow<=0 or self.nrow>self.lines):
self.nrow=self.lines
if (self.piecenrow<=0 or self.piecenrow>self.lines):
self.piecenrow=self.lines
if (self.piecencol<=0 or self.piecencol>self.width):
self.piecencol=self.width
def unwrap(self):
# No tile support at the moment
self.ncol=self.width
self.nrow=self.lines
self.piecenrow=self.lines
self.piecencol=self.width
self.unwrap_tile()
def unwrap_multigrid(self, grids=[2,0]):
infile=self.infile
corrfile=self.corrfile
width=self.width
lines=self.lines
outfile=self.outfile
if not self.estfile:
estfile='snaphu.est'
cint=adore.getdata(self.infile, self.width, 'cr4')
coh=adore.getdata(self.corrfile, self.width, 'r4')
for k in xrange(len(grids)):
mcint=insar.multilook(cint, [2**grids[k], 2**grids[k]])
mcoh =insar.multilook(coh , [2**grids[k], 2**grids[k]])
if k==0:
self.infile=infile + str(grids[k])
self.corrfile=corrfile + str(grids[k])
self.outfile=outfile + str(grids[k])
adore.writedata(self.infile, mcint, 'cr4')
adore.writedata(self.corrfile, mcoh, 'r4')
self.width=mcint.shape[1]
self.lines=mcint.shape[0]
self.update()
self.unwrap_tile()
else:
munw=adore.getdata(self.outfile, self.width, 'r4')
if grids[k]==0:
unwk=insar.oversample(munw, [1,1], shape=(lines, width))
self.outfile=outfile
self.infile=infile
self.width=width
self.lines=lines
self.corrfile=corrfile
self.estfile=estfile + str(grids[k-1])
adore.writedata(self.estfile, unwk, 'r4')
self.estfileformat=format.float_data
self.update()
self.unwrap()
| gpl-2.0 | 4,273,140,494,309,871,600 | 32.296029 | 103 | 0.631031 | false |
gizmachi/planetlab_tracer | src/tracer2.py | 1 | 18433 | #!/usr/bin/python2.7
# This tool is written as a part of the course TDDD17
# at the Linkoping University, Sweden (2015).
# Support: [email protected]
# OS tools needed:
# openssl, whois, scamper
# scamper needs to run as root. (uses raw sockets)
import os
# import argparse
import cPickle as pickle
import numpy as np
import multiprocessing
import time
import math
import socket
import string
# try robust cipher checks
from functools import wraps
import errno
import os
import signal
# Some filenames and directories
targets_dir = "targets/"
output_dir = "output/"
certs_dir = output_dir + "cert/"
topsites_filename = "topsites.txt"
geoasn_filename = "geoasn.pickles"
topips_filename = "topips.txt"
output_filename = "sample_ciphers.txt"
# GLOBAL
on_pl = False
as_map = {}
# All relevant info for a single target domain
class Website:
def __init__(self, url, rank):
self.url = url
self.rank = rank
self.source = str(socket.gethostbyname(socket.gethostname()))
self.ssl = None
self.cert_keylength = None
self.cert_ca = None
self.cert_subject = None
self.cert_san = None
self.cert_fingerprint = None
self.cert_valid = None
self.cert_sign_algo = None
self.cert_pubkey_algo = None
self.cipher_suites = None
self.http_ip = None
self.http_path_ip = None
self.http_path_as = None
self.http_path_country = None
self.https_ip = None
self.https_path_ip = None
self.https_path_as = None
self.https_path_country = None
def __str__(self):
delim = '|'
return str(self.rank) \
+ delim + str(self.url) \
+ delim + str(self.http_ip) \
+ delim + str(self.http_path_ip) \
+ delim + str(self.http_path_as) \
+ delim + str(self.http_path_country) \
+ delim + str(self.ssl) \
+ delim + str(self.cert_valid) \
+ delim + str(self.cert_sign_algo) \
+ delim + str(self.cert_pubkey_algo) \
+ delim + str(self.cert_keylength) \
+ delim + str(self.cert_ca) \
+ delim + str(self.cert_subject) \
+ delim + str(self.cert_san) \
+ delim + str(self.cert_fingerprint) \
+ delim + str(self.cipher_suites)
class TimeoutError(Exception):
pass
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator
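# Illustrative use of the timeout decorator above (the wrapped function is
# hypothetical): a call that exceeds the limit raises TimeoutError via SIGALRM.
#
#   @timeout(5)
#   def slow_lookup(address):
#       return socket.gethostbyaddr(address)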
def setupFolders(clean):
# Create the necessary folder structure
# Create certs folder if not exists
if not os.path.exists(certs_dir):
os.makedirs(certs_dir)
# Create targets folder if not exists
if not os.path.exists(targets_dir):
os.makedirs(targets_dir)
# Create output folder if not exists
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Remove aux files, will be recreated later if needed
if clean:
if os.path.exists(targets_dir + '/' + topsites_filename):
os.remove(targets_dir + '/' + topsites_filename)
if os.path.exists(targets_dir + '/' + nossl_filename):
os.remove(targets_dir + '/' + nossl_filename)
if os.path.exists(targets_dir + '/' + topip_filename):
os.remove(targets_dir + '/' + topip_filename)
def set_country_path(targets):
if os.path.exists(targets_dir + geoasn_filename):
geoasn = pickle.load(open(targets_dir + geoasn_filename, "rb"))
else:
geoasn = {}
for t in targets:
if t.http_ip is not None:
cp = []
if t.http_path_as is not None:
for asn in t.http_path_as:
try:
country = geoasn[asn]
except:
stream = os.popen('whois -ch whois.cymru.com AS' + asn)
for i in range(2):
stream.readline() # Ignore lines of header output.
try:
cc = stream.readline().split()[0]
geoasn[asn] = cc
except:
geoasn[asn] = "XX"
country = geoasn[asn]
#TODO != cp[-1]
if len(cp) == 0 or country != cp[-1]:
cp.append(country)
t.http_path_country = cp
# Redo for https
if t.https_ip is not None:
cp = []
if t.https_path_as is not None:
for asn in t.https_path_as:
try:
country = geoasn[asn]
except:
stream = os.popen('whois -ch whois.cymru.com AS' + asn)
for i in range(2):
stream.readline() # Ignore lines of header output.
try:
cc = stream.readline().split()[0]
geoasn[asn] = cc
except:
geoasn[asn] = "XX"
country = geoasn[asn]
#TODO != cp[-1]
if len(cp) == 0 or country != cp[-1]:
cp.append(country)
t.https_path_country = cp
pickle.dump(geoasn, open(targets_dir + geoasn_filename, "wb"))
def getASpath(IPpath):
# Translate IP path to AS path
ASpath = []
for line in IPpath:
address = line[0]
# Ignore private IPs
# TODO ignore 172.16 - 172.31
if address[:7] != "192.168" \
and address[:3] != "10." \
and address[:4] != "127." \
and address != '*':
# Count looked up IPs
AS = lookupAS(address)
for item in AS:
if item is not None and item != 'NA':
if len(ASpath) == 0 or item != ASpath[-1]:
ASpath.append(item)
return ASpath
def lookupAS(address):
# Map an IP address to an AS
global as_map
AS = []
# statistics["stat_aslookup"] += 1
if address in as_map:
AS.append(as_map[address])
# statistics["stat_aslookup_failed"] += 1
else:
# cymru gives better replies than regular whois
stream = os.popen('whois -h whois.cymru.com ' + address)
if on_pl:
headerlines = 3
else:
headerlines = 1
for i in range(headerlines):
stream.readline() # Ignore lines of header output.
while True:
try:
# Read AS from whois info
tmp = stream.readline().split()[0]
if tmp != "NA":
# print "Mapped " + str(address) + " to AS" + str(tmp)
AS.append(tmp)
as_map[address] = tmp
except:
break
return AS
def set_traces(targets):
# Scamper needs an input file. Create tmp files of IP addresses for http/https
print "Running traces..."
ips = []
mapping = {}
if os.path.exists(targets_dir + topips_filename):
os.remove(targets_dir + topips_filename)
# Trace for HTTP
for t in targets:
if t.http_ip is not None:
ips.append(t.http_ip)
mapping[t.http_ip] = t
writefile(targets_dir + topips_filename, ips)
scamperTrace(mapping)
os.remove(targets_dir + '/' + topips_filename)
set_country_path(targets)
# # Redo for HTTPS
# for t in targets:
# if t.https_ip is not None:
# ips.append(t.https_ip)
# mapping[t.https_ip] = t
# writefile(targets_dir + topips_filename, ips)
# scamperTrace(mapping)
# os.remove(targets_dir + '/' + topips_filename)
def set_ssl(targets):
for t in targets:
if t.https_ip is not None:
if downloadCert(t):
t.ssl = True
set_ssl_properties(t)
else:
t.ssl = False
def set_ssl_properties(target):
filename = certs_dir + target.url + '.pem'
# try:
stream = os.popen('openssl x509 -fingerprint -in ' + filename + ' -text -noout')
# for i in range(headerlines):
# stream.readline() # Ignore lines of header output.
prev = ""
line = ""
# for i in range(100):
while True:
prev = line
line = stream.readline()[:-1]
if line == "" and prev == "":
break
# print line
if "Subject:" in line:
# print line.split("CN=")[1]
target.cert_subject = line.split("CN=")[1]
if "Issuer:" in line:
# print line.split("CN=")[1]
            target.cert_ca = line.split("CN=")[1]
if "Subject Alternative Name" in prev:
# print line.lstrip()
target.cert_san = line.lstrip()
if "SHA1 Fingerprint" in line:
# print line.split('=')[1].translate(None, ':')
            target.cert_fingerprint = line.split('=')[1].translate(string.maketrans('', ''), ':')  # strip colons from the fingerprint
if "Signature Algorithm" in line:
# print line.split(':')[1][1:]
target.cert_sign_algo = line.split(':')[1][1:]
if "Public Key Algorithm" in line:
# print line.split(':')[1][1:]
target.cert_pubkey_algo = line.split(':')[1][1:]
if "Public-Key" in line:
# print line.split(':')[1][2:-5]
target.cert_keylength = line.split(':')[1][2:-5]
# Check validation code from file
for l in open(filename,'r+').readlines():
# with open(filename, 'r+') as f:
# for line in f:
if "Verify return code:" in l:
target.cert_valid = l.split(':')[-1][:-1]
# print line.split()[-1]
# except:
# print "Failed to load cert from " + filename
def set_ciphers(targets):
for t in targets:
try:
set_host_ciphers(t)
except:
print "Failed to get supported ciphers for " + t.url
@timeout(30)
def set_host_ciphers(t):
if t.http_ip is not None:
print "Testing supported cipher suites for " + t.http_ip
else:
print "No IP set for " + t.url
return
try:
stream = os.popen('nmap --script ssl-enum-ciphers -p 443 ' + t.http_ip)
for i in range(7):
line = stream.readline()
prev_line = None
current_suit_list = []
current_suit = None
suites = {}
while True:
prev_line = line
line = stream.readline()
# Done
if len(line) > 10 and line[:10] == "Nmap done:":
break
line = line[:-1].split()
if len(line) > 1:
if line[1] == "NULL":
pass
elif line[1] == "ciphers:":
current_suit = prev_line[1]
current_suit_list = []
line = stream.readline().split()
while line[1] != "compressors:":
current_suit_list.append(line[1])
line = stream.readline().split()
suites[current_suit] = current_suit_list
# print suites
t.cipher_suites = suites
except:
print "Ooops..."
def scamperTrace(mapping):
# Run all traces using scamper
# flag -O planetlab is required for using planerlab raw sockets
# -c specifies command for each target
# -g 15 increases the acceptable number of dropped probes
# -P ICMP uses ICMP style tracing. No paris.
# -f read targets from file
# -p 1000 limit the number of packets per second
if on_pl:
stream = os.popen('./scamper/scamper -O planetlab -c "trace -g 15 -P icmp " -f -p 1000 ' + targets_dir + '/' + topips_filename)
else:
stream = os.popen('scamper -O planetlab -c "trace -g 15 -P icmp " -f -p 1000 ' + targets_dir + '/' + topips_filename)
# traces = {} # Completed traces
current = [] # Current trace
current_target = ""
counter = 0
# This is the IP of the PL node
host_ip = str(socket.gethostbyname(socket.gethostname()))
# PARSER
# Loop over output
while True:
line = stream.readline().split()
# Empty line, that means last trace complete.
# Translate IP path to AS path and add to traces
if line == []:
if current_target != "":
aspath = getASpath(current)
mapping[current_target].http_path_as = aspath
mapping[current_target].http_path_ip = current
counter += 1
print str(counter) + "/" + str(len(targets)) + ": Traced " + current_target + ": " + str(aspath)
else:
print "No trace output!"
break
# Start of a new trace. (that means prev. complete)
# Translate IP path to AS path and add to traces
if line[0] == 'traceroute':
if current != []:
aspath = getASpath(current)
mapping[current_target].http_path_as = aspath
mapping[current_target].http_path_ip = current
counter += 1
print str(counter) + "/" + str(len(targets)) + ": Traced " + current_target + ": " + str(aspath)
# Reset current path.
current = []
# Set new target
current_target = line[4]
# The line is a hop in some trace. Add it to current path (if not *)
elif line[1] != '*':
# print line
current.append((line[1], line[2] + line[3]))
def set_url2ip(targets):
# Lookup all ips
print "resolving IP addresses of all targets"
target_ips = []
for t in targets:
try:
ip = socket.gethostbyname(t.url)
print ip
t.http_ip = ip
# TODO include https
t.https_ip = ip
except:
pass
def writefile(filename, content):
if os.path.exists(filename):
os.remove(filename)
f = open(filename,'a+')
for line in content:
f.write(str(line) + '\n')
f.close()
def readfile(filename, maxLen = 1000000):
lines = []
count = 0
if os.path.exists(filename):
for line in open(filename,'r+').readlines():
# with open(filename, 'r+') as f:
# for line in f:
# Optional max length
if count >= maxLen:
break
else:
count += 1
# add to list, remove newline at end of line.
lines.append(line[:-1])
return lines
def downloadCert(target):
# Use threading to implement timeout
# TODO include client identifier in filename
# Download cert using openssl
site = target.url
print "Downloading cert for ",site
force = False
p = multiprocessing.Process(target=runOpenssl, args=(site, force))
p.start()
p.join(6)
if p.is_alive():
p.terminate()
p.join()
# If no cert downloaded, remove empy file and add to no-ssl list
if os.stat(certs_dir + site + '.pem').st_size == 0:
print "No SSL cert found for " + site
os.remove(certs_dir + '/' + site + '.pem')
return False # Download failed
return True # Download successful
def runOpenssl(site, force):
    # Separate function so it can be run in a child process (see downloadCert)
# Saves 0 byte cert if not ssl enabled, removed by calling function
cert_path = certs_dir + site + '.pem'
os.popen('openssl s_client -CApath /etc/ssl/certs/ -showcerts -connect ' + site + \
# os.popen('openssl s_client -CApath /etc/ssl/certs/ -showcerts -x509_strict -connect ' + site + \
':443 </dev/null 2>/dev/null > ' + cert_path)
def gen_log_space(limit, n):
# Create a vector of n logarithmmically spaced indices from 0 to limit.
result = [1]
if n>1: # just a check to avoid ZeroDivisionError
ratio = (float(limit)/result[-1]) ** (1.0/(n-len(result)))
while len(result)<n:
next_value = result[-1]*ratio
if next_value - result[-1] >= 1:
# safe zone. next_value will be a different integer
result.append(next_value)
else:
# problem! same integer. we need to find next_value by artificially incrementing previous value
result.append(result[-1]+1)
# recalculate the ratio so that the remaining values will scale correctly
ratio = (float(limit)/result[-1]) ** (1.0/(n-len(result)))
# round, re-adjust to 0 indexing (i.e. minus 1) and return np.uint64 array
return np.array(map(lambda x: round(x)-1, result), dtype=np.uint64)
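# Illustrative example: gen_log_space(1000, 40) returns 40 strictly increasing
# indices between 0 and 999, roughly logarithmically spaced; the main block
# below uses this to subsample the ranked top-sites list.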
### MAIN
if __name__ == "__main__":
# Set starttime
nr_of_targets = 40
start_time = time.time()
# read targets from file
tmp_targets = readfile(targets_dir + topsites_filename)
# Create logarithmically spaced vector of sample indices
logspace = gen_log_space(len(tmp_targets), nr_of_targets)
targets = []
for i in range(len(tmp_targets)):
if i in logspace:
targets.append(Website(tmp_targets[i], i + 1))
tmp_targets = []
set_url2ip(targets)
# Download certs:
set_ssl(targets)
# Check supported cipher suites
set_ciphers(targets)
# perform traces
# set_traces(targets)
# Set runtime in statistics
runtime = time.time() - start_time
print "Completed run in " + str(int(math.floor(runtime/60))) \
+ "m " + str(int(runtime % 60)) + "s."
writefile(output_dir + output_filename, targets)
| gpl-3.0 | -1,131,774,174,693,077,900 | 30.295416 | 135 | 0.526935 | false |
philipz/PyCV-time | opencv-official-samples/earlier/drawing.py | 7 | 5490 | #! /usr/bin/env python
from random import Random
import colorsys
print "OpenCV Python version of drawing"
import cv2.cv as cv
def random_color(random):
"""
Return a random color
"""
icolor = random.randint(0, 0xFFFFFF)
return cv.Scalar(icolor & 0xff, (icolor >> 8) & 0xff, (icolor >> 16) & 0xff)
if __name__ == '__main__':
# some "constants"
width = 1000
height = 700
window_name = "Drawing Demo"
number = 100
delay = 5
line_type = cv.CV_AA # change it to 8 to see non-antialiased graphics
# create the source image
image = cv.CreateImage( (width, height), 8, 3)
# create window and display the original picture in it
cv.NamedWindow(window_name, 1)
cv.SetZero(image)
cv.ShowImage(window_name, image)
# create the random number
random = Random()
# draw some lines
for i in range(number):
pt1 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
pt2 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
cv.Line(image, pt1, pt2,
random_color(random),
random.randrange(0, 10),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# draw some rectangles
for i in range(number):
pt1 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
pt2 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
cv.Rectangle(image, pt1, pt2,
random_color(random),
random.randrange(-1, 9),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
    # draw some ellipses
for i in range(number):
pt1 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
sz = (random.randrange(0, 200),
random.randrange(0, 200))
angle = random.randrange(0, 1000) * 0.180
cv.Ellipse(image, pt1, sz, angle, angle - 100, angle + 200,
random_color(random),
random.randrange(-1, 9),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# init the list of polylines
nb_polylines = 2
polylines_size = 3
pt = [0,] * nb_polylines
for a in range(nb_polylines):
pt [a] = [0,] * polylines_size
# draw some polylines
for i in range(number):
for a in range(nb_polylines):
for b in range(polylines_size):
pt [a][b] = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
cv.PolyLine(image, pt, 1,
random_color(random),
random.randrange(1, 9),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# draw some filled polylines
for i in range(number):
for a in range(nb_polylines):
for b in range(polylines_size):
pt [a][b] = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
cv.FillPoly(image, pt,
random_color(random),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# draw some circles
for i in range(number):
pt1 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
cv.Circle(image, pt1, random.randrange(0, 300),
random_color(random),
random.randrange(-1, 9),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# draw some text
for i in range(number):
pt1 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
font = cv.InitFont(random.randrange(0, 8),
random.randrange(0, 100) * 0.05 + 0.01,
random.randrange(0, 100) * 0.05 + 0.01,
random.randrange(0, 5) * 0.1,
random.randrange(0, 10),
line_type)
cv.PutText(image, "Testing text rendering!",
pt1, font,
random_color(random))
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
    # prepare a text, and get its properties
font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX,
3, 3, 0.0, 5, line_type)
text_size, ymin = cv.GetTextSize("OpenCV forever!", font)
pt1 = ((width - text_size[0]) / 2, (height + text_size[1]) / 2)
image2 = cv.CloneImage(image)
# now, draw some OpenCV pub ;-)
for i in range(0, 512, 2):
cv.SubS(image2, cv.ScalarAll(i), image)
(r, g, b) = colorsys.hsv_to_rgb((i % 100) / 100., 1, 1)
cv.PutText(image, "OpenCV forever!",
pt1, font, cv.RGB(255 * r, 255 * g, 255 * b))
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# wait some key to end
cv.WaitKey(0)
cv.DestroyAllWindows()
| mit | 8,093,668,142,328,921,000 | 32.888889 | 80 | 0.510929 | false |
lukas-bednar/pytest | testing/test_runner.py | 2 | 19892 | from __future__ import with_statement
import _pytest._code
import os
import py
import pytest
import sys
from _pytest import runner, main
class TestSetupState:
def test_setup(self, testdir):
ss = runner.SetupState()
item = testdir.getitem("def test_func(): pass")
l = [1]
ss.prepare(item)
ss.addfinalizer(l.pop, colitem=item)
assert l
ss._pop_and_teardown()
assert not l
def test_teardown_exact_stack_empty(self, testdir):
item = testdir.getitem("def test_func(): pass")
ss = runner.SetupState()
ss.teardown_exact(item, None)
ss.teardown_exact(item, None)
ss.teardown_exact(item, None)
def test_setup_fails_and_failure_is_cached(self, testdir):
item = testdir.getitem("""
def setup_module(mod):
raise ValueError(42)
def test_func(): pass
""") # noqa
ss = runner.SetupState()
pytest.raises(ValueError, lambda: ss.prepare(item))
pytest.raises(ValueError, lambda: ss.prepare(item))
def test_teardown_multiple_one_fails(self, testdir):
r = []
def fin1(): r.append('fin1')
def fin2(): raise Exception('oops')
def fin3(): r.append('fin3')
item = testdir.getitem("def test_func(): pass")
ss = runner.SetupState()
ss.addfinalizer(fin1, item)
ss.addfinalizer(fin2, item)
ss.addfinalizer(fin3, item)
with pytest.raises(Exception) as err:
ss._callfinalizers(item)
assert err.value.args == ('oops',)
assert r == ['fin3', 'fin1']
def test_teardown_multiple_fail(self, testdir):
# Ensure the first exception is the one which is re-raised.
# Ideally both would be reported however.
def fin1(): raise Exception('oops1')
def fin2(): raise Exception('oops2')
item = testdir.getitem("def test_func(): pass")
ss = runner.SetupState()
ss.addfinalizer(fin1, item)
ss.addfinalizer(fin2, item)
with pytest.raises(Exception) as err:
ss._callfinalizers(item)
assert err.value.args == ('oops2',)
class BaseFunctionalTests:
def test_passfunction(self, testdir):
reports = testdir.runitem("""
def test_func():
pass
""")
rep = reports[1]
assert rep.passed
assert not rep.failed
assert rep.outcome == "passed"
assert not rep.longrepr
def test_failfunction(self, testdir):
reports = testdir.runitem("""
def test_func():
assert 0
""")
rep = reports[1]
assert not rep.passed
assert not rep.skipped
assert rep.failed
assert rep.when == "call"
assert rep.outcome == "failed"
#assert isinstance(rep.longrepr, ReprExceptionInfo)
def test_skipfunction(self, testdir):
reports = testdir.runitem("""
import pytest
def test_func():
pytest.skip("hello")
""")
rep = reports[1]
assert not rep.failed
assert not rep.passed
assert rep.skipped
assert rep.outcome == "skipped"
        #assert rep.skipped.when == "call"
        #assert rep.skipped.reason == "hello"
#assert rep.skipped.location.lineno == 3
#assert rep.skipped.location.path
#assert not rep.skipped.failurerepr
def test_skip_in_setup_function(self, testdir):
reports = testdir.runitem("""
import pytest
def setup_function(func):
pytest.skip("hello")
def test_func():
pass
""")
print(reports)
rep = reports[0]
assert not rep.failed
assert not rep.passed
assert rep.skipped
#assert rep.skipped.reason == "hello"
#assert rep.skipped.location.lineno == 3
#assert rep.skipped.location.lineno == 3
assert len(reports) == 2
assert reports[1].passed # teardown
def test_failure_in_setup_function(self, testdir):
reports = testdir.runitem("""
import pytest
def setup_function(func):
raise ValueError(42)
def test_func():
pass
""")
rep = reports[0]
assert not rep.skipped
assert not rep.passed
assert rep.failed
assert rep.when == "setup"
assert len(reports) == 2
def test_failure_in_teardown_function(self, testdir):
reports = testdir.runitem("""
import pytest
def teardown_function(func):
raise ValueError(42)
def test_func():
pass
""")
print(reports)
assert len(reports) == 3
rep = reports[2]
assert not rep.skipped
assert not rep.passed
assert rep.failed
assert rep.when == "teardown"
#assert rep.longrepr.reprcrash.lineno == 3
#assert rep.longrepr.reprtraceback.reprentries
def test_custom_failure_repr(self, testdir):
testdir.makepyfile(conftest="""
import pytest
class Function(pytest.Function):
def repr_failure(self, excinfo):
return "hello"
""")
reports = testdir.runitem("""
import pytest
def test_func():
assert 0
""")
rep = reports[1]
assert not rep.skipped
assert not rep.passed
assert rep.failed
#assert rep.outcome.when == "call"
#assert rep.failed.where.lineno == 3
#assert rep.failed.where.path.basename == "test_func.py"
#assert rep.failed.failurerepr == "hello"
def test_teardown_final_returncode(self, testdir):
rec = testdir.inline_runsource("""
def test_func():
pass
def teardown_function(func):
raise ValueError(42)
""")
assert rec.ret == 1
def test_exact_teardown_issue90(self, testdir):
rec = testdir.inline_runsource("""
import pytest
class TestClass:
def test_method(self):
pass
def teardown_class(cls):
raise Exception()
def test_func():
import sys
            # on python2 exc_info is kept till a function exits
# so we would end up calling test functions while
# sys.exc_info would return the indexerror
# from guessing the lastitem
excinfo = sys.exc_info()
import traceback
assert excinfo[0] is None, \
traceback.format_exception(*excinfo)
def teardown_function(func):
raise ValueError(42)
""")
reps = rec.getreports("pytest_runtest_logreport")
print (reps)
for i in range(2):
assert reps[i].nodeid.endswith("test_method")
assert reps[i].passed
assert reps[2].when == "teardown"
assert reps[2].failed
assert len(reps) == 6
for i in range(3,5):
assert reps[i].nodeid.endswith("test_func")
assert reps[i].passed
assert reps[5].when == "teardown"
assert reps[5].nodeid.endswith("test_func")
assert reps[5].failed
def test_failure_in_setup_function_ignores_custom_repr(self, testdir):
testdir.makepyfile(conftest="""
import pytest
class Function(pytest.Function):
def repr_failure(self, excinfo):
assert 0
""")
reports = testdir.runitem("""
def setup_function(func):
raise ValueError(42)
def test_func():
pass
""")
assert len(reports) == 2
rep = reports[0]
print(rep)
assert not rep.skipped
assert not rep.passed
assert rep.failed
#assert rep.outcome.when == "setup"
#assert rep.outcome.where.lineno == 3
#assert rep.outcome.where.path.basename == "test_func.py"
        #assert isinstance(rep.failed.failurerepr, PythonFailureRepr)
def test_systemexit_does_not_bail_out(self, testdir):
try:
reports = testdir.runitem("""
def test_func():
raise SystemExit(42)
""")
except SystemExit:
pytest.fail("runner did not catch SystemExit")
rep = reports[1]
assert rep.failed
assert rep.when == "call"
def test_exit_propagates(self, testdir):
try:
testdir.runitem("""
import pytest
def test_func():
raise pytest.exit.Exception()
""")
except pytest.exit.Exception:
pass
else:
pytest.fail("did not raise")
class TestExecutionNonForked(BaseFunctionalTests):
def getrunner(self):
def f(item):
return runner.runtestprotocol(item, log=False)
return f
def test_keyboardinterrupt_propagates(self, testdir):
try:
testdir.runitem("""
def test_func():
raise KeyboardInterrupt("fake")
""")
except KeyboardInterrupt:
pass
else:
pytest.fail("did not raise")
class TestExecutionForked(BaseFunctionalTests):
pytestmark = pytest.mark.skipif("not hasattr(os, 'fork')")
def getrunner(self):
# XXX re-arrange this test to live in pytest-xdist
boxed = pytest.importorskip("xdist.boxed")
return boxed.forked_run_report
def test_suicide(self, testdir):
reports = testdir.runitem("""
def test_func():
import os
os.kill(os.getpid(), 15)
""")
rep = reports[0]
assert rep.failed
assert rep.when == "???"
class TestSessionReports:
def test_collect_result(self, testdir):
col = testdir.getmodulecol("""
def test_func1():
pass
class TestClass:
pass
""")
rep = runner.collect_one_node(col)
assert not rep.failed
assert not rep.skipped
assert rep.passed
locinfo = rep.location
assert locinfo[0] == col.fspath.basename
assert not locinfo[1]
assert locinfo[2] == col.fspath.basename
res = rep.result
assert len(res) == 2
assert res[0].name == "test_func1"
assert res[1].name == "TestClass"
def test_skip_at_module_scope(self, testdir):
col = testdir.getmodulecol("""
import pytest
pytest.skip("hello")
def test_func():
pass
""")
rep = main.collect_one_node(col)
assert not rep.failed
assert not rep.passed
assert rep.skipped
reporttypes = [
runner.BaseReport,
runner.TestReport,
runner.TeardownErrorReport,
runner.CollectReport,
]
@pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes])
def test_report_extra_parameters(reporttype):
if hasattr(py.std.inspect, 'signature'):
args = list(py.std.inspect.signature(reporttype.__init__).parameters.keys())[1:]
else:
args = py.std.inspect.getargspec(reporttype.__init__)[0][1:]
basekw = dict.fromkeys(args, [])
report = reporttype(newthing=1, **basekw)
assert report.newthing == 1
def test_callinfo():
ci = runner.CallInfo(lambda: 0, '123')
assert ci.when == "123"
assert ci.result == 0
assert "result" in repr(ci)
ci = runner.CallInfo(lambda: 0/0, '123')
assert ci.when == "123"
assert not hasattr(ci, 'result')
assert ci.excinfo
assert "exc" in repr(ci)
# design question: do we want general hooks in python files?
# then something like the following functional tests makes sense
@pytest.mark.xfail
def test_runtest_in_module_ordering(testdir):
p1 = testdir.makepyfile("""
def pytest_runtest_setup(item): # runs after class-level!
item.function.mylist.append("module")
class TestClass:
def pytest_runtest_setup(self, item):
assert not hasattr(item.function, 'mylist')
item.function.mylist = ['class']
def pytest_funcarg__mylist(self, request):
return request.function.mylist
def pytest_runtest_call(self, item, __multicall__):
try:
__multicall__.execute()
except ValueError:
pass
def test_hello1(self, mylist):
assert mylist == ['class', 'module'], mylist
raise ValueError()
def test_hello2(self, mylist):
assert mylist == ['class', 'module'], mylist
def pytest_runtest_teardown(item):
del item.function.mylist
""")
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines([
"*2 passed*"
])
def test_outcomeexception_exceptionattributes():
outcome = runner.OutcomeException('test')
assert outcome.args[0] == outcome.msg
def test_pytest_exit():
try:
pytest.exit("hello")
except pytest.exit.Exception:
excinfo = _pytest._code.ExceptionInfo()
assert excinfo.errisinstance(KeyboardInterrupt)
def test_pytest_fail():
try:
pytest.fail("hello")
except pytest.fail.Exception:
excinfo = _pytest._code.ExceptionInfo()
s = excinfo.exconly(tryshort=True)
assert s.startswith("Failed")
def test_pytest_fail_notrace(testdir):
testdir.makepyfile("""
import pytest
def test_hello():
pytest.fail("hello", pytrace=False)
def teardown_function(function):
pytest.fail("world", pytrace=False)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"world",
"hello",
])
assert 'def teardown_function' not in result.stdout.str()
def test_pytest_no_tests_collected_exit_status(testdir):
result = testdir.runpytest()
result.stdout.fnmatch_lines('*collected 0 items*')
assert result.ret == main.EXIT_NOTESTSCOLLECTED
testdir.makepyfile(test_foo="""
def test_foo():
assert 1
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines('*collected 1 items*')
result.stdout.fnmatch_lines('*1 passed*')
assert result.ret == main.EXIT_OK
result = testdir.runpytest('-k nonmatch')
result.stdout.fnmatch_lines('*collected 1 items*')
result.stdout.fnmatch_lines('*1 deselected*')
assert result.ret == main.EXIT_NOTESTSCOLLECTED
def test_exception_printing_skip():
try:
pytest.skip("hello")
except pytest.skip.Exception:
excinfo = _pytest._code.ExceptionInfo()
s = excinfo.exconly(tryshort=True)
assert s.startswith("Skipped")
def test_importorskip(monkeypatch):
importorskip = pytest.importorskip
def f():
importorskip("asdlkj")
try:
sys = importorskip("sys") # noqa
assert sys == py.std.sys
#path = pytest.importorskip("os.path")
#assert path == py.std.os.path
excinfo = pytest.raises(pytest.skip.Exception, f)
path = py.path.local(excinfo.getrepr().reprcrash.path)
# check that importorskip reports the actual call
# in this test the test_runner.py file
assert path.purebasename == "test_runner"
pytest.raises(SyntaxError, "pytest.importorskip('x y z')")
pytest.raises(SyntaxError, "pytest.importorskip('x=y')")
mod = py.std.types.ModuleType("hello123")
mod.__version__ = "1.3"
monkeypatch.setitem(sys.modules, "hello123", mod)
pytest.raises(pytest.skip.Exception, """
pytest.importorskip("hello123", minversion="1.3.1")
""")
mod2 = pytest.importorskip("hello123", minversion="1.3")
assert mod2 == mod
except pytest.skip.Exception:
print(_pytest._code.ExceptionInfo())
pytest.fail("spurious skip")
def test_importorskip_imports_last_module_part():
ospath = pytest.importorskip("os.path")
assert os.path == ospath
def test_importorskip_dev_module(monkeypatch):
try:
mod = py.std.types.ModuleType("mockmodule")
mod.__version__ = '0.13.0.dev-43290'
monkeypatch.setitem(sys.modules, 'mockmodule', mod)
mod2 = pytest.importorskip('mockmodule', minversion='0.12.0')
assert mod2 == mod
pytest.raises(pytest.skip.Exception, """
pytest.importorskip('mockmodule1', minversion='0.14.0')""")
except pytest.skip.Exception:
print(_pytest._code.ExceptionInfo())
pytest.fail("spurious skip")
def test_pytest_cmdline_main(testdir):
p = testdir.makepyfile("""
import pytest
def test_hello():
assert 1
if __name__ == '__main__':
pytest.cmdline.main([__file__])
""")
import subprocess
popen = subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE)
popen.communicate()
ret = popen.wait()
assert ret == 0
def test_unicode_in_longrepr(testdir):
testdir.makeconftest("""
import py
def pytest_runtest_makereport(__multicall__):
rep = __multicall__.execute()
if rep.when == "call":
rep.longrepr = py.builtin._totext("\\xc3\\xa4", "utf8")
return rep
""")
testdir.makepyfile("""
def test_out():
assert 0
""")
result = testdir.runpytest()
assert result.ret == 1
assert "UnicodeEncodeError" not in result.stderr.str()
def test_failure_in_setup(testdir):
testdir.makepyfile("""
def setup_module():
0/0
def test_func():
pass
""")
result = testdir.runpytest("--tb=line")
assert "def setup_module" not in result.stdout.str()
def test_makereport_getsource(testdir):
testdir.makepyfile("""
def test_foo():
if False: pass
else: assert False
""")
result = testdir.runpytest()
assert 'INTERNALERROR' not in result.stdout.str()
result.stdout.fnmatch_lines(['*else: assert False*'])
def test_makereport_getsource_dynamic_code(testdir, monkeypatch):
"""Test that exception in dynamically generated code doesn't break getting the source line."""
import inspect
original_findsource = inspect.findsource
def findsource(obj, *args, **kwargs):
# Can be triggered by dynamically created functions
if obj.__name__ == 'foo':
raise IndexError()
return original_findsource(obj, *args, **kwargs)
monkeypatch.setattr(inspect, 'findsource', findsource)
testdir.makepyfile("""
import pytest
@pytest.fixture
def foo(missing):
pass
def test_fix(foo):
assert False
""")
result = testdir.runpytest('-vv')
assert 'INTERNALERROR' not in result.stdout.str()
result.stdout.fnmatch_lines(["*test_fix*", "*fixture*'missing'*not found*"])
def test_store_except_info_on_error():
""" Test that upon test failure, the exception info is stored on
sys.last_traceback and friends.
"""
# Simulate item that raises a specific exception
class ItemThatRaises:
def runtest(self):
raise IndexError('TEST')
try:
runner.pytest_runtest_call(ItemThatRaises())
except IndexError:
pass
# Check that exception info is stored on sys
assert sys.last_type is IndexError
assert sys.last_value.args[0] == 'TEST'
assert sys.last_traceback
| mit | 2,071,119,078,804,903,400 | 31.503268 | 98 | 0.577921 | false |
klaricch/TransposonFigures | total_table.py | 1 | 2595 | #!/usr/bin/env python
import re
import sys
from collections import defaultdict
kin_mono="/Users/kristen/Documents/transposon_figure_data/tables/UT_with_monomorphic_kin_matrix_full.txt"
kin="/Users/kristen/Documents/transposon_figure_data/data/kin_matrix_full.txt"
ctcp="/Users/kristen/Documents/transposon_figure_data/data/CtCp_all_nonredundant.txt"
solo="/Users/kristen/Documents/transposon_figure_data/data/solo_class.txt"
classes={}
totals=defaultdict(int)
totals_class=defaultdict(int)
#need to add in non-monomorphic
with open(solo, 'r') as IN:
header=next(IN)
for line in IN:
line=line.rstrip()
items=line.split('\t')
te=items[0]
if te=="TC2":
te="Tc2"
classification=items[1]
classes[te]=classification
#need to add in non-monomorphic
with open(ctcp, 'r') as IN:
header=next(IN)
for line in IN:
line=line.rstrip()
items=line.split('\t')
te_info=items[3]
match=re.search("(\w+)_(\d+)_(.*)_(non-)?reference",te_info)
te=match.group(3)
classification=items[7]
te=items[0]
classification=items[1]
classes[te]=classification
with open(kin_mono, 'r') as IN:
next(IN)
for line in IN:
line=line.rstrip()
items=line.split('\t')
te=items[0]
strains=items[1:]
info=te.split("_")
chrom,pos=info[0:2]
site=info[-1]
transposon=info[2:-1]
transposon="_".join(transposon)
if transposon not in classes:
te_class="unknown"
else:
te_class=classes[transposon]
if "1" in strains: # a double check
totals[site]+=1
totals_class[te_class]+=1
non_mono=0
with open(kin, 'r') as IN:
next(IN)
for line in IN:
line=line.rstrip()
items=line.split('\t')
te=items[0]
strains=items[1:]
info=te.split("_")
chrom,pos=info[0:2]
site=info[-1]
transposon=info[2:-1]
transposon="_".join(transposon)
if "1" in strains and site=="R": # a double check
non_mono+=1
print non_mono
print totals["R"]
mono_sites=totals["R"]-non_mono
print mono_sites
OUT=open("Table1.txt",'w')
OUT.write("Type\tTotal\n")
overall_total=sum(totals.values())
OUT.write("Total Transposon Sites\t"+str(overall_total)+"\n")
OUT.write("Insertion Sites\t"+str(totals["NR"])+"\n")
OUT.write("Active Reference Sites\t"+str(non_mono)+"\n")
OUT.write("Monomorphic Reference Sites\t"+str(mono_sites)+"\n")
OUT.write("DNA Elements\t"+str(totals_class["dnatransposon"])+"\n")
OUT.write("Retrotransposon Elements\t"+str(totals_class["retrotransposon"])+"\n")
OUT.write("Unknown Transposon Elements\t"+str(totals_class["unknown"])+"\n")
OUT.close() | mit | -2,764,518,976,974,847,000 | 23.712871 | 105 | 0.667437 | false |
SKIRT/PTS | modeling/config/adapt_fit.py | 1 | 2495 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.modeling.core.environment import load_modeling_environment_cwd
from pts.modeling.config.component import definition
# -----------------------------------------------------------------
# Load environment and model suite
environment = load_modeling_environment_cwd()
runs = environment.fitting_runs
# -----------------------------------------------------------------
properties = ["representation", "filters", "ranges", "genetic", "grid", "units", "types"]
# -----------------------------------------------------------------
definition = definition.copy()
# -----------------------------------------------------------------
# The fitting run for which to adapt the configuration
if runs.empty: raise RuntimeError("No fitting runs are present")
elif runs.has_single: definition.add_fixed("name", "name of the fitting run", runs.single_name)
else: definition.add_required("name", "string", "name of the fitting run", choices=runs.names)
# -----------------------------------------------------------------
# Dust or stellar
definition.add_positional_optional("properties", "string_list", "properties to adapt", default=properties, choices=properties)
# -----------------------------------------------------------------
# Select certain properties
definition.add_optional("contains", "string", "only adapt properties containing this string in their name")
definition.add_optional("not_contains", "string", "don't adapt properties containing this string in their name")
definition.add_optional("exact_name", "string", "only adapt properties with this exact string as their name")
definition.add_optional("exact_not_name", "string", "don't adapt properties with this exact string as their name")
definition.add_optional("startswith", "string", "only adapt properties whose name starts with this string")
definition.add_optional("endswith", "string", "only adapt properties whose name starts with this string")
# -----------------------------------------------------------------
# Save
definition.add_flag("save", "save adapted properties", True)
# -----------------------------------------------------------------
| agpl-3.0 | -6,075,029,844,033,250,000 | 46.056604 | 126 | 0.543304 | false |
buntyke/GPy | GPy/util/datasets.py | 8 | 64882 | from __future__ import print_function
import csv
import os
import copy
import numpy as np
import GPy
import scipy.io
import zipfile
import tarfile
import datetime
import json
import re
import sys
from .config import *
ipython_available=True
try:
import IPython
except ImportError:
ipython_available=False
try:
#In Python 2, cPickle is faster. It does not exist in Python 3 but the underlying code is always used
#if available
import cPickle as pickle
except ImportError:
import pickle
#A Python2/3 import handler - urllib2 changed its name in Py3 and was also reorganised
try:
    from urllib2 import urlopen
    from urllib2 import URLError
    from urllib import quote
except ImportError:
    from urllib.request import urlopen
    from urllib.error import URLError
    from urllib.parse import quote
def reporthook(a,b,c):
# ',' at the end of the line is important!
#print "% 3.1f%% of %d bytes\r" % (min(100, float(a * b) / c * 100), c),
#you can also use sys.stdout.write
sys.stdout.write("\r% 3.1f%% of %d bytes" % (min(100, float(a * b) / c * 100), c))
sys.stdout.flush()
# Global variables
data_path = os.path.expandvars(config.get('datasets', 'dir'))
#data_path = os.path.join(os.path.dirname(__file__), 'datasets')
default_seed = 10000
overide_manual_authorize=False
neil_url = 'http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/dataset_mirror/'
# Read data resources from json file.
# Don't do this when ReadTheDocs is scanning as it breaks things
on_rtd = os.environ.get('READTHEDOCS', None) == 'True' #Checks if RTD is scanning
if not (on_rtd):
path = os.path.join(os.path.dirname(__file__), 'data_resources.json')
json_data=open(path).read()
data_resources = json.loads(json_data)
if not (on_rtd):
path = os.path.join(os.path.dirname(__file__), 'football_teams.json')
json_data=open(path).read()
football_dict = json.loads(json_data)
def prompt_user(prompt):
"""Ask user for agreeing to data set licenses."""
# raw_input returns the empty string for "enter"
yes = set(['yes', 'y'])
no = set(['no','n'])
try:
print(prompt)
choice = raw_input().lower()
# would like to test for exception here, but not sure if we can do that without importing IPython
except:
print('Stdin is not implemented.')
print('You need to set')
print('overide_manual_authorize=True')
print('to proceed with the download. Please set that variable and continue.')
raise
if choice in yes:
return True
elif choice in no:
return False
else:
print(("Your response was a " + choice))
print("Please respond with 'yes', 'y' or 'no', 'n'")
#return prompt_user()
def data_available(dataset_name=None):
"""Check if the data set is available on the local machine already."""
from itertools import izip_longest
dr = data_resources[dataset_name]
zip_urls = (dr['files'], )
if dr.has_key('save_names'): zip_urls += (dr['save_names'], )
else: zip_urls += ([],)
for file_list, save_list in izip_longest(*zip_urls, fillvalue=[]):
for f, s in izip_longest(file_list, save_list, fillvalue=None):
if s is not None: f=s # If there is a save_name given, use that one
if not os.path.exists(os.path.join(data_path, dataset_name, f)):
return False
return True
def download_url(url, store_directory, save_name=None, messages=True, suffix=''):
"""Download a file from a url and save it to disk."""
i = url.rfind('/')
file = url[i+1:]
print(file)
dir_name = os.path.join(data_path, store_directory)
if save_name is None: save_name = os.path.join(dir_name, file)
else: save_name = os.path.join(dir_name, save_name)
if suffix is None: suffix=''
print("Downloading ", url, "->", save_name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
try:
response = urlopen(url+suffix)
except URLError as e:
if not hasattr(e, "code"):
raise
response = e
if response.code > 399 and response.code<500:
raise ValueError('Tried url ' + url + suffix + ' and received client error ' + str(response.code))
elif response.code > 499:
raise ValueError('Tried url ' + url + suffix + ' and received server error ' + str(response.code))
with open(save_name, 'wb') as f:
meta = response.info()
content_length_str = meta.getheaders("Content-Length")
if content_length_str:
file_size = int(content_length_str[0])
else:
file_size = None
status = ""
file_size_dl = 0
block_sz = 8192
line_length=30
while True:
buff = response.read(block_sz)
if not buff:
break
file_size_dl += len(buff)
f.write(buff)
sys.stdout.write(" "*(len(status)) + "\r")
if file_size:
status = r"[{perc: <{ll}}] {dl:7.3f}/{full:.3f}MB".format(dl=file_size_dl/(1048576.),
full=file_size/(1048576.), ll=line_length,
perc="="*int(line_length*float(file_size_dl)/file_size))
else:
status = r"[{perc: <{ll}}] {dl:7.3f}MB".format(dl=file_size_dl/(1048576.),
ll=line_length,
perc="."*int(line_length*float(file_size_dl/(10*1048576.))))
sys.stdout.write(status)
sys.stdout.flush()
sys.stdout.write(" "*(len(status)) + "\r")
print(status)
# if we wanted to get more sophisticated maybe we should check the response code here again even for successes.
#with open(save_name, 'wb') as f:
# f.write(response.read())
#urllib.urlretrieve(url+suffix, save_name, reporthook)
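# Illustrative usage sketch (the URL and directory below are hypothetical placeholders, not
# real resources): fetch a single file into a subdirectory of the package data path.
def _download_url_example():
    download_url('http://example.com/some_file.csv', 'example_dataset',
                 save_name='some_file.csv')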
def authorize_download(dataset_name=None):
"""Check with the user that the are happy with terms and conditions for the data set."""
print(('Acquiring resource: ' + dataset_name))
# TODO, check resource is in dictionary!
print('')
dr = data_resources[dataset_name]
print('Details of data: ')
print((dr['details']))
print('')
if dr['citation']:
print('Please cite:')
print((dr['citation']))
print('')
if dr['size']:
print(('After downloading the data will take up ' + str(dr['size']) + ' bytes of space.'))
print('')
print(('Data will be stored in ' + os.path.join(data_path, dataset_name) + '.'))
print('')
if overide_manual_authorize:
if dr['license']:
print('You have agreed to the following license:')
print((dr['license']))
print('')
return True
else:
if dr['license']:
print('You must also agree to the following license:')
print((dr['license']))
print('')
return prompt_user('Do you wish to proceed with the download? [yes/no]')
def download_data(dataset_name=None):
"""Check with the user that the are happy with terms and conditions for the data set, then download it."""
import itertools
dr = data_resources[dataset_name]
if not authorize_download(dataset_name):
raise Exception("Permission to download data set denied.")
zip_urls = (dr['urls'], dr['files'])
if dr.has_key('save_names'): zip_urls += (dr['save_names'], )
else: zip_urls += ([],)
if dr.has_key('suffices'): zip_urls += (dr['suffices'], )
else: zip_urls += ([],)
for url, files, save_names, suffices in itertools.izip_longest(*zip_urls, fillvalue=[]):
for f, save_name, suffix in itertools.izip_longest(files, save_names, suffices, fillvalue=None):
download_url(os.path.join(url,f), dataset_name, save_name, suffix=suffix)
return True
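# Typical acquisition flow (sketch): check the local cache first and only download on demand.
# 'boston_housing' is one of the resource names used by the loaders further down.
def _ensure_dataset_example(dataset_name='boston_housing'):
    if not data_available(dataset_name):
        download_data(dataset_name)
    return os.path.join(data_path, dataset_name)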
def data_details_return(data, data_set):
"""Update the data component of the data dictionary with details drawn from the data_resources."""
data.update(data_resources[data_set])
return data
def cmu_urls_files(subj_motions, messages = True):
'''
Find which resources are missing on the local disk for the requested CMU motion capture motions.
'''
dr = data_resources['cmu_mocap_full']
cmu_url = dr['urls'][0]
subjects_num = subj_motions[0]
motions_num = subj_motions[1]
resource = {'urls' : [], 'files' : []}
# Convert numbers to strings
subjects = []
motions = [list() for _ in range(len(subjects_num))]
for i in range(len(subjects_num)):
curSubj = str(int(subjects_num[i]))
if int(subjects_num[i]) < 10:
curSubj = '0' + curSubj
subjects.append(curSubj)
for j in range(len(motions_num[i])):
curMot = str(int(motions_num[i][j]))
if int(motions_num[i][j]) < 10:
curMot = '0' + curMot
motions[i].append(curMot)
all_skels = []
assert len(subjects) == len(motions)
all_motions = []
for i in range(len(subjects)):
skel_dir = os.path.join(data_path, 'cmu_mocap')
cur_skel_file = os.path.join(skel_dir, subjects[i] + '.asf')
url_required = False
file_download = []
if not os.path.exists(cur_skel_file):
# Current skel file doesn't exist.
if not os.path.isdir(skel_dir):
os.makedirs(skel_dir)
# Add skel file to list.
url_required = True
file_download.append(subjects[i] + '.asf')
for j in range(len(motions[i])):
file_name = subjects[i] + '_' + motions[i][j] + '.amc'
cur_motion_file = os.path.join(skel_dir, file_name)
if not os.path.exists(cur_motion_file):
url_required = True
file_download.append(subjects[i] + '_' + motions[i][j] + '.amc')
if url_required:
resource['urls'].append(cmu_url + '/' + subjects[i] + '/')
resource['files'].append(file_download)
return resource
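# Sketch of the expected argument structure for cmu_urls_files: a tuple of subject numbers and,
# per subject, a list of motion numbers (here subject 35 with motions 1 and 2, purely illustrative).
def _cmu_urls_files_example():
    return cmu_urls_files(([35], [[1, 2]]))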
try:
import gpxpy
import gpxpy.gpx
gpxpy_available = True
except ImportError:
gpxpy_available = False
if gpxpy_available:
def epomeo_gpx(data_set='epomeo_gpx', sample_every=4):
if not data_available(data_set):
download_data(data_set)
files = ['endomondo_1', 'endomondo_2', 'garmin_watch_via_endomondo','viewranger_phone', 'viewranger_tablet']
X = []
for file in files:
gpx_file = open(os.path.join(data_path, 'epomeo_gpx', file + '.gpx'), 'r')
gpx = gpxpy.parse(gpx_file)
segment = gpx.tracks[0].segments[0]
points = [point for track in gpx.tracks for segment in track.segments for point in segment.points]
data = [[(point.time-datetime.datetime(2013,8,21)).total_seconds(), point.latitude, point.longitude, point.elevation] for point in points]
X.append(np.asarray(data)[::sample_every, :])
gpx_file.close()
return data_details_return({'X' : X, 'info' : 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.'}, data_set)
#del gpxpy_available
# Some general utilities.
def sample_class(f):
p = 1. / (1. + np.exp(-f))
c = np.random.binomial(1, p)
c = np.where(c, 1, -1)
return c
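# Worked example (sketch): f = 0 gives p = 0.5, so the label is +1 or -1 with equal probability,
# while a large positive f makes +1 almost certain.
def _sample_class_example():
    f = np.array([[-5.], [0.], [5.]])
    return sample_class(f)  # entries drawn from {-1, 1}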
def boston_housing(data_set='boston_housing'):
if not data_available(data_set):
download_data(data_set)
all_data = np.genfromtxt(os.path.join(data_path, data_set, 'housing.data'))
X = all_data[:, 0:13]
Y = all_data[:, 13:14]
return data_details_return({'X' : X, 'Y': Y}, data_set)
def brendan_faces(data_set='brendan_faces'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'frey_rawface.mat'))
Y = mat_data['ff'].T
return data_details_return({'Y': Y}, data_set)
def della_gatta_TRP63_gene_expression(data_set='della_gatta', gene_number=None):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'DellaGattadata.mat'))
X = np.double(mat_data['timepoints'])
if gene_number == None:
Y = mat_data['exprs_tp53_RMA']
else:
Y = mat_data['exprs_tp53_RMA'][:, gene_number]
if len(Y.shape) == 1:
Y = Y[:, None]
return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def football_data(season='1314', data_set='football_data'):
"""Football data from English games since 1993. This downloads data from football-data.co.uk for the given season. """
def league2num(string):
league_dict = {'E0':0, 'E1':1, 'E2': 2, 'E3': 3, 'EC':4}
return league_dict[string]
def football2num(string):
if football_dict.has_key(string):
return football_dict[string]
else:
football_dict[string] = len(football_dict)+1
return len(football_dict)+1
data_set_season = data_set + '_' + season
data_resources[data_set_season] = copy.deepcopy(data_resources[data_set])
data_resources[data_set_season]['urls'][0]+=season + '/'
start_year = int(season[0:2])
end_year = int(season[2:4])
files = ['E0.csv', 'E1.csv', 'E2.csv', 'E3.csv']
if start_year>4 and start_year < 93:
files += ['EC.csv']
data_resources[data_set_season]['files'] = [files]
if not data_available(data_set_season):
download_data(data_set_season)
from matplotlib import pyplot as pb
for file in reversed(files):
filename = os.path.join(data_path, data_set_season, file)
# rewrite files removing blank rows.
writename = os.path.join(data_path, data_set_season, 'temp.csv')
input = open(filename, 'rb')
output = open(writename, 'wb')
writer = csv.writer(output)
for row in csv.reader(input):
if any(field.strip() for field in row):
writer.writerow(row)
input.close()
output.close()
table = np.loadtxt(writename,skiprows=1, usecols=(0, 1, 2, 3, 4, 5), converters = {0: league2num, 1: pb.datestr2num, 2:football2num, 3:football2num}, delimiter=',')
X = table[:, :4]
Y = table[:, 4:]
return data_details_return({'X': X, 'Y': Y}, data_set)
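# Illustrative usage (sketch): the 2013/14 season. Following the loader above, X holds the league
# index, match date (as a matplotlib date number) and home/away team ids, while Y holds the two
# remaining score columns from the football-data.co.uk files.
def _football_data_example():
    data = football_data(season='1314')
    return data['X'].shape, data['Y'].shape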
def sod1_mouse(data_set='sod1_mouse'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'sod1_C57_129_exprs.csv')
Y = read_csv(filename, header=0, index_col=0)
num_repeats=4
num_time=4
num_cond=4
X = 1
return data_details_return({'X': X, 'Y': Y}, data_set)
def spellman_yeast(data_set='spellman_yeast'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'combined.txt')
Y = read_csv(filename, header=0, index_col=0, sep='\t')
return data_details_return({'Y': Y}, data_set)
def spellman_yeast_cdc15(data_set='spellman_yeast'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'combined.txt')
Y = read_csv(filename, header=0, index_col=0, sep='\t')
t = np.asarray([10, 30, 50, 70, 80, 90, 100, 110, 120, 130, 140, 150, 170, 180, 190, 200, 210, 220, 230, 240, 250, 270, 290])
times = ['cdc15_'+str(time) for time in t]
Y = Y[times].T
t = t[:, None]
return data_details_return({'Y' : Y, 't': t, 'info': 'Time series of synchronized yeast cells from the CDC-15 experiment of Spellman et al (1998).'}, data_set)
def lee_yeast_ChIP(data_set='lee_yeast_ChIP'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
import zipfile
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'binding_by_gene.tsv')
S = read_csv(filename, header=1, index_col=0, sep='\t')
transcription_factors = [col for col in S.columns if col[:7] != 'Unnamed']
annotations = S[['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']]
S = S[transcription_factors]
return data_details_return({'annotations' : annotations, 'Y' : S, 'transcription_factors': transcription_factors}, data_set)
def fruitfly_tomancak(data_set='fruitfly_tomancak', gene_number=None):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'tomancak_exprs.csv')
Y = read_csv(filename, header=0, index_col=0).T
num_repeats = 3
num_time = 12
xt = np.linspace(0, num_time-1, num_time)
xr = np.linspace(0, num_repeats-1, num_repeats)
xtime, xrepeat = np.meshgrid(xt, xr)
X = np.vstack((xtime.flatten(), xrepeat.flatten())).T
return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def drosophila_protein(data_set='drosophila_protein'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'becker_et_al.csv')
Y = read_csv(filename, header=0)
return data_details_return({'Y': Y}, data_set)
def drosophila_knirps(data_set='drosophila_protein'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'becker_et_al.csv')
# in the csv file we have facts_kni and ext_kni. We treat facts_kni as protein and ext_kni as mRNA
df = read_csv(filename, header=0)
t = df['t'][:,None]
x = df['x'][:,None]
g = df['expression1'][:,None]
p = df['expression2'][:,None]
leng = x.shape[0]
T = np.vstack([t,t])
S = np.vstack([x,x])
inx = np.zeros(leng*2)[:,None]
inx[leng*2/2:leng*2]=1
X = np.hstack([T,S,inx])
Y = np.vstack([g,p])
return data_details_return({'Y': Y, 'X': X}, data_set)
# This will be for downloading google trends data.
def google_trends(query_terms=['big data', 'machine learning', 'data science'], data_set='google_trends', refresh_data=False):
"""Data downloaded from Google trends for given query terms. Warning, if you use this function multiple times in a row you get blocked due to terms of service violations. The function will cache the result of your query, if you wish to refresh an old query set refresh_data to True. The function is inspired by this notebook: http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb"""
query_terms.sort()
import pandas
# Create directory name for data
dir_path = os.path.join(data_path,'google_trends')
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
dir_name = '-'.join(query_terms)
dir_name = dir_name.replace(' ', '_')
dir_path = os.path.join(dir_path,dir_name)
file = 'data.csv'
file_name = os.path.join(dir_path,file)
if not os.path.exists(file_name) or refresh_data:
print("Accessing Google trends to acquire the data. Note that repeated accesses will result in a block due to a google terms of service violation. Failure at this point may be due to such blocks.")
# quote the query terms.
quoted_terms = []
for term in query_terms:
            quoted_terms.append(quote(term))
print("Query terms: ", ', '.join(query_terms))
print("Fetching query:")
query = 'http://www.google.com/trends/fetchComponent?q=%s&cid=TIMESERIES_GRAPH_0&export=3' % ",".join(quoted_terms)
data = urlopen(query).read()
print("Done.")
# In the notebook they did some data cleaning: remove Javascript header+footer, and translate new Date(....,..,..) into YYYY-MM-DD.
header = """// Data table response\ngoogle.visualization.Query.setResponse("""
data = data[len(header):-2]
data = re.sub('new Date\((\d+),(\d+),(\d+)\)', (lambda m: '"%s-%02d-%02d"' % (m.group(1).strip(), 1+int(m.group(2)), int(m.group(3)))), data)
timeseries = json.loads(data)
columns = [k['label'] for k in timeseries['table']['cols']]
rows = map(lambda x: [k['v'] for k in x['c']], timeseries['table']['rows'])
df = pandas.DataFrame(rows, columns=columns)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
df.to_csv(file_name)
else:
print("Reading cached data for google trends. To refresh the cache set 'refresh_data=True' when calling this function.")
print("Query terms: ", ', '.join(query_terms))
df = pandas.read_csv(file_name, parse_dates=[0])
columns = df.columns
terms = len(query_terms)
import datetime
X = np.asarray([(row, i) for i in range(terms) for row in df.index])
Y = np.asarray([[df.ix[row][query_terms[i]]] for i in range(terms) for row in df.index ])
output_info = columns[1:]
return data_details_return({'data frame' : df, 'X': X, 'Y': Y, 'query_terms': output_info, 'info': "Data downloaded from google trends with query terms: " + ', '.join(output_info) + '.'}, data_set)
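# Illustrative usage (sketch): repeated live queries can get blocked, so this relies on the cached
# CSV once it exists; the query terms shown are simply the function's defaults.
def _google_trends_example():
    data = google_trends(query_terms=['big data', 'machine learning', 'data science'],
                         refresh_data=False)
    return data['data frame'].head()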
# The data sets
def oil(data_set='three_phase_oil_flow'):
"""The three phase oil data from Bishop and James (1993)."""
if not data_available(data_set):
download_data(data_set)
oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')
oil_trainlbls_file = os.path.join(data_path, data_set, 'DataTrnLbls.txt')
oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')
oil_testlbls_file = os.path.join(data_path, data_set, 'DataTstLbls.txt')
oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')
oil_validlbls_file = os.path.join(data_path, data_set, 'DataVdnLbls.txt')
fid = open(oil_train_file)
X = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_test_file)
Xtest = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_valid_file)
Xvalid = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_trainlbls_file)
Y = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_testlbls_file)
Ytest = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_validlbls_file)
Yvalid = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xtest' : Xtest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set)
#else:
# throw an error
def oil_100(seed=default_seed, data_set = 'three_phase_oil_flow'):
np.random.seed(seed=seed)
data = oil()
indices = np.random.permutation(1000)
indices = indices[0:100]
X = data['X'][indices, :]
Y = data['Y'][indices, :]
return data_details_return({'X': X, 'Y': Y, 'info': "Subsample of the full oil data extracting 100 values randomly without replacement, here seed was " + str(seed)}, data_set)
def pumadyn(seed=default_seed, data_set='pumadyn-32nm'):
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar = tarfile.open(os.path.join(path, 'pumadyn-32nm.tar.gz'))
print('Extracting file.')
tar.extractall(path=path)
tar.close()
# Data is variance 1, no need to normalize.
data = np.loadtxt(os.path.join(data_path, data_set, 'pumadyn-32nm', 'Dataset.data.gz'))
indices = np.random.permutation(data.shape[0])
indicesTrain = indices[0:7168]
indicesTest = indices[7168:-1]
indicesTrain.sort(axis=0)
indicesTest.sort(axis=0)
X = data[indicesTrain, 0:-2]
Y = data[indicesTrain, -1][:, None]
Xtest = data[indicesTest, 0:-2]
Ytest = data[indicesTest, -1][:, None]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed': seed}, data_set)
def robot_wireless(data_set='robot_wireless'):
# WiFi access point strengths on a tour around UW Paul Allen building.
if not data_available(data_set):
download_data(data_set)
file_name = os.path.join(data_path, data_set, 'uw-floor.txt')
all_time = np.genfromtxt(file_name, usecols=(0))
macaddress = np.genfromtxt(file_name, usecols=(1), dtype='string')
x = np.genfromtxt(file_name, usecols=(2))
y = np.genfromtxt(file_name, usecols=(3))
strength = np.genfromtxt(file_name, usecols=(4))
addresses = np.unique(macaddress)
times = np.unique(all_time)
addresses.sort()
times.sort()
allY = np.zeros((len(times), len(addresses)))
allX = np.zeros((len(times), 2))
allY[:]=-92.
strengths={}
for address, j in zip(addresses, range(len(addresses))):
ind = np.nonzero(address==macaddress)
temp_strengths=strength[ind]
temp_x=x[ind]
temp_y=y[ind]
temp_times = all_time[ind]
for time in temp_times:
vals = time==temp_times
if any(vals):
ind2 = np.nonzero(vals)
i = np.nonzero(time==times)
allY[i, j] = temp_strengths[ind2]
allX[i, 0] = temp_x[ind2]
allX[i, 1] = temp_y[ind2]
allY = (allY + 85.)/15.
X = allX[0:215, :]
Y = allY[0:215, :]
Xtest = allX[215:, :]
Ytest = allY[215:, :]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'addresses' : addresses, 'times' : times}, data_set)
def silhouette(data_set='ankur_pose_data'):
# Ankur Agarwal and Bill Trigg's silhoutte data.
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'ankurDataPoseSilhouette.mat'))
inMean = np.mean(mat_data['Y'])
inScales = np.sqrt(np.var(mat_data['Y']))
X = mat_data['Y'] - inMean
X = X / inScales
Xtest = mat_data['Y_test'] - inMean
Xtest = Xtest / inScales
Y = mat_data['Z']
Ytest = mat_data['Z_test']
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest}, data_set)
def decampos_digits(data_set='decampos_characters', which_digits=[0,1,2,3,4,5,6,7,8,9]):
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
digits = np.load(os.path.join(path, 'digits.npy'))
digits = digits[which_digits,:,:,:]
num_classes, num_samples, height, width = digits.shape
Y = digits.reshape((digits.shape[0]*digits.shape[1],digits.shape[2]*digits.shape[3]))
lbls = np.array([[l]*num_samples for l in which_digits]).reshape(Y.shape[0], 1)
str_lbls = np.array([[str(l)]*num_samples for l in which_digits])
return data_details_return({'Y': Y, 'lbls': lbls, 'str_lbls' : str_lbls, 'info': 'Digits data set from the de Campos characters data'}, data_set)
def ripley_synth(data_set='ripley_prnn_data'):
if not data_available(data_set):
download_data(data_set)
train = np.genfromtxt(os.path.join(data_path, data_set, 'synth.tr'), skip_header=1)
X = train[:, 0:2]
y = train[:, 2:3]
test = np.genfromtxt(os.path.join(data_path, data_set, 'synth.te'), skip_header=1)
Xtest = test[:, 0:2]
ytest = test[:, 2:3]
return data_details_return({'X': X, 'Y': y, 'Xtest': Xtest, 'Ytest': ytest, 'info': 'Synthetic data generated by Ripley for a two class classification problem.'}, data_set)
def global_average_temperature(data_set='global_temperature', num_train=1000, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
print('Using cached version of the data set, to use latest version set refresh_data to True')
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'GLBTS.long.data'))
print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0])
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Global average temperature data with " + str(num_train) + " values used as training points."}, data_set)
def mauna_loa(data_set='mauna_loa', num_train=545, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
print('Using cached version of the data set, to use latest version set refresh_data to True')
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'co2_mm_mlo.txt'))
print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0])
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Mauna Loa data with " + str(num_train) + " values used as training points."}, data_set)
def boxjenkins_airline(data_set='boxjenkins_airline', num_train=96):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'boxjenkins_airline.csv'), delimiter=',')
Y = data[:num_train, 1:2]
X = data[:num_train, 0:1]
Xtest = data[num_train:, 0:1]
Ytest = data[num_train:, 1:2]
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Monthly airline passenger data from Box & Jenkins 1976."}, data_set)
def osu_run1(data_set='osu_run1', sample_every=4):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
zip = zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r')
for name in zip.namelist():
zip.extract(name, path)
Y, connect = GPy.util.mocap.load_text_data('Aug210106', path)
Y = Y[0:-1:sample_every, :]
return data_details_return({'Y': Y, 'connect' : connect}, data_set)
def swiss_roll_generated(num_samples=1000, sigma=0.0):
with open(os.path.join(os.path.dirname(__file__), 'datasets', 'swiss_roll.pickle')) as f:
data = pickle.load(f)
Na = data['Y'].shape[0]
perm = np.random.permutation(np.r_[:Na])[:num_samples]
Y = data['Y'][perm, :]
t = data['t'][perm]
c = data['colors'][perm, :]
so = np.argsort(t)
Y = Y[so, :]
t = t[so]
c = c[so, :]
return {'Y':Y, 't':t, 'colors':c}
def hapmap3(data_set='hapmap3'):
"""
The HapMap phase three SNP dataset - 1184 samples out of 11 populations.
SNP_matrix (A) encoding [see Paschou et all. 2007 (PCA-Correlated SNPs...)]:
Let (B1,B2) be the alphabetically sorted bases, which occur in the j-th SNP, then
/ 1, iff SNPij==(B1,B1)
Aij = | 0, iff SNPij==(B1,B2)
\ -1, iff SNPij==(B2,B2)
The SNP data and the meta information (such as iid, sex and phenotype) are
stored in the dataframe datadf, index is the Individual ID,
with following columns for metainfo:
* family_id -> Family ID
* paternal_id -> Paternal ID
* maternal_id -> Maternal ID
* sex -> Sex (1=male; 2=female; other=unknown)
* phenotype -> Phenotype (-9, or 0 for unknown)
* population -> Population string (e.g. 'ASW' - 'YRI')
* rest are SNP rs (ids)
More information is given in infodf:
* Chromosome:
- autosomal chromosemes -> 1-22
- X X chromosome -> 23
- Y Y chromosome -> 24
- XY Pseudo-autosomal region of X -> 25
- MT Mitochondrial -> 26
* Relative Positon (to Chromosome) [base pairs]
"""
try:
from pandas import read_pickle, DataFrame
from sys import stdout
import bz2
    except ImportError:
        raise ImportError("Need pandas for hapmap dataset, make sure to install pandas (http://pandas.pydata.org/) before loading the hapmap dataset")
dir_path = os.path.join(data_path,'hapmap3')
hapmap_file_name = 'hapmap3_r2_b36_fwd.consensus.qc.poly'
unpacked_files = [os.path.join(dir_path, hapmap_file_name+ending) for ending in ['.ped', '.map']]
unpacked_files_exist = reduce(lambda a, b:a and b, map(os.path.exists, unpacked_files))
if not unpacked_files_exist and not data_available(data_set):
download_data(data_set)
preprocessed_data_paths = [os.path.join(dir_path,hapmap_file_name + file_name) for file_name in \
['.snps.pickle',
'.info.pickle',
'.nan.pickle']]
if not reduce(lambda a,b: a and b, map(os.path.exists, preprocessed_data_paths)):
if not overide_manual_authorize and not prompt_user("Preprocessing requires ~25GB "
"of memory and can take a (very) long time, continue? [Y/n]"):
print("Preprocessing required for further usage.")
return
status = "Preprocessing data, please be patient..."
print(status)
def write_status(message, progress, status):
stdout.write(" "*len(status)); stdout.write("\r"); stdout.flush()
status = r"[{perc: <{ll}}] {message: <13s}".format(message=message, ll=20,
perc="="*int(20.*progress/100.))
stdout.write(status); stdout.flush()
return status
if not unpacked_files_exist:
status=write_status('unpacking...', 0, '')
curr = 0
for newfilepath in unpacked_files:
if not os.path.exists(newfilepath):
filepath = newfilepath + '.bz2'
file_size = os.path.getsize(filepath)
with open(newfilepath, 'wb') as new_file, open(filepath, 'rb') as f:
decomp = bz2.BZ2Decompressor()
file_processed = 0
buffsize = 100 * 1024
for data in iter(lambda : f.read(buffsize), b''):
new_file.write(decomp.decompress(data))
file_processed += len(data)
status=write_status('unpacking...', curr+12.*file_processed/(file_size), status)
curr += 12
status=write_status('unpacking...', curr, status)
os.remove(filepath)
status=write_status('reading .ped...', 25, status)
# Preprocess data:
snpstrnp = np.loadtxt(unpacked_files[0], dtype=str)
status=write_status('reading .map...', 33, status)
mapnp = np.loadtxt(unpacked_files[1], dtype=str)
status=write_status('reading relationships.txt...', 42, status)
# and metainfo:
infodf = DataFrame.from_csv(os.path.join(dir_path,'./relationships_w_pops_121708.txt'), header=0, sep='\t')
infodf.set_index('IID', inplace=1)
status=write_status('filtering nan...', 45, status)
snpstr = snpstrnp[:,6:].astype('S1').reshape(snpstrnp.shape[0], -1, 2)
inan = snpstr[:,:,0] == '0'
status=write_status('filtering reference alleles...', 55, status)
ref = np.array(map(lambda x: np.unique(x)[-2:], snpstr.swapaxes(0,1)[:,:,:]))
status=write_status('encoding snps...', 70, status)
# Encode the information for each gene in {-1,0,1}:
status=write_status('encoding snps...', 73, status)
snps = (snpstr==ref[None,:,:])
status=write_status('encoding snps...', 76, status)
snps = (snps*np.array([1,-1])[None,None,:])
status=write_status('encoding snps...', 78, status)
snps = snps.sum(-1)
status=write_status('encoding snps...', 81, status)
snps = snps.astype('i8')
status=write_status('marking nan values...', 88, status)
# put in nan values (masked as -128):
snps[inan] = -128
status=write_status('setting up meta...', 94, status)
# get meta information:
metaheader = np.r_[['family_id', 'iid', 'paternal_id', 'maternal_id', 'sex', 'phenotype']]
metadf = DataFrame(columns=metaheader, data=snpstrnp[:,:6])
metadf.set_index('iid', inplace=1)
metadf = metadf.join(infodf.population)
metadf.to_pickle(preprocessed_data_paths[1])
# put everything together:
status=write_status('setting up snps...', 96, status)
snpsdf = DataFrame(index=metadf.index, data=snps, columns=mapnp[:,1])
with open(preprocessed_data_paths[0], 'wb') as f:
            pickle.dump(snpsdf, f, protocol=-1)
status=write_status('setting up snps...', 98, status)
inandf = DataFrame(index=metadf.index, data=inan, columns=mapnp[:,1])
inandf.to_pickle(preprocessed_data_paths[2])
status=write_status('done :)', 100, status)
print('')
else:
print("loading snps...")
snpsdf = read_pickle(preprocessed_data_paths[0])
print("loading metainfo...")
metadf = read_pickle(preprocessed_data_paths[1])
print("loading nan entries...")
inandf = read_pickle(preprocessed_data_paths[2])
snps = snpsdf.values
populations = metadf.population.values.astype('S3')
hapmap = dict(name=data_set,
description='The HapMap phase three SNP dataset - '
'1184 samples out of 11 populations. inan is a '
'boolean array, containing wheather or not the '
'given entry is nan (nans are masked as '
'-128 in snps).',
snpsdf=snpsdf,
metadf=metadf,
snps=snps,
inan=inandf.values,
inandf=inandf,
populations=populations)
return hapmap
def singlecell(data_set='singlecell'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'singlecell.csv')
Y = read_csv(filename, header=0, index_col=0)
genes = Y.columns
labels = Y.index
# data = np.loadtxt(os.path.join(dir_path, 'singlecell.csv'), delimiter=",", dtype=str)
return data_details_return({'Y': Y, 'info' : "qPCR singlecell experiment in Mouse, measuring 48 gene expressions in 1-64 cell states. The labels have been created as in Guo et al. [2010]",
'genes': genes, 'labels':labels,
}, data_set)
def singlecell_rna_seq_islam(dataset='singlecell_islam'):
if not data_available(dataset):
download_data(dataset)
from pandas import read_csv, DataFrame, concat
dir_path = os.path.join(data_path, dataset)
filename = os.path.join(dir_path, 'GSE29087_L139_expression_tab.txt.gz')
data = read_csv(filename, sep='\t', skiprows=6, compression='gzip', header=None)
header1 = read_csv(filename, sep='\t', header=None, skiprows=5, nrows=1, compression='gzip')
header2 = read_csv(filename, sep='\t', header=None, skiprows=3, nrows=1, compression='gzip')
data.columns = np.concatenate((header1.ix[0, :], header2.ix[0, 7:]))
Y = data.set_index("Feature").ix[8:, 6:-4].T.astype(float)
# read the info .soft
filename = os.path.join(dir_path, 'GSE29087_family.soft.gz')
info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None)
# split at ' = '
info = DataFrame(info.ix[:,0].str.split(' = ').tolist())
# only take samples:
info = info[info[0].str.contains("!Sample")]
info[0] = info[0].apply(lambda row: row[len("!Sample_"):])
groups = info.groupby(0).groups
# remove 'GGG' from barcodes
barcode = info[1][groups['barcode']].apply(lambda row: row[:-3])
title = info[1][groups['title']]
title.index = barcode
title.name = 'title'
geo_accession = info[1][groups['geo_accession']]
geo_accession.index = barcode
geo_accession.name = 'geo_accession'
case_id = info[1][groups['source_name_ch1']]
case_id.index = barcode
case_id.name = 'source_name_ch1'
info = concat([title, geo_accession, case_id], axis=1)
labels = info.join(Y).source_name_ch1[:-4]
labels[labels=='Embryonic stem cell'] = "ES"
labels[labels=='Embryonic fibroblast'] = "MEF"
return data_details_return({'Y': Y,
'info': '92 single cells (48 mouse ES cells, 44 mouse embryonic fibroblasts and 4 negative controls) were analyzed by single-cell tagged reverse transcription (STRT)',
'genes': Y.columns,
'labels': labels,
'datadf': data,
'infodf': info}, dataset)
def singlecell_rna_seq_deng(dataset='singlecell_deng'):
if not data_available(dataset):
download_data(dataset)
from pandas import read_csv, isnull
dir_path = os.path.join(data_path, dataset)
# read the info .soft
filename = os.path.join(dir_path, 'GSE45719_series_matrix.txt.gz')
info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None, nrows=29, index_col=0)
summary = info.loc['!Series_summary'][1]
design = info.loc['!Series_overall_design']
# only take samples:
sample_info = read_csv(filename, sep='\t', skiprows=30, compression='gzip', header=0, index_col=0).T
sample_info.columns = sample_info.columns.to_series().apply(lambda row: row[len("!Sample_"):])
sample_info.columns.name = sample_info.columns.name[len("!Sample_"):]
sample_info = sample_info[['geo_accession', 'characteristics_ch1', 'description']]
sample_info = sample_info.iloc[:, np.r_[0:4, 5:sample_info.shape[1]]]
c = sample_info.columns.to_series()
c[1:4] = ['strain', 'cross', 'developmental_stage']
sample_info.columns = c
# get the labels right:
rep = re.compile('\(.*\)')
def filter_dev_stage(row):
if isnull(row):
row = "2-cell stage embryo"
if row.startswith("developmental stage: "):
row = row[len("developmental stage: "):]
if row == 'adult':
row += " liver"
row = row.replace(' stage ', ' ')
row = rep.sub(' ', row)
row = row.strip(' ')
return row
labels = sample_info.developmental_stage.apply(filter_dev_stage)
# Extract the tar file
filename = os.path.join(dir_path, 'GSE45719_Raw.tar')
with tarfile.open(filename, 'r') as files:
print("Extracting Archive {}...".format(files.name))
data = None
gene_info = None
message = ''
members = files.getmembers()
overall = len(members)
for i, file_info in enumerate(members):
f = files.extractfile(file_info)
inner = read_csv(f, sep='\t', header=0, compression='gzip', index_col=0)
print(' '*(len(message)+1) + '\r', end=' ')
message = "{: >7.2%}: Extracting: {}".format(float(i+1)/overall, file_info.name[:20]+"...txt.gz")
print(message, end=' ')
if data is None:
data = inner.RPKM.to_frame()
data.columns = [file_info.name[:-18]]
gene_info = inner.Refseq_IDs.to_frame()
gene_info.columns = [file_info.name[:-18]]
else:
data[file_info.name[:-18]] = inner.RPKM
gene_info[file_info.name[:-18]] = inner.Refseq_IDs
# Strip GSM number off data index
rep = re.compile('GSM\d+_')
data.columns = data.columns.to_series().apply(lambda row: row[rep.match(row).end():])
data = data.T
# make sure the same index gets used
sample_info.index = data.index
# get the labels from the description
#rep = re.compile('fibroblast|\d+-cell|embryo|liver|early blastocyst|mid blastocyst|late blastocyst|blastomere|zygote', re.IGNORECASE)
sys.stdout.write(' '*len(message) + '\r')
sys.stdout.flush()
print()
print("Read Archive {}".format(files.name))
return data_details_return({'Y': data,
'series_info': info,
'sample_info': sample_info,
'gene_info': gene_info,
'summary': summary,
'design': design,
'genes': data.columns,
'labels': labels,
}, dataset)
def swiss_roll_1000():
return swiss_roll(num_samples=1000)
def swiss_roll(num_samples=3000, data_set='swiss_roll'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'swiss_roll_data.mat'))
Y = mat_data['X_data'][:, 0:num_samples].transpose()
    return data_details_return({'Y': Y, 'X': mat_data['X_data'], 'info': "The first " + str(num_samples) + " points from the swiss roll data of Tenenbaum, de Silva and Langford (2001)."}, data_set)
def isomap_faces(num_samples=698, data_set='isomap_face_data'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'face_data.mat'))
Y = mat_data['images'][:, 0:num_samples].transpose()
    return data_details_return({'Y': Y, 'poses' : mat_data['poses'], 'lights': mat_data['lights'], 'info': "The first " + str(num_samples) + " points from the face data of Tenenbaum, de Silva and Langford (2001)."}, data_set)
def simulation_BGPLVM():
mat_data = scipy.io.loadmat(os.path.join(data_path, 'BGPLVMSimulation.mat'))
Y = np.array(mat_data['Y'], dtype=float)
S = np.array(mat_data['initS'], dtype=float)
mu = np.array(mat_data['initMu'], dtype=float)
#return data_details_return({'S': S, 'Y': Y, 'mu': mu}, data_set)
return {'Y': Y, 'S': S,
'mu' : mu,
'info': "Simulated test dataset generated in MATLAB to compare BGPLVM between python and MATLAB"}
def toy_rbf_1d(seed=default_seed, num_samples=500):
"""
Samples values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1.
:param seed: seed to use for random sampling.
:type seed: int
:param num_samples: number of samples to sample in the function (default 500).
:type num_samples: int
"""
np.random.seed(seed=seed)
num_in = 1
X = np.random.uniform(low= -1.0, high=1.0, size=(num_samples, num_in))
X.sort(axis=0)
rbf = GPy.kern.RBF(num_in, variance=1., lengthscale=np.array((0.25,)))
white = GPy.kern.White(num_in, variance=1e-2)
kernel = rbf + white
K = kernel.K(X)
y = np.reshape(np.random.multivariate_normal(np.zeros(num_samples), K), (num_samples, 1))
return {'X':X, 'Y':y, 'info': "Sampled " + str(num_samples) + " values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1."}
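# Minimal usage sketch: draw a small sample and check the returned shapes.
def _toy_rbf_1d_example():
    data = toy_rbf_1d(seed=default_seed, num_samples=100)
    assert data['X'].shape == (100, 1) and data['Y'].shape == (100, 1)
    return data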
def toy_rbf_1d_50(seed=default_seed):
np.random.seed(seed=seed)
data = toy_rbf_1d()
indices = np.random.permutation(data['X'].shape[0])
indices = indices[0:50]
indices.sort(axis=0)
X = data['X'][indices, :]
Y = data['Y'][indices, :]
return {'X': X, 'Y': Y, 'info': "Subsamples the toy_rbf_sample with 50 values randomly taken from the original sample.", 'seed' : seed}
def toy_linear_1d_classification(seed=default_seed):
np.random.seed(seed=seed)
x1 = np.random.normal(-3, 5, 20)
x2 = np.random.normal(3, 5, 20)
X = (np.r_[x1, x2])[:, None]
return {'X': X, 'Y': sample_class(2.*X), 'F': 2.*X, 'seed' : seed}
def olivetti_glasses(data_set='olivetti_glasses', num_training=200, seed=default_seed):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
y = np.load(os.path.join(path, 'has_glasses.np'))
y = np.where(y=='y',1,0).reshape(-1,1)
faces = scipy.io.loadmat(os.path.join(path, 'olivettifaces.mat'))['faces'].T
np.random.seed(seed=seed)
index = np.random.permutation(faces.shape[0])
X = faces[index[:num_training],:]
Xtest = faces[index[num_training:],:]
Y = y[index[:num_training],:]
Ytest = y[index[num_training:]]
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed' : seed, 'info': "ORL Faces with labels identifying who is wearing glasses and who isn't. Data is randomly partitioned according to given seed. Presence or absence of glasses was labelled by James Hensman."}, 'olivetti_faces')
def olivetti_faces(data_set='olivetti_faces'):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
zip = zipfile.ZipFile(os.path.join(path, 'att_faces.zip'), 'r')
for name in zip.namelist():
zip.extract(name, path)
Y = []
lbls = []
for subject in range(40):
for image in range(10):
image_path = os.path.join(path, 'orl_faces', 's'+str(subject+1), str(image+1) + '.pgm')
from GPy.util import netpbmfile
Y.append(netpbmfile.imread(image_path).flatten())
lbls.append(subject)
Y = np.asarray(Y)
lbls = np.asarray(lbls)[:, None]
return data_details_return({'Y': Y, 'lbls' : lbls, 'info': "ORL Faces processed to 64x64 images."}, data_set)
def xw_pen(data_set='xw_pen'):
if not data_available(data_set):
download_data(data_set)
Y = np.loadtxt(os.path.join(data_path, data_set, 'xw_pen_15.csv'), delimiter=',')
X = np.arange(485)[:, None]
return data_details_return({'Y': Y, 'X': X, 'info': "Tilt data from a personalized digital assistant pen. Plot in original paper showed regression between time steps 175 and 275."}, data_set)
def download_rogers_girolami_data(data_set='rogers_girolami_data'):
if not data_available('rogers_girolami_data'):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar_file = os.path.join(path, 'firstcoursemldata.tar.gz')
tar = tarfile.open(tar_file)
print('Extracting file.')
tar.extractall(path=path)
tar.close()
def olympic_100m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male100']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic sprint times for 100 m men from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_100m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female100']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic sprint times for 100 m women from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_200m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female200']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 200 m winning times for women from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_200m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male200']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Male 200 m winning times for women from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_400m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female400']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 400 m winning times for women until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_400m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male400']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Male 400 m winning times for women until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_marathon_men(data_set='olympic_marathon_men'):
if not data_available(data_set):
download_data(data_set)
olympics = np.genfromtxt(os.path.join(data_path, data_set, 'olympicMarathonTimes.csv'), delimiter=',')
X = olympics[:, 0:1]
Y = olympics[:, 1:2]
return data_details_return({'X': X, 'Y': Y}, data_set)
def olympic_sprints(data_set='rogers_girolami_data'):
"""All olympics sprint winning times for multiple output prediction."""
X = np.zeros((0, 2))
Y = np.zeros((0, 1))
for i, dataset in enumerate([olympic_100m_men,
olympic_100m_women,
olympic_200m_men,
olympic_200m_women,
olympic_400m_men,
olympic_400m_women]):
data = dataset()
year = data['X']
time = data['Y']
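        # append the event index i as a second input column so that multi-output
        # models can tell the six sprint events apart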
X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))
Y = np.vstack((Y, time))
data['X'] = X
data['Y'] = Y
data['info'] = "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning."
return data_details_return({
'X': X,
'Y': Y,
'info': "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
'output_info': {
0:'100m Men',
1:'100m Women',
2:'200m Men',
3:'200m Women',
4:'400m Men',
5:'400m Women'}
}, data_set)
# def movielens_small(partNo=1,seed=default_seed):
# np.random.seed(seed=seed)
# fileName = os.path.join(data_path, 'movielens', 'small', 'u' + str(partNo) + '.base')
# fid = open(fileName)
# uTrain = np.fromfile(fid, sep='\t', dtype=np.int16).reshape((-1, 4))
# fid.close()
# maxVals = np.amax(uTrain, axis=0)
# numUsers = maxVals[0]
# numFilms = maxVals[1]
# numRatings = uTrain.shape[0]
# Y = scipy.sparse.lil_matrix((numFilms, numUsers), dtype=np.int8)
# for i in range(numUsers):
# ind = pb.mlab.find(uTrain[:, 0]==i+1)
# Y[uTrain[ind, 1]-1, i] = uTrain[ind, 2]
# fileName = os.path.join(data_path, 'movielens', 'small', 'u' + str(partNo) + '.test')
# fid = open(fileName)
# uTest = np.fromfile(fid, sep='\t', dtype=np.int16).reshape((-1, 4))
# fid.close()
# numTestRatings = uTest.shape[0]
# Ytest = scipy.sparse.lil_matrix((numFilms, numUsers), dtype=np.int8)
# for i in range(numUsers):
# ind = pb.mlab.find(uTest[:, 0]==i+1)
# Ytest[uTest[ind, 1]-1, i] = uTest[ind, 2]
# lbls = np.empty((1,1))
# lblstest = np.empty((1,1))
# return {'Y':Y, 'lbls':lbls, 'Ytest':Ytest, 'lblstest':lblstest}
def crescent_data(num_data=200, seed=default_seed):
"""
Data set formed from a mixture of four Gaussians. In each class two of the Gaussians are elongated at right angles to each other and offset to form an approximation to the crescent data that is popular in semi-supervised learning as a toy problem.
    :param num_data: number of data points to be sampled (default is 200).
:type num_data: int
:param seed: random seed to be used for data generation.
:type seed: int
"""
np.random.seed(seed=seed)
sqrt2 = np.sqrt(2)
# Rotation matrix
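    # (a 45 degree rotation: cos(45) = sin(45) = sqrt(2)/2)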
R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])
# Scaling matrices
scales = []
scales.append(np.array([[3, 0], [0, 1]]))
scales.append(np.array([[3, 0], [0, 1]]))
    scales.append(np.array([[1, 0], [0, 3]]))
    scales.append(np.array([[1, 0], [0, 3]]))
means = []
means.append(np.array([4, 4]))
means.append(np.array([0, 4]))
means.append(np.array([-4, -4]))
means.append(np.array([0, -4]))
Xparts = []
num_data_part = []
num_data_total = 0
for i in range(0, 4):
num_data_part.append(round(((i + 1) * num_data) / 4.))
num_data_part[i] -= num_data_total
part = np.random.normal(size=(num_data_part[i], 2))
part = np.dot(np.dot(part, scales[i]), R) + means[i]
Xparts.append(part)
num_data_total += num_data_part[i]
X = np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))
Y = np.vstack((np.ones((num_data_part[0] + num_data_part[1], 1)), -np.ones((num_data_part[2] + num_data_part[3], 1))))
return {'X':X, 'Y':Y, 'info': "Two separate classes of data formed approximately in the shape of two crescents."}
def creep_data(data_set='creep_rupture'):
"""Brun and Yoshida's metal creep rupture data."""
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar_file = os.path.join(path, 'creeprupt.tar')
tar = tarfile.open(tar_file)
print('Extracting file.')
tar.extractall(path=path)
tar.close()
all_data = np.loadtxt(os.path.join(data_path, data_set, 'taka'))
y = all_data[:, 1:2].copy()
features = [0]
features.extend(range(2, 31))
X = all_data[:, features].copy()
return data_details_return({'X': X, 'y': y}, data_set)
def cifar10_patches(data_set='cifar-10'):
"""The Candian Institute for Advanced Research 10 image data set. Code for loading in this data is taken from this Boris Babenko's blog post, original code available here: http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code"""
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'cifar-10-python.tar.gz')
if not data_available(data_set):
download_data(data_set)
import tarfile
# This code is from Boris Babenko's blog post.
# http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code
tfile = tarfile.open(filename, 'r:gz')
tfile.extractall(dir_path)
with open(os.path.join(dir_path, 'cifar-10-batches-py','data_batch_1'),'rb') as f:
data = pickle.load(f)
images = data['data'].reshape((-1,3,32,32)).astype('float32')/255
images = np.rollaxis(images, 1, 4)
patches = np.zeros((0,5,5,3))
for x in range(0,32-5,5):
for y in range(0,32-5,5):
patches = np.concatenate((patches, images[:,x:x+5,y:y+5,:]), axis=0)
patches = patches.reshape((patches.shape[0],-1))
return data_details_return({'Y': patches, "info" : "32x32 pixel patches extracted from the CIFAR-10 data by Boris Babenko to demonstrate k-means features."}, data_set)
def cmu_mocap_49_balance(data_set='cmu_mocap'):
"""Load CMU subject 49's one legged balancing motion that was used by Alvarez, Luengo and Lawrence at AISTATS 2009."""
train_motions = ['18', '19']
test_motions = ['20']
data = cmu_mocap('49', train_motions, test_motions, sample_every=4, data_set=data_set)
data['info'] = "One legged balancing motions from CMU data base subject 49. As used in Alvarez, Luengo and Lawrence at AISTATS 2009. It consists of " + data['info']
return data
def cmu_mocap_35_walk_jog(data_set='cmu_mocap'):
"""Load CMU subject 35's walking and jogging motions, the same data that was used by Taylor, Roweis and Hinton at NIPS 2007. but without their preprocessing. Also used by Lawrence at AISTATS 2007."""
train_motions = ['01', '02', '03', '04', '05', '06',
'07', '08', '09', '10', '11', '12',
'13', '14', '15', '16', '17', '19',
'20', '21', '22', '23', '24', '25',
'26', '28', '30', '31', '32', '33', '34']
test_motions = ['18', '29']
data = cmu_mocap('35', train_motions, test_motions, sample_every=4, data_set=data_set)
data['info'] = "Walk and jog data from CMU data base subject 35. As used in Tayor, Roweis and Hinton at NIPS 2007, but without their pre-processing (i.e. as used by Lawrence at AISTATS 2007). It consists of " + data['info']
return data
def cmu_mocap(subject, train_motions, test_motions=[], sample_every=4, data_set='cmu_mocap'):
"""Load a given subject's training and test motions from the CMU motion capture data."""
# Load in subject skeleton.
subject_dir = os.path.join(data_path, data_set)
# Make sure the data is downloaded.
all_motions = train_motions + test_motions
resource = cmu_urls_files(([subject], [all_motions]))
data_resources[data_set] = data_resources['cmu_mocap_full'].copy()
data_resources[data_set]['files'] = resource['files']
data_resources[data_set]['urls'] = resource['urls']
if resource['urls']:
download_data(data_set)
skel = GPy.util.mocap.acclaim_skeleton(os.path.join(subject_dir, subject + '.asf'))
# Set up labels for each sequence
exlbls = np.eye(len(train_motions))
# Load sequences
tot_length = 0
temp_Y = []
temp_lbls = []
for i in range(len(train_motions)):
temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + train_motions[i] + '.amc'))
temp_Y.append(temp_chan[::sample_every, :])
temp_lbls.append(np.tile(exlbls[i, :], (temp_Y[i].shape[0], 1)))
tot_length += temp_Y[i].shape[0]
Y = np.zeros((tot_length, temp_Y[0].shape[1]))
lbls = np.zeros((tot_length, temp_lbls[0].shape[1]))
end_ind = 0
for i in range(len(temp_Y)):
start_ind = end_ind
end_ind += temp_Y[i].shape[0]
Y[start_ind:end_ind, :] = temp_Y[i]
lbls[start_ind:end_ind, :] = temp_lbls[i]
if len(test_motions) > 0:
temp_Ytest = []
temp_lblstest = []
testexlbls = np.eye(len(test_motions))
tot_test_length = 0
for i in range(len(test_motions)):
temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + test_motions[i] + '.amc'))
temp_Ytest.append(temp_chan[::sample_every, :])
temp_lblstest.append(np.tile(testexlbls[i, :], (temp_Ytest[i].shape[0], 1)))
tot_test_length += temp_Ytest[i].shape[0]
# Load test data
Ytest = np.zeros((tot_test_length, temp_Ytest[0].shape[1]))
lblstest = np.zeros((tot_test_length, temp_lblstest[0].shape[1]))
end_ind = 0
for i in range(len(temp_Ytest)):
start_ind = end_ind
end_ind += temp_Ytest[i].shape[0]
Ytest[start_ind:end_ind, :] = temp_Ytest[i]
lblstest[start_ind:end_ind, :] = temp_lblstest[i]
else:
Ytest = None
lblstest = None
info = 'Subject: ' + subject + '. Training motions: '
for motion in train_motions:
info += motion + ', '
info = info[:-2]
if len(test_motions) > 0:
info += '. Test motions: '
for motion in test_motions:
info += motion + ', '
info = info[:-2] + '.'
else:
info += '.'
if sample_every != 1:
info += ' Data is sub-sampled to every ' + str(sample_every) + ' frames.'
return data_details_return({'Y': Y, 'lbls' : lbls, 'Ytest': Ytest, 'lblstest' : lblstest, 'info': info, 'skel': skel}, data_set)
| mit | 6,114,379,051,450,374,000 | 42.750506 | 435 | 0.604482 | false |
armink/rt-thread | bsp/nrf5x/nrf51822/rtconfig.py | 5 | 2499 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m0'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'D:/SourceryGCC/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
print('================ERROR============================')
print('Not support iar yet!')
print('=================================================')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu='+CPU + ' -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMSTM'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread.map --scatter "board\linker_scripts\link.sct"'
CFLAGS += ' --c99'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
def dist_handle(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
| apache-2.0 | -5,450,535,595,100,505,000 | 26.163043 | 142 | 0.559424 | false |
AutorestCI/azure-sdk-for-python | azure-cognitiveservices-search-entitysearch/azure/cognitiveservices/search/entitysearch/models/error_response.py | 2 | 2492 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .response import Response
from msrest.exceptions import HttpOperationError
class ErrorResponse(Response):
"""The top-level response that represents a failed request.
Variables are only populated by the server, and will be ignored when
sending a request.
:param _type: Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:ivar contractual_rules: A list of rules that you must adhere to if you
display the item.
:vartype contractual_rules:
list[~azure.cognitiveservices.search.entitysearch.models.ContractualRulesContractualRule]
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:param errors: A list of errors that describe the reasons why the request
failed.
:type errors:
list[~azure.cognitiveservices.search.entitysearch.models.Error]
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'contractual_rules': {'readonly': True},
'web_search_url': {'readonly': True},
'errors': {'required': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'contractual_rules': {'key': 'contractualRules', 'type': '[ContractualRulesContractualRule]'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[Error]'},
}
def __init__(self, errors):
super(ErrorResponse, self).__init__()
self.errors = errors
self._type = 'ErrorResponse'
class ErrorResponseException(HttpOperationError):
"""Server responsed with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
| mit | -8,826,176,483,032,432,000 | 35.115942 | 102 | 0.620385 | false |
PaulRumyantsev/python_QA | bdd/contacts_steps.py | 1 | 2764 | from pytest_bdd import given, when, then
from model.contacts import Contacts
import random
@given('a contact list')
def contact_list(db):
return db.get_contacts_list()
@given('a contact with <firstname>, <lastname>, <id> and <address>')
def new_contact(firstname, lastname, id, address):
return Contacts(firstname=firstname, lastname=lastname, id=id, address=address)
@when('I add the contact to the list')
def add_new_contact(app, new_contact):
app.contacts.create(new_contact)
@then('the new contact list is equal to the old list with the added contact')
def verify_contact_added(db, app, contact_list, new_contact):
old_contacts = contact_list
assert len(old_contacts) + 1 == app.contacts.count()
new_contacts = db.get_contacts_list()
old_contacts.append(new_contact)
assert sorted(old_contacts, key=Contacts.id_or_max) == sorted(new_contacts, key=Contacts.id_or_max)
@given('a non-empty contact list')
def non_empty_contact_list(db, app):
if len(db.get_contacts_list()) == 0:
app.contacts.create(Contacts(firstname="some name"))
return db.get_contacts_list()
@given('a random contact from the list')
def random_contact(non_empty_contact_list):
return random.choice(non_empty_contact_list)
@when('I delete the contact from the list')
def delete_contact(app, random_contact):
app.contacts.delete_contacts_by_id(random_contact.id)
@then('the new list is equal to the old list without the deleted contact')
def verify_contact_deleted(db, non_empty_contact_list, random_contact, app, check_ui):
old_contacts = non_empty_contact_list
new_contacts = db.get_contacts_list()
assert len(old_contacts) - 1 == len(new_contacts)
old_contacts.remove(random_contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contacts.id_or_max) == sorted(app.contacts.get_contacts_list(),
key=Contacts.id_or_max)
@when('I modify the contact from the list')
def modify_contact(app, random_contact):
    # pass the chosen contact's own id rather than the built-in id()
    app.contacts.modif_contact_by_id(random_contact.id, random_contact)
@then('the new list is equal to the old list')
def verify_group_modify(db, non_empty_contact_list, random_contact, app, check_ui):
old_contacts = non_empty_contact_list
new_contacts = db.get_contacts_list()
assert len(old_contacts) == len(new_contacts)
old_contacts.id = random_contact
assert sorted(old_contacts, key=Contacts.id_or_max) == sorted(new_contacts, key=Contacts.id_or_max)
if check_ui:
assert sorted(old_contacts, key=Contacts.id_or_max) == sorted(app.contacts.get_contacts_list(),
key=Contacts.id_or_max)
| apache-2.0 | 2,545,735,729,513,010,700 | 36.863014 | 103 | 0.680535 | false |
DedMemez/ODS-August-2017 | urllib2.py | 1 | 35969 | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: urllib2
import base64
import hashlib
import httplib
import mimetools
import os
import posixpath
import random
import re
import socket
import sys
import time
import urlparse
import bisect
import warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from urllib import unwrap, unquote, splittype, splithost, quote, addinfourl, splitport, splittag, toBytes, splitattr, ftpwrapper, splituser, splitpasswd, splitvalue
from urllib import localhost, url2pathname, getproxies, proxy_bypass
__version__ = sys.version[:3]
_opener = None
def urlopen(url, data = None, timeout = socket._GLOBAL_DEFAULT_TIMEOUT, cafile = None, capath = None, cadefault = False, context = None):
global _opener
if cafile or capath or cadefault:
if context is not None:
raise ValueError("You can't pass both context and any of cafile, capath, and cadefault")
if not _have_ssl:
raise ValueError('SSL support not available')
context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=cafile, capath=capath)
https_handler = HTTPSHandler(context=context)
opener = build_opener(https_handler)
elif context:
https_handler = HTTPSHandler(context=context)
opener = build_opener(https_handler)
elif _opener is None:
_opener = opener = build_opener()
else:
opener = _opener
return opener.open(url, data, timeout)
def install_opener(opener):
global _opener
_opener = opener
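# Illustrative sketch (not part of the original module): typical use of the public
# entry points defined above. The URL is a placeholder; the handler classes
# referenced here are defined further down in this file.
def _example_usage():
    request = Request('http://www.example.com/', headers={'User-agent': 'example'})
    response = urlopen(request, timeout=10)
    body = response.read()
    # a customised opener can be installed as the global default used by urlopen()
    install_opener(build_opener(HTTPCookieProcessor()))
    return body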
class URLError(IOError):
def __init__(self, reason):
self.args = (reason,)
self.reason = reason
def __str__(self):
return '<urlopen error %s>' % self.reason
class HTTPError(URLError, addinfourl):
__super_init = addinfourl.__init__
def __init__(self, url, code, msg, hdrs, fp):
self.code = code
self.msg = msg
self.hdrs = hdrs
self.fp = fp
self.filename = url
if fp is not None:
self.__super_init(fp, hdrs, url, code)
return
def __str__(self):
return 'HTTP Error %s: %s' % (self.code, self.msg)
@property
def reason(self):
return self.msg
def info(self):
return self.hdrs
_cut_port_re = re.compile(':\\d+$')
def request_host(request):
url = request.get_full_url()
host = urlparse.urlparse(url)[1]
if host == '':
host = request.get_header('Host', '')
host = _cut_port_re.sub('', host, 1)
return host.lower()
class Request:
def __init__(self, url, data = None, headers = {}, origin_req_host = None, unverifiable = False):
self.__original = unwrap(url)
self.__original, self.__fragment = splittag(self.__original)
self.type = None
self.host = None
self.port = None
self._tunnel_host = None
self.data = data
self.headers = {}
for key, value in headers.items():
self.add_header(key, value)
self.unredirected_hdrs = {}
if origin_req_host is None:
origin_req_host = request_host(self)
self.origin_req_host = origin_req_host
self.unverifiable = unverifiable
return
def __getattr__(self, attr):
if attr[:12] == '_Request__r_':
name = attr[12:]
if hasattr(Request, 'get_' + name):
getattr(self, 'get_' + name)()
return getattr(self, attr)
raise AttributeError, attr
def get_method(self):
if self.has_data():
return 'POST'
else:
return 'GET'
def add_data(self, data):
self.data = data
def has_data(self):
return self.data is not None
def get_data(self):
return self.data
def get_full_url(self):
if self.__fragment:
return '%s#%s' % (self.__original, self.__fragment)
else:
return self.__original
def get_type(self):
if self.type is None:
self.type, self.__r_type = splittype(self.__original)
if self.type is None:
raise ValueError, 'unknown url type: %s' % self.__original
return self.type
def get_host(self):
if self.host is None:
self.host, self.__r_host = splithost(self.__r_type)
if self.host:
self.host = unquote(self.host)
return self.host
def get_selector(self):
return self.__r_host
def set_proxy(self, host, type):
if self.type == 'https' and not self._tunnel_host:
self._tunnel_host = self.host
else:
self.type = type
self.__r_host = self.__original
self.host = host
def has_proxy(self):
return self.__r_host == self.__original
def get_origin_req_host(self):
return self.origin_req_host
def is_unverifiable(self):
return self.unverifiable
def add_header(self, key, val):
self.headers[key.capitalize()] = val
def add_unredirected_header(self, key, val):
self.unredirected_hdrs[key.capitalize()] = val
def has_header(self, header_name):
return header_name in self.headers or header_name in self.unredirected_hdrs
def get_header(self, header_name, default = None):
return self.headers.get(header_name, self.unredirected_hdrs.get(header_name, default))
def header_items(self):
hdrs = self.unredirected_hdrs.copy()
hdrs.update(self.headers)
return hdrs.items()
class OpenerDirector:
def __init__(self):
client_version = 'Python-urllib/%s' % __version__
self.addheaders = [('User-agent', client_version)]
self.handlers = []
self.handle_open = {}
self.handle_error = {}
self.process_response = {}
self.process_request = {}
def add_handler(self, handler):
if not hasattr(handler, 'add_parent'):
raise TypeError('expected BaseHandler instance, got %r' % type(handler))
added = False
for meth in dir(handler):
if meth in ('redirect_request', 'do_open', 'proxy_open'):
continue
i = meth.find('_')
protocol = meth[:i]
condition = meth[i + 1:]
if condition.startswith('error'):
j = condition.find('_') + i + 1
kind = meth[j + 1:]
try:
kind = int(kind)
except ValueError:
pass
lookup = self.handle_error.get(protocol, {})
self.handle_error[protocol] = lookup
elif condition == 'open':
kind = protocol
lookup = self.handle_open
elif condition == 'response':
kind = protocol
lookup = self.process_response
elif condition == 'request':
kind = protocol
lookup = self.process_request
else:
continue
handlers = lookup.setdefault(kind, [])
if handlers:
bisect.insort(handlers, handler)
else:
handlers.append(handler)
added = True
if added:
bisect.insort(self.handlers, handler)
handler.add_parent(self)
def close(self):
pass
def _call_chain(self, chain, kind, meth_name, *args):
handlers = chain.get(kind, ())
for handler in handlers:
func = getattr(handler, meth_name)
result = func(*args)
if result is not None:
return result
return
def open(self, fullurl, data = None, timeout = socket._GLOBAL_DEFAULT_TIMEOUT):
if isinstance(fullurl, basestring):
req = Request(fullurl, data)
else:
req = fullurl
if data is not None:
req.add_data(data)
req.timeout = timeout
protocol = req.get_type()
meth_name = protocol + '_request'
for processor in self.process_request.get(protocol, []):
meth = getattr(processor, meth_name)
req = meth(req)
response = self._open(req, data)
meth_name = protocol + '_response'
for processor in self.process_response.get(protocol, []):
meth = getattr(processor, meth_name)
response = meth(req, response)
return response
def _open(self, req, data = None):
result = self._call_chain(self.handle_open, 'default', 'default_open', req)
if result:
return result
protocol = req.get_type()
result = self._call_chain(self.handle_open, protocol, protocol + '_open', req)
if result:
return result
return self._call_chain(self.handle_open, 'unknown', 'unknown_open', req)
def error(self, proto, *args):
if proto in ('http', 'https'):
dict = self.handle_error['http']
proto = args[2]
meth_name = 'http_error_%s' % proto
http_err = 1
orig_args = args
else:
dict = self.handle_error
meth_name = proto + '_error'
http_err = 0
args = (dict, proto, meth_name) + args
result = self._call_chain(*args)
if result:
return result
if http_err:
args = (dict, 'default', 'http_error_default') + orig_args
return self._call_chain(*args)
def build_opener(*handlers):
import types
def isclass(obj):
return isinstance(obj, (types.ClassType, type))
opener = OpenerDirector()
default_classes = [ProxyHandler,
UnknownHandler,
HTTPHandler,
HTTPDefaultErrorHandler,
HTTPRedirectHandler,
FTPHandler,
FileHandler,
HTTPErrorProcessor]
if hasattr(httplib, 'HTTPS'):
default_classes.append(HTTPSHandler)
skip = set()
for klass in default_classes:
for check in handlers:
if isclass(check):
if issubclass(check, klass):
skip.add(klass)
elif isinstance(check, klass):
skip.add(klass)
for klass in skip:
default_classes.remove(klass)
for klass in default_classes:
opener.add_handler(klass())
for h in handlers:
if isclass(h):
h = h()
opener.add_handler(h)
return opener
class BaseHandler:
handler_order = 500
def add_parent(self, parent):
self.parent = parent
def close(self):
pass
def __lt__(self, other):
if not hasattr(other, 'handler_order'):
return True
return self.handler_order < other.handler_order
class HTTPErrorProcessor(BaseHandler):
handler_order = 1000
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if not 200 <= code < 300:
response = self.parent.error('http', request, response, code, msg, hdrs)
return response
https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
class HTTPRedirectHandler(BaseHandler):
max_repeats = 4
max_redirections = 10
def redirect_request(self, req, fp, code, msg, headers, newurl):
m = req.get_method()
if code in (301, 302, 303, 307) and m in ('GET', 'HEAD') or code in (301, 302, 303) and m == 'POST':
newurl = newurl.replace(' ', '%20')
newheaders = dict(((k, v) for k, v in req.headers.items() if k.lower() not in ('content-length', 'content-type')))
return Request(newurl, headers=newheaders, origin_req_host=req.get_origin_req_host(), unverifiable=True)
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
def http_error_302(self, req, fp, code, msg, headers):
if 'location' in headers:
newurl = headers.getheaders('location')[0]
elif 'uri' in headers:
newurl = headers.getheaders('uri')[0]
else:
return
urlparts = urlparse.urlparse(newurl)
if not urlparts.path:
urlparts = list(urlparts)
urlparts[2] = '/'
newurl = urlparse.urlunparse(urlparts)
newurl = urlparse.urljoin(req.get_full_url(), newurl)
newurl_lower = newurl.lower()
if not (newurl_lower.startswith('http://') or newurl_lower.startswith('https://') or newurl_lower.startswith('ftp://')):
raise HTTPError(newurl, code, msg + " - Redirection to url '%s' is not allowed" % newurl, headers, fp)
new = self.redirect_request(req, fp, code, msg, headers, newurl)
if new is None:
return
else:
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if visited.get(newurl, 0) >= self.max_repeats or len(visited) >= self.max_redirections:
raise HTTPError(req.get_full_url(), code, self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
fp.read()
fp.close()
return self.parent.open(new, timeout=req.timeout)
http_error_301 = http_error_303 = http_error_307 = http_error_302
inf_msg = 'The HTTP server returned a redirect error that would lead to an infinite loop.\nThe last 30x error message was:\n'
def _parse_proxy(proxy):
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith('/'):
scheme = None
authority = proxy
else:
if not r_scheme.startswith('//'):
raise ValueError('proxy URL with no authority: %r' % proxy)
end = r_scheme.find('/', 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return (scheme,
user,
password,
hostport)
class ProxyHandler(BaseHandler):
handler_order = 100
def __init__(self, proxies = None):
if proxies is None:
proxies = getproxies()
self.proxies = proxies
for type, url in proxies.items():
setattr(self, '%s_open' % type, lambda r, proxy = url, type = type, meth = self.proxy_open: meth(r, proxy, type))
return
def proxy_open(self, req, proxy, type):
orig_type = req.get_type()
proxy_type, user, password, hostport = _parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if req.host and proxy_bypass(req.host):
return
else:
if user and password:
user_pass = '%s:%s' % (unquote(user), unquote(password))
creds = base64.b64encode(user_pass).strip()
req.add_header('Proxy-authorization', 'Basic ' + creds)
hostport = unquote(hostport)
req.set_proxy(hostport, proxy_type)
if orig_type == proxy_type or orig_type == 'https':
return
return self.parent.open(req, timeout=req.timeout)
return
class HTTPPasswordMgr:
def __init__(self):
self.passwd = {}
def add_password(self, realm, uri, user, passwd):
if isinstance(uri, basestring):
uri = [uri]
if realm not in self.passwd:
self.passwd[realm] = {}
for default_port in (True, False):
reduced_uri = tuple([ self.reduce_uri(u, default_port) for u in uri ])
self.passwd[realm][reduced_uri] = (user, passwd)
def find_user_password(self, realm, authuri):
domains = self.passwd.get(realm, {})
for default_port in (True, False):
reduced_authuri = self.reduce_uri(authuri, default_port)
for uris, authinfo in domains.iteritems():
for uri in uris:
if self.is_suburi(uri, reduced_authuri):
return authinfo
return (None, None)
def reduce_uri(self, uri, default_port = True):
parts = urlparse.urlsplit(uri)
if parts[1]:
scheme = parts[0]
authority = parts[1]
path = parts[2] or '/'
else:
scheme = None
authority = uri
path = '/'
host, port = splitport(authority)
if default_port and port is None and scheme is not None:
dport = {'http': 80,
'https': 443}.get(scheme)
if dport is not None:
authority = '%s:%d' % (host, dport)
return (authority, path)
def is_suburi(self, base, test):
if base == test:
return True
if base[0] != test[0]:
return False
common = posixpath.commonprefix((base[1], test[1]))
if len(common) == len(base[1]):
return True
return False
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
def find_user_password(self, realm, authuri):
user, password = HTTPPasswordMgr.find_user_password(self, realm, authuri)
if user is not None:
return (user, password)
else:
return HTTPPasswordMgr.find_user_password(self, None, authuri)
class AbstractBasicAuthHandler:
rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+realm=(["\']?)([^"\']*)\\2', re.I)
def __init__(self, password_mgr = None):
if password_mgr is None:
password_mgr = HTTPPasswordMgr()
self.passwd = password_mgr
self.add_password = self.passwd.add_password
return
def http_error_auth_reqed(self, authreq, host, req, headers):
authreq = headers.get(authreq, None)
if authreq:
mo = AbstractBasicAuthHandler.rx.search(authreq)
if mo:
scheme, quote, realm = mo.groups()
if quote not in ('"', "'"):
warnings.warn('Basic Auth Realm was unquoted', UserWarning, 2)
if scheme.lower() == 'basic':
return self.retry_http_basic_auth(host, req, realm)
return
def retry_http_basic_auth(self, host, req, realm):
user, pw = self.passwd.find_user_password(realm, host)
if pw is not None:
raw = '%s:%s' % (user, pw)
auth = 'Basic %s' % base64.b64encode(raw).strip()
if req.get_header(self.auth_header, None) == auth:
return
req.add_unredirected_header(self.auth_header, auth)
return self.parent.open(req, timeout=req.timeout)
else:
return
return
class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Authorization'
def http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
response = self.http_error_auth_reqed('www-authenticate', url, req, headers)
return response
class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Proxy-authorization'
def http_error_407(self, req, fp, code, msg, headers):
authority = req.get_host()
response = self.http_error_auth_reqed('proxy-authenticate', authority, req, headers)
return response
def randombytes(n):
if os.path.exists('/dev/urandom'):
f = open('/dev/urandom')
s = f.read(n)
f.close()
return s
else:
L = [ chr(random.randrange(0, 256)) for i in range(n) ]
return ''.join(L)
class AbstractDigestAuthHandler:
def __init__(self, passwd = None):
if passwd is None:
passwd = HTTPPasswordMgr()
self.passwd = passwd
self.add_password = self.passwd.add_password
self.retried = 0
self.nonce_count = 0
self.last_nonce = None
return
def reset_retry_count(self):
self.retried = 0
def http_error_auth_reqed(self, auth_header, host, req, headers):
authreq = headers.get(auth_header, None)
if self.retried > 5:
raise HTTPError(req.get_full_url(), 401, 'digest auth failed', headers, None)
else:
self.retried += 1
if authreq:
scheme = authreq.split()[0]
if scheme.lower() == 'digest':
return self.retry_http_digest_auth(req, authreq)
return
def retry_http_digest_auth(self, req, auth):
token, challenge = auth.split(' ', 1)
chal = parse_keqv_list(parse_http_list(challenge))
auth = self.get_authorization(req, chal)
if auth:
auth_val = 'Digest %s' % auth
if req.headers.get(self.auth_header, None) == auth_val:
return
req.add_unredirected_header(self.auth_header, auth_val)
resp = self.parent.open(req, timeout=req.timeout)
return resp
else:
return
def get_cnonce(self, nonce):
dig = hashlib.sha1('%s:%s:%s:%s' % (self.nonce_count,
nonce,
time.ctime(),
randombytes(8))).hexdigest()
return dig[:16]
def get_authorization(self, req, chal):
try:
realm = chal['realm']
nonce = chal['nonce']
qop = chal.get('qop')
algorithm = chal.get('algorithm', 'MD5')
opaque = chal.get('opaque', None)
except KeyError:
return
H, KD = self.get_algorithm_impls(algorithm)
if H is None:
return
else:
user, pw = self.passwd.find_user_password(realm, req.get_full_url())
if user is None:
return
if req.has_data():
entdig = self.get_entity_digest(req.get_data(), chal)
else:
entdig = None
A1 = '%s:%s:%s' % (user, realm, pw)
A2 = '%s:%s' % (req.get_method(), req.get_selector())
if qop == 'auth':
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
self.last_nonce = nonce
ncvalue = '%08x' % self.nonce_count
cnonce = self.get_cnonce(nonce)
noncebit = '%s:%s:%s:%s:%s' % (nonce,
ncvalue,
cnonce,
qop,
H(A2))
respdig = KD(H(A1), noncebit)
elif qop is None:
respdig = KD(H(A1), '%s:%s' % (nonce, H(A2)))
else:
raise URLError("qop '%s' is not supported." % qop)
base = 'username="%s", realm="%s", nonce="%s", uri="%s", response="%s"' % (user,
realm,
nonce,
req.get_selector(),
respdig)
if opaque:
base += ', opaque="%s"' % opaque
if entdig:
base += ', digest="%s"' % entdig
base += ', algorithm="%s"' % algorithm
if qop:
base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return base
def get_algorithm_impls(self, algorithm):
algorithm = algorithm.upper()
if algorithm == 'MD5':
H = lambda x: hashlib.md5(x).hexdigest()
elif algorithm == 'SHA':
H = lambda x: hashlib.sha1(x).hexdigest()
KD = lambda s, d: H('%s:%s' % (s, d))
return (H, KD)
def get_entity_digest(self, data, chal):
return None
class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
auth_header = 'Authorization'
handler_order = 490
def http_error_401(self, req, fp, code, msg, headers):
host = urlparse.urlparse(req.get_full_url())[1]
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
auth_header = 'Proxy-Authorization'
handler_order = 490
def http_error_407(self, req, fp, code, msg, headers):
host = req.get_host()
retry = self.http_error_auth_reqed('proxy-authenticate', host, req, headers)
self.reset_retry_count()
return retry
class AbstractHTTPHandler(BaseHandler):
def __init__(self, debuglevel = 0):
self._debuglevel = debuglevel
def set_http_debuglevel(self, level):
self._debuglevel = level
def do_request_(self, request):
host = request.get_host()
if not host:
raise URLError('no host given')
if request.has_data():
data = request.get_data()
if not request.has_header('Content-type'):
request.add_unredirected_header('Content-type', 'application/x-www-form-urlencoded')
if not request.has_header('Content-length'):
request.add_unredirected_header('Content-length', '%d' % len(data))
sel_host = host
if request.has_proxy():
scheme, sel = splittype(request.get_selector())
sel_host, sel_path = splithost(sel)
if not request.has_header('Host'):
request.add_unredirected_header('Host', sel_host)
for name, value in self.parent.addheaders:
name = name.capitalize()
if not request.has_header(name):
request.add_unredirected_header(name, value)
return request
def do_open(self, http_class, req, **http_conn_args):
host = req.get_host()
if not host:
raise URLError('no host given')
h = http_class(host, timeout=req.timeout, **http_conn_args)
h.set_debuglevel(self._debuglevel)
headers = dict(req.unredirected_hdrs)
headers.update(dict(((k, v) for k, v in req.headers.items() if k not in headers)))
headers['Connection'] = 'close'
headers = dict(((name.title(), val) for name, val in headers.items()))
if req._tunnel_host:
tunnel_headers = {}
proxy_auth_hdr = 'Proxy-Authorization'
if proxy_auth_hdr in headers:
tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
del headers[proxy_auth_hdr]
h.set_tunnel(req._tunnel_host, headers=tunnel_headers)
try:
h.request(req.get_method(), req.get_selector(), req.data, headers)
except socket.error as err:
h.close()
raise URLError(err)
else:
try:
r = h.getresponse(buffering=True)
except TypeError:
r = h.getresponse()
r.recv = r.read
fp = socket._fileobject(r, close=True)
resp = addinfourl(fp, r.msg, req.get_full_url())
resp.code = r.status
resp.msg = r.reason
return resp
class HTTPHandler(AbstractHTTPHandler):
def http_open(self, req):
return self.do_open(httplib.HTTPConnection, req)
http_request = AbstractHTTPHandler.do_request_
if hasattr(httplib, 'HTTPS'):
class HTTPSHandler(AbstractHTTPHandler):
def __init__(self, debuglevel = 0, context = None):
AbstractHTTPHandler.__init__(self, debuglevel)
self._context = context
def https_open(self, req):
return self.do_open(httplib.HTTPSConnection, req, context=self._context)
https_request = AbstractHTTPHandler.do_request_
class HTTPCookieProcessor(BaseHandler):
def __init__(self, cookiejar = None):
import cookielib
if cookiejar is None:
cookiejar = cookielib.CookieJar()
self.cookiejar = cookiejar
return
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
class UnknownHandler(BaseHandler):
def unknown_open(self, req):
type = req.get_type()
raise URLError('unknown url type: %s' % type)
def parse_keqv_list(l):
parsed = {}
for elt in l:
k, v = elt.split('=', 1)
if v[0] == '"' and v[-1] == '"':
v = v[1:-1]
parsed[k] = v
return parsed
def parse_http_list(s):
res = []
part = ''
escape = quote = False
for cur in s:
if escape:
part += cur
escape = False
continue
if quote:
if cur == '\\':
escape = True
continue
elif cur == '"':
quote = False
part += cur
continue
if cur == ',':
res.append(part)
part = ''
continue
if cur == '"':
quote = True
part += cur
if part:
res.append(part)
return [ part.strip() for part in res ]
def _safe_gethostbyname(host):
try:
return socket.gethostbyname(host)
except socket.gaierror:
return None
return None
class FileHandler(BaseHandler):
def file_open(self, req):
url = req.get_selector()
if url[:2] == '//' and url[2:3] != '/' and req.host and req.host != 'localhost':
req.type = 'ftp'
return self.parent.open(req)
else:
return self.open_local_file(req)
names = None
def get_names(self):
if FileHandler.names is None:
try:
FileHandler.names = tuple(socket.gethostbyname_ex('localhost')[2] + socket.gethostbyname_ex(socket.gethostname())[2])
except socket.gaierror:
FileHandler.names = (socket.gethostbyname('localhost'),)
return FileHandler.names
def open_local_file(self, req):
import email.utils
import mimetypes
host = req.get_host()
filename = req.get_selector()
localfile = url2pathname(filename)
try:
stats = os.stat(localfile)
size = stats.st_size
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
mtype = mimetypes.guess_type(filename)[0]
headers = mimetools.Message(StringIO('Content-type: %s\nContent-length: %d\nLast-modified: %s\n' % (mtype or 'text/plain', size, modified)))
if host:
host, port = splitport(host)
if not host or not port and _safe_gethostbyname(host) in self.get_names():
if host:
origurl = 'file://' + host + filename
else:
origurl = 'file://' + filename
return addinfourl(open(localfile, 'rb'), headers, origurl)
except OSError as msg:
raise URLError(msg)
raise URLError('file not on local host')
class FTPHandler(BaseHandler):
def ftp_open(self, req):
import ftplib
import mimetypes
host = req.get_host()
if not host:
raise URLError('ftp error: no host given')
host, port = splitport(host)
if port is None:
port = ftplib.FTP_PORT
else:
port = int(port)
user, host = splituser(host)
if user:
user, passwd = splitpasswd(user)
else:
passwd = None
host = unquote(host)
user = user or ''
passwd = passwd or ''
try:
host = socket.gethostbyname(host)
except socket.error as msg:
raise URLError(msg)
path, attrs = splitattr(req.get_selector())
dirs = path.split('/')
dirs = map(unquote, dirs)
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]:
dirs = dirs[1:]
try:
fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
type = file and 'I' or 'D'
for attr in attrs:
attr, value = splitvalue(attr)
if attr.lower() == 'type' and value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
fp, retrlen = fw.retrfile(file, type)
headers = ''
mtype = mimetypes.guess_type(req.get_full_url())[0]
if mtype:
headers += 'Content-type: %s\n' % mtype
if retrlen is not None and retrlen >= 0:
headers += 'Content-length: %d\n' % retrlen
sf = StringIO(headers)
headers = mimetools.Message(sf)
return addinfourl(fp, headers, req.get_full_url())
except ftplib.all_errors as msg:
raise URLError, 'ftp error: %s' % msg, sys.exc_info()[2]
return
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
fw = ftpwrapper(user, passwd, host, port, dirs, timeout, persistent=False)
return fw
class CacheFTPHandler(FTPHandler):
def __init__(self):
self.cache = {}
self.timeout = {}
self.soonest = 0
self.delay = 60
self.max_conns = 16
def setTimeout(self, t):
self.delay = t
def setMaxConns(self, m):
self.max_conns = m
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
key = (user,
host,
port,
'/'.join(dirs),
timeout)
if key in self.cache:
self.timeout[key] = time.time() + self.delay
else:
self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout)
self.timeout[key] = time.time() + self.delay
self.check_cache()
return self.cache[key]
def check_cache(self):
t = time.time()
if self.soonest <= t:
for k, v in self.timeout.items():
if v < t:
self.cache[k].close()
del self.cache[k]
del self.timeout[k]
self.soonest = min(self.timeout.values())
if len(self.cache) == self.max_conns:
for k, v in self.timeout.items():
if v == self.soonest:
del self.cache[k]
del self.timeout[k]
break
self.soonest = min(self.timeout.values())
def clear_cache(self):
for conn in self.cache.values():
conn.close()
self.cache.clear()
self.timeout.clear() | apache-2.0 | -4,667,808,227,987,845,000 | 30.940476 | 164 | 0.536962 | false |
MoyTW/AStar | AStar.py | 1 | 6397 | __author__ = 'Travis Moy'
# This is an implementation of the A* pathfinding algorithm.
#
# It is designed to be usable in as many different situations as possible, with as many different types of nodes as
# possible. Therefore, it does not directly manipulate any of the data, and may be used with any data format the user
# would like. For example, this code may be used with a standard 2-D grid of cells which can be traversed diagonally,
# a grid of hexes, a navigation mesh, or any arbitrary graph.
#
# However, this flexibility means that it does not touch the underlying data, and therefore the user must define the
# following functions:
#
# A function which determines what nodes are adjacent to any given node with the following signature:
# iterable<node_coordinate> f(node_coordinate)
#
# A function to calculate the move cost for moving to an adjacent node with the following signature:
# int f(node_coordinate, node_coordinate)
# Note that the order in which the adjacent nodes are presented will change the bias of the algorithm! In case
# of ties in f-values between two options, it will go with the first presented, which is the first option
# from this function.
#
# A function which determines whether a given node is passable with the following signature:
# bool f(node_coordinate)
#
# A function which estimates the movement cost between two given nodes with the following signature:
# int f(node_coordinate, node_coordinate)
#
# Given these functions, find_path() will return a list of coordinates (in whatever format you supplied them)
# indicating the path to be taken from the origin to the destination.
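# Illustrative sketch (not part of the original module): the four user-supplied
# functions for a 4-connected 2-D grid of (x, y) tuples, where 'blocked' is an
# assumed set of impassable cells. find_path() is defined below.
def _grid_example(width, height, blocked, origin, destination):
    def list_adjacent(coord):
        x, y = coord
        candidates = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
        return [(cx, cy) for (cx, cy) in candidates
                if 0 <= cx < width and 0 <= cy < height]

    def move_cost(from_coord, to_coord):
        return 1  # every step between adjacent cells costs the same

    def is_passable(coord):
        return coord not in blocked

    def estimate_cost(from_coord, to_coord):
        # Manhattan distance is an admissible estimate on a 4-connected grid
        return abs(from_coord[0] - to_coord[0]) + abs(from_coord[1] - to_coord[1])

    return find_path(origin, destination, list_adjacent, move_cost,
                     is_passable, estimate_cost)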
# Returns a list of coordinates, from the origin to the destination.
def find_path(origin, destination,
func_list_adjacent_nodes,
func_calculate_move_cost,
func_node_is_passable,
func_estimate_cost):
if origin == destination:
return []
start_node = NodeEntry(origin, None, 0, func_estimate_cost(origin, destination))
open_nodes = [start_node]
closed_nodes = []
done = False
while len(open_nodes) > 0 and not done:
done = _process_next_node(open_nodes, closed_nodes, destination, func_list_adjacent_nodes,
func_node_is_passable, func_calculate_move_cost, func_estimate_cost)
return _return_path(origin, destination, closed_nodes)
def _process_next_node(open_nodes, closed_nodes, destination,
func_list_adjacent_nodes,
func_node_is_passable,
func_calculate_move_cost,
func_estimate_cost):
open_nodes.sort() # Not ideal, because it sorts every time - even if we don't need to.
target = open_nodes.pop(0)
closed_nodes.append(target)
# !End condition! We have found the target!
if target == destination:
return True
adjacent_nodes = func_list_adjacent_nodes(target.coordinates)
_process_adjacent_nodes(target, adjacent_nodes, open_nodes, closed_nodes, destination,
func_node_is_passable, func_calculate_move_cost, func_estimate_cost)
return False
def _process_adjacent_nodes(origin, adjacent_nodes, open_nodes, closed_nodes, destination,
func_node_is_passable,
func_calculate_move_cost,
func_estimate_cost):
for coordinates in adjacent_nodes:
if func_node_is_passable(coordinates) and (coordinates not in closed_nodes):
new_g = origin.g + func_calculate_move_cost(origin.coordinates, coordinates)
if coordinates not in open_nodes:
new_node = NodeEntry(coordinates=coordinates,
parent=origin,
g=new_g,
h=func_estimate_cost(coordinates, destination))
open_nodes.append(new_node)
else:
existing_node = open_nodes[open_nodes.index(coordinates)]
if new_g < existing_node.g:
existing_node.g = new_g
existing_node.parent = origin
def _return_path(origin, destination, closed_nodes):
if destination in closed_nodes:
path = list()
end_node = closed_nodes[closed_nodes.index(destination)]
parent_node = end_node.parent
path.insert(0, end_node.coordinates)
while parent_node != origin:
end_node = parent_node
parent_node = end_node.parent
path.insert(0, end_node.coordinates)
return path
else:
return []
# NodeEntry is an internal helper class.
#
# g = cost to move to this node along the best known path
# h = estimate of cost to destination from this node
# f = total estimated cost for travel to destination from this node
#
# This class has two methods of comparison - equality utilizing coordinates, and ordering (via __cmp__) utilizing f.
# See the comments below for more info.
class NodeEntry(object):
def __init__(self, coordinates, parent, g, h):
self.coordinates = coordinates
self.parent = parent
self.g = g
self.h = h
@property
def f(self):
return self.g + self.h
# So, we're doing something not recommended here.
#
# We've defined __eq__ and __ne__, but *not* the other rich comparison methods.
# We've also defined __cmp__.
    # What this will do is allow us to use the equality-based functions ('in' or '==') while also allowing us to use
# the built-in sorting algorithms, as the sorting algorithms fall back on __cmp__ if the rich comparison operators
# (__lt__, __gt__, etc) are not defined.
#
# This is a terrible, hacky thing to do, and it doesn't work in Python 3, where they removed __cmp__. Never do it.
def __ne__(self, other):
return not self == other
# We allow valid comparisons with coordinates, as well as NodeEntry instances.
#
# This is another hacky thing to do which should be avoided.
def __eq__(self, other):
try:
return self.coordinates == other.coordinates
except AttributeError:
return self.coordinates == other
def __cmp__(self, other):
return cmp(self.f, other.f) | mit | -3,870,866,340,722,978,300 | 41.939597 | 120 | 0.643114 | false |
binhqnguyen/ln | nsc/scons-local-1.2.0.d20090223/SCons/Options/BoolOption.py | 19 | 1999 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/BoolOption.py 4043 2009/02/23 09:06:45 scons"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False
def BoolOption(*args, **kw):
global warned
if not warned:
msg = "The BoolOption() function is deprecated; use the BoolVariable() function instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
warned = True
return apply(SCons.Variables.BoolVariable, args, kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 | -5,560,263,739,764,696,000 | 38.98 | 97 | 0.753377 | false |
mlperf/training_results_v0.7 | DellEMC/benchmarks/maskrcnn/implementation/pytorch/maskrcnn_benchmark/solver/lr_scheduler.py | 4 | 2161 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018-2019 NVIDIA CORPORATION. All rights reserved.
from bisect import bisect_right
import torch
# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1,
):
if not list(milestones) == sorted(milestones):
            raise ValueError(
                "Milestones should be a list of"
                " increasing integers. Got {}".format(milestones)
            )
if warmup_method not in ("constant", "linear", "mlperf_linear"):
            raise ValueError(
                "Only 'constant', 'linear' or 'mlperf_linear' warmup_method "
                "accepted, got {}".format(warmup_method)
            )
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
# optional offset to each base_lr
delta = 0.
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = float(self.last_epoch) / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
# MLPerf-specific warmup definition
elif self.warmup_method == "mlperf_linear":
delta = (self.warmup_iters - self.last_epoch) * self.warmup_factor
return [
(base_lr - delta)
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
| apache-2.0 | 1,659,410,019,060,927,500 | 35.016667 | 82 | 0.588154 | false |
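# --- Added usage sketch (not part of the file above).  The optimizer, model
# and schedule values are placeholders; the point is the warmup-then-decay
# behaviour of the WarmupMultiStepLR class defined above.
import torch


def build_optimizer_and_scheduler(model):
    optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.9)
    scheduler = WarmupMultiStepLR(
        optimizer,
        milestones=[60000, 80000],   # decay LR by gamma at these iterations
        gamma=0.1,
        warmup_factor=1.0 / 3,
        warmup_iters=500,            # LR ramps up over the first 500 steps
        warmup_method="linear",
    )
    return optimizer, scheduler
# Typical training-loop usage: call optimizer.step() and then
# scheduler.step() once per iteration.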
dcristoloveanu/qpid-proton | examples/python/client.py | 4 | 2451 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import optparse
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container, DynamicNodeProperties
class Client(MessagingHandler):
def __init__(self, url, requests):
super(Client, self).__init__()
self.url = url
self.requests = requests
def on_start(self, event):
self.sender = event.container.create_sender(self.url)
self.receiver = event.container.create_receiver(self.sender.connection, None, dynamic=True)
def next_request(self):
if self.receiver.remote_source.address:
req = Message(reply_to=self.receiver.remote_source.address, body=self.requests[0])
self.sender.send(req)
def on_link_opened(self, event):
if event.receiver == self.receiver:
self.next_request()
def on_message(self, event):
print "%s => %s" % (self.requests.pop(0), event.message.body)
if self.requests:
self.next_request()
else:
event.connection.close()
REQUESTS= ["Twas brillig, and the slithy toves",
"Did gire and gymble in the wabe.",
"All mimsy were the borogroves,",
"And the mome raths outgrabe."]
parser = optparse.OptionParser(usage="usage: %prog [options]",
description="Send requests to the supplied address and print responses.")
parser.add_option("-a", "--address", default="localhost:5672/examples",
help="address to which messages are sent (default %default)")
opts, args = parser.parse_args()
Container(Client(opts.address, args or REQUESTS)).run()
| apache-2.0 | 4,088,218,082,006,021,000 | 37.296875 | 104 | 0.685027 | false |
gsmaxwell/phase_offset_rx | gr-blocks/python/qa_peak_detector.py | 1 | 2755 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import blocks_swig as blocks
class test_peak_detector(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_01(self):
tb = self.tb
data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
expected_result = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
src = gr.vector_source_f(data, False)
regen = blocks.peak_detector_fb()
dst = gr.vector_sink_b()
tb.connect(src, regen)
tb.connect(regen, dst)
tb.run()
dst_data = dst.data()
self.assertEqual(expected_result, dst_data)
def test_02(self):
tb = self.tb
data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
expected_result = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
src = gr.vector_source_i(data, False)
regen = blocks.peak_detector_ib()
dst = gr.vector_sink_b()
tb.connect(src, regen)
tb.connect(regen, dst)
tb.run()
dst_data = dst.data()
self.assertEqual(expected_result, dst_data)
def test_03(self):
tb = self.tb
data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
expected_result = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
src = gr.vector_source_s(data, False)
regen = blocks.peak_detector_sb()
dst = gr.vector_sink_b()
tb.connect(src, regen)
tb.connect(regen, dst)
tb.run()
dst_data = dst.data()
self.assertEqual(expected_result, dst_data)
if __name__ == '__main__':
gr_unittest.run(test_peak_detector, "test_peak_detector.xml")
| gpl-3.0 | 3,528,498,798,045,172,000 | 27.112245 | 70 | 0.554265 | false |
yulia-baturina/python_training | test/test_add_and_remove_contact_from_group.py | 1 | 2243 | # -*- coding: utf-8 -*-
from model.contact import Contact
from model.group import Group
import random
def test_add_random_contact_to_random_group(app, orm):
if len(orm.get_group_list()) == 0:
app.group.create(Group(name="new", header="header", footer="footer"))
if len(orm.get_contact_list()) == 0:
app.contact.create(Contact(firstname="first", lastname="last", nickname="nick",
company="company", homePhone="+11111111111", email="[email protected]"))
groups = orm.get_group_list()
group = random.choice(groups)
group_index = groups.index(group)
contacts = orm.get_contact_list()
contact = random.choice(contacts)
old_contacts_in_group = orm.get_contacts_in_group(group)
app.contact.assign_contact_by_id_to_group(contact.id, group.name)
new_groups = orm.get_group_list()
new_group = new_groups[group_index]
new_contacts_in_group = orm.get_contacts_in_group(new_group)
old_contacts_in_group.append(contact)
assert sorted(old_contacts_in_group, key=Contact.id_or_max) == sorted(new_contacts_in_group, key=Contact.id_or_max)
def test_add_and_remove_random_contact_from_random_group(app, orm):
if len(orm.get_group_list()) == 0:
app.group.create(Group(name="new", header="header", footer="footer"))
if len(orm.get_contact_list()) == 0:
app.contact.create(Contact(firstname="first", lastname="last", nickname="nick",
company="company", homePhone="+11111111111", email="[email protected]"))
groups = orm.get_group_list()
group = random.choice(groups)
group_index = groups.index(group)
contacts = orm.get_contact_list()
contact = random.choice(contacts)
app.contact.assign_contact_by_id_to_group(contact.id, group.name)
old_groups = orm.get_group_list()
old_group = old_groups[group_index]
old_contacts_in_group = orm.get_contacts_in_group(old_group)
app.group.remove_contact_by_id_from_group(contact.id)
new_groups = orm.get_group_list()
new_group = new_groups[group_index]
new_contacts_in_group = orm.get_contacts_in_group(new_group)
old_contacts_in_group.remove(contact)
assert sorted(old_contacts_in_group, key=Contact.id_or_max) == sorted(new_contacts_in_group, key=Contact.id_or_max)
| apache-2.0 | 3,274,656,067,477,043,000 | 46.723404 | 119 | 0.690147 | false |
51reboot/homework-arch-4 | 7/caozhenqiang/agent/main.py | 1 | 2360 | #!/usr/bin/python
import Queue
import threading
import time
import json
import urllib2
import socket
import commands
from moniItems import mon
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from simpleNet.nbNetFramework import sendData_mh
#trans_l = ['localhost:50000']
trans_l = ['localhost:60000','localhost:50000']
class porterThread (threading.Thread):
def __init__(self, name, q, ql=None, interval=None):
threading.Thread.__init__(self)
self.name = name
self.q = q
#self.queueLock = ql
self.interval = interval
self.sock_l = [None]
def run(self):
#print "Starting %s" % self.name
if self.name == 'collect':
self.put_data()
elif self.name == 'sendjson':
self.get_data()
def put_data(self):
m = mon()
atime=int(time.time())
while 1:
data = m.runAllGet()
#print data
#self.queueLock.acquire()
self.q.put(data)
#self.queueLock.release()
btime=int(time.time())
#print '%s %s' % (str(data), self.interval-((btime-atime)%30))
time.sleep(self.interval-((btime-atime)%self.interval))
def get_data(self):
while 1:
print "get~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
if not self.q.empty():
data = self.q.get()
print data
while 1:
ret = sendData_mh(self.sock_l, trans_l, json.dumps(data))
print "Agent: trying to sendData_mh"
if ret == "OK" :
time.sleep(self.interval)
print "Agent: sendData_mh successfully"
break
else:
time.sleep(self.interval)
            else:
                # queue is empty; sleep to avoid a busy loop while waiting
                print "Agent: queue empty, waiting for data"
                time.sleep(self.interval)
                continue
def startTh():
q1 = Queue.Queue(10)
ql1 = threading.Lock()
collect = porterThread('collect', q1, ql1, interval=3)
collect.start()
time.sleep(0.5)
sendjson = porterThread('sendjson', q1, ql1, interval=3)
sendjson.start()
q2 = Queue.Queue(10)
print "start"
collect.join()
sendjson.join()
if __name__ == "__main__":
startTh()
| apache-2.0 | 4,533,556,743,450,234,000 | 28.5 | 77 | 0.514407 | false |
sciCloud/OLiMS | lims/upgrade/to3023.py | 2 | 1721 | from dependencies.dependency import aq_inner
from dependencies.dependency import aq_parent
from dependencies.dependency import getToolByName
from dependencies.dependency import REFERENCE_CATALOG
from lims.permissions import AddStorageLocation
def upgrade(tool):
""" Add Storage locacations to ARs and Samples.
"""
# Hack prevent out-of-date upgrading
# Related: PR #1484
# https://github.com/bikalabs/Bika-LIMS/pull/1484
from lims.upgrade import skip_pre315
if skip_pre315(aq_parent(aq_inner(tool))):
return True
portal = aq_parent(aq_inner(tool))
setup = portal.portal_setup
setup.runImportStepFromProfile('profile-bika.lims:default', 'typeinfo')
setup.runImportStepFromProfile('profile-bika.lims:default', 'propertiestool')
setup.runImportStepFromProfile('profile-bika.lims:default', 'factorytool')
setup.runImportStepFromProfile('profile-bika.lims:default', 'controlpanel')
setup.runImportStepFromProfile('profile-bika.lims:default', 'cssregistry')
setup.runImportStepFromProfile('profile-bika.lims:default', 'content')
setup.runImportStepFromProfile('profile-bika.lims:default', 'workflow')
setup.runImportStepFromProfile('profile-bika.lims:default', 'workflow-csv')
wf = getToolByName(portal, 'portal_workflow')
wf.updateRoleMappings()
mp = portal.manage_permission
mp(AddStorageLocation, ['Manager', 'Owner', 'LabManager', ], 1)
at = getToolByName(portal, 'archetype_tool')
at.setCatalogsByType('StorageLocation', ['bika_setup_catalog', 'portal_catalog'])
bika_setup = portal._getOb('bika_setup')
obj = bika_setup._getOb('bika_storagelocations')
obj.unmarkCreationFlag()
obj.reindexObject()
| agpl-3.0 | 3,020,037,346,150,486,000 | 39.97619 | 85 | 0.741429 | false |
rsutormin/narrative | src/biokbase/narrative/common/log_proxy.py | 7 | 20442 | """
Code to proxy logs from the narrative, over a socket, to a DB.
The proxy will tend to have root permissions so it can read protected
configuration files.
"""
__author__ = 'Dan Gunter <[email protected]>'
__date__ = '8/22/14'
import asyncore
from datetime import datetime
from dateutil.tz import tzlocal
import logging
from logging import handlers
import pymongo
import pickle
import re
import socket
import struct
import time
import yaml
# Local
from biokbase import narrative
from biokbase.narrative.common.kvp import parse_kvp
from biokbase.narrative.common.url_config import URLS
from biokbase.narrative.common import log_common
EVENT_MSG_SEP = log_common.EVENT_MSG_SEP
g_log = None # global logger
LOGGER_NAME = "log_proxy" # use this name for logger
m_fwd = None # global forwarder object
class DBAuthError(Exception):
def __init__(self, host, port, db):
msg = "Authorization failed to {host}:{port:d}/{db}".format(
host=host, port=port, db=db)
Exception.__init__(self, msg)
class Configuration(object):
def __init__(self, input_file):
"""
Read and parse configuration from input file.
Format is any valid YAML variant, which includes JSON.
But the basic thing to know is that "name: value" is supported.
:param input_file: Name or object to read from
:type input_file: str or file or None
:return: Configuration data
:rtype: dict
:raises: IOError, ValueError
"""
self._obj = {}
if input_file:
if not isinstance(input_file, file):
input_file = open(str(input_file), 'r')
try:
self._obj = yaml.load(input_file)
if self._obj is None:
raise ValueError("Empty configuration file")
except (IOError, ValueError):
raise
except Exception as err:
raise ValueError("Unknown error while parsing '{}': {}"
.format(input_file, err))
class ProxyConfiguration(Configuration):
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 32001
def __init__(self, conf):
Configuration.__init__(self, conf)
@property
def host(self):
return self._obj.get('host', self.DEFAULT_HOST)
@property
def port(self):
return self._obj.get('port', self.DEFAULT_PORT)
class ProxyConfigurationWrapper(ProxyConfiguration):
def __init__(self, conf):
ProxyConfiguration.__init__(self, conf)
if conf is None:
self._obj['host'] = URLS.log_proxy_host
self._obj['port'] = URLS.log_proxy_port
class DBConfiguration(Configuration):
"""
Parameters controlling connection to remote database server.
Use superclass to parse YAML file.
Spurious/unknown fields and sections are ignored.
`db_host` and `db_port` are set to `DEFAULT_DB_HOST` and `DEFAULT_DB_PORT`
if not given.
If `user` is omitted then no authentication will be attempted. If `user`
is given then `password` is required.
The `db` and `collection` are required to specify where data is inserted.
Example::
db_host: <database server listen addr>
db_port: <database server listen port>
user: <database user name>
password: <database password>
db: <MongoDB database name>
collection: <MongoDB collection name>
"""
DEFAULT_DB_HOST = 'localhost'
DEFAULT_DB_PORT = 27017
def __init__(self, *a, **k):
"""
Call superclass to parse configuration file, then perform some
sanity checks on the data.
:param a: Positional arguments for superclass constructor
:param k: Keyword arguments for superclass constructor
:raises: KeyError if required field is missing from configuration,
ValueError if the type/form of other values makes no sense.
"""
Configuration.__init__(self, *a, **k)
self._check_db_collection()
self._check_auth_keys()
def _check_db_collection(self):
d, c = 'db', 'collection'
for k in d, c:
if k not in self._obj:
raise KeyError('Missing {k} from configuration'.format(k=k))
d, c = self._obj[d], self._obj[c] # replace key with value
total_len = len(d) + len(c) + 1
if total_len > 123:
raise ValueError("Database + collection name is too long, "
"{:d} > 123: '{}.{}'".format(total_len, d, c))
# Check DB name for illegal chars
m = re.match('^[a-zA-Z][_a-zA-Z0-9]*', d)
if m is None:
raise ValueError("Initial character not a letter in database '{}'"
.format(d))
if m.end() < len(d):
raise ValueError("Bad character at {:d}: '{}' in database '{}'"
.format(m.end() + 1, d[m.end()], d))
# Check collection name for illegal chars
m = re.match('^[a-zA-Z][_.a-zA-Z0-9]*', c)
if m is None:
raise ValueError("Initial character not a letter in collection '{}'"
.format(c))
        if m.end() < len(c):
raise ValueError("Bad character at {:d}: '{}' in collection '{}'"
.format(m.end() + 1, c[m.end()], c))
def _check_auth_keys(self):
u, p = 'user', 'password'
if u in self._obj:
if not p in self._obj:
raise KeyError('Key "{}" given but "{}" missing'.format(u, p))
elif p in self._obj:
del self._obj[p] # just delete unused password
## Expose configuration values as class properties
@property
def db_host(self):
return self._obj.get('db_host', self.DEFAULT_DB_HOST)
@property
def db_port(self):
return self._obj.get('db_port', self.DEFAULT_DB_PORT)
@property
def user(self):
return self._obj.get('user', None)
@property
def password(self):
return self._obj.get('password', None)
@property
def db(self):
return self._obj.get('db', None)
@property
def collection(self):
return self._obj['collection']
class SyslogConfiguration(Configuration):
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 514
DEFAULT_FACILITY = 'user'
DEFAULT_PROTO = 'udp'
def __init__(self, *a, **k):
Configuration.__init__(self, *a, **k)
self.host, self.port, self.facility, self.proto = None, None, None, None
for default in filter(lambda x: x.startswith('DEFAULT_'),
vars(SyslogConfiguration).keys()):
# transform name to corresponding property in config file
prop = default.replace('DEFAULT_', 'syslog_').lower()
name = prop[7:] # strip 'syslog_' prefix
# set attr to value in config, or default
value = self._obj.get(prop, getattr(self, default))
setattr(self, name, value)
self.port = int(self.port)
self.proto = self.proto.lower()
# validation
h = handlers.SysLogHandler()
try:
h.encodePriority(self.facility, 'info')
except KeyError:
raise ValueError("Invalid syslog facility '{}', must be one of: {}"
.format(self.facility,
', '.join(h.facility_names)))
if not self.proto in ('tcp', 'udp'):
raise ValueError("Invalid syslog protocol '{}', must be either "
"'udp' or 'tcp'")
# keyword args for logging.SysLogHandler constructor
self.handler_args = {
'address': (self.host, self.port),
'facility': self.facility,
'socktype': {'tcp': socket.SOCK_STREAM,
'udp': socket.SOCK_DGRAM}[self.proto]}
def get_sample_config():
"""Get a sample configuration."""
fields = [
'# proxy listen host and port',
'host: {}'.format(ProxyConfiguration.DEFAULT_HOST),
'port: {}'.format(ProxyConfiguration.DEFAULT_PORT),
'# mongodb server host and port',
'db_host: {}'.format(DBConfiguration.DEFAULT_DB_HOST),
'db_port: {}'.format(DBConfiguration.DEFAULT_DB_PORT),
'# mongodb server user/pass and database',
'user: joeschmoe',
'password: letmein',
'db: mymongodb',
'collection: kbaselogs',
'# syslog destination',
'syslog_facility: {}'.format(SyslogConfiguration.DEFAULT_FACILITY),
'syslog_host: {}'.format(SyslogConfiguration.DEFAULT_HOST),
'syslog_port: {}'.format(SyslogConfiguration.DEFAULT_PORT),
'syslog_proto: {}'.format(SyslogConfiguration.DEFAULT_PROTO)
]
return '\n'.join(fields)
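def _sample_config_roundtrip_example(path='/tmp/log_proxy_sample.yml'):
    """Illustrative helper added for clarity; the proxy itself never calls it.
    It writes the sample configuration above to a file (the path is just a
    placeholder) and parses it back with DBConfiguration, which is the same
    way run() consumes an operator-supplied config file.
    """
    with open(path, 'w') as f:
        f.write(get_sample_config())
    return DBConfiguration(path)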
class LogForwarder(asyncore.dispatcher):
__host, __ip = None, None
def __init__(self, pconfig, meta=None, db=None, syslog=None):
asyncore.dispatcher.__init__(self)
self._meta = meta
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((pconfig.host, pconfig.port))
self.listen(5)
# only do this once; it takes ~5 sec
if self.__host is None:
g_log.info(
"Getting fully qualified domain name (may take a few seconds)")
self.__host = socket.getfqdn()
try:
self.__ip = socket.gethostbyname(self.__host)
except socket.gaierror:
self.__ip = '0.0.0.0'
g_log.info(
"Done getting fully qualified domain name: {}".format(self.__host))
ver = narrative.version()
self._meta.update({'host': {'name': self.__host, 'ip': self.__ip},
'ver': {'str': str(ver), 'major': ver.major,
'minor': ver.minor, 'patch': ver.patch}})
# handlers
self._hnd = []
if db:
coll = self.connect_mongo(db)
self._hnd.append(MongoDBHandler(coll))
if syslog:
h = logging.handlers.SysLogHandler(**syslog.handler_args)
self._hnd.append(SyslogHandler(h))
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
g_log.info('Accepted connection from {}'.format(addr))
LogStreamForwarder(sock, self._hnd, self._meta)
@staticmethod
def connect_mongo(config):
"""
Connect to configured MongoDB collection.
:param config: Params for connecting
:type config: Configuration
:return: The collection object
:rtype: pymongo.Collection
:raise: DBAuthError if auth fails
"""
client = pymongo.MongoClient(host=config.db_host, port=config.db_port)
database = client[config.db]
if config.user is not None:
if not database.authenticate(config.user, password=config.password):
raise DBAuthError(config.db_host, config.db_port, config.db)
collection = database[config.collection]
return collection
class LogStreamForwarder(asyncore.dispatcher):
def __init__(self, sock, hnd, meta):
"""Forward logs coming in on socket `sock` to handler list `hnd`.
"""
asyncore.dispatcher.__init__(self, sock)
self._meta, self._hnd = meta, hnd
self._hdr, self._dbg = '', g_log.isEnabledFor(logging.DEBUG)
self._body, self._body_remain = '', 0
def writable(self):
return False
def handle_read(self):
# read header
if self._body_remain == 0:
chunk = self.recv(4 - len(self._hdr))
self._hdr += chunk
if len(self._hdr) < 4:
return
# Parse data and calc. body
size = struct.unpack('>L', self._hdr)[0]
if size > 65536:
g_log.error("Log message size ({:d}) > 64K, possibly corrupt header"
": <{}>".format(size, self._hdr))
self._hdr = ''
return
self._body_remain = size
self._hdr = ''
if self._dbg:
g_log.debug("Expect msg size={}".format(size))
# read body data
if self._body_remain > 0:
chunk = self.recv(self._body_remain)
self._body += chunk
self._body_remain -= len(chunk)
if self._body_remain == 0:
# got whole body, now process it
                try:
                    record = pickle.loads(self._body)
                except Exception as err:
                    g_log.error("Could not unpickle record: {}".format(err))
                    return
                finally:
                    self._body = ''
if self._dbg:
g_log.debug("handle_read: record={}".format(record))
meta = self._meta or {}
# Dispatch to handlers
for h in self._hnd:
if self._dbg:
g_log.debug("Dispatch to handler {}".format(h))
h.handle(record, meta)
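def _send_framed_record_example(sock, record_dict):
    """Illustrative sender-side sketch added for clarity; the proxy only
    reads this framing.  A client must send a 4-byte big-endian length
    prefix followed by the pickled record dict, matching the
    struct.unpack('>L', ...) header parsing in handle_read() above.  The
    sock argument is assumed to be an already-connected TCP socket.
    """
    payload = pickle.dumps(record_dict)
    sock.sendall(struct.pack('>L', len(payload)) + payload)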
# Handlers
class Handler(object):
# extract these from the incoming records,
# incoming name is in key, outgoing name is in value
EXTRACT_META = {
'session': 'session_id',
'narrative': 'narr',
'client_ip': 'client_ip',
'user': 'user'}
def _get_record_meta(self, record):
return {val: record.get(key, '')
for key, val in self.EXTRACT_META.iteritems()}
class MongoDBHandler(Handler):
def __init__(self, coll):
self._coll = coll
def handle(self, record, meta):
try:
kbrec = DBRecord(record, strict=True)
except ValueError as err:
g_log.error("Bad input to 'handle_read': {}".format(err))
return
kbrec.record.update(meta)
kbrec.record.update(self._get_record_meta(kbrec.record))
self._coll.insert(kbrec.record)
class SyslogHandler(Handler):
def __init__(self, log_handler):
f = logging.Formatter("%(levelname)s %(asctime)s %(name)s %(message)s")
f.converter = time.gmtime
log_handler.setFormatter(f)
log_handler.setLevel(logging.DEBUG - 1) # everything!
self._hnd = log_handler
self._dbg = g_log.isEnabledFor(logging.DEBUG)
def handle(self, record, meta):
if self._dbg:
g_log.debug("SyslogHandler: rec.in={}".format(record))
kvp = meta.copy()
kvp.update(self._get_record_meta(record))
message = record.get('message', record.get('msg', ''))
record['msg'] = message + ' ' + log_common.format_kvps(kvp)
if 'message' in record:
            del record['message']  # drop the now-redundant original field
if self._dbg:
g_log.debug("SyslogHandler: rec.out={}".format(record))
logrec = logging.makeLogRecord(record)
self._hnd.emit(logrec)
# Log record
class DBRecord(object):
"""Convert logged record (dict) to object that we can store in a DB.
"""
def __init__(self, record, strict=False):
"""Process input record. Results are stored in `record` attribute.
Anything should parse unless `strict` is passed in, which is still
pretty lenient but requires the "event;message" format.
:param record: Input record which is *modified in-place*
:type record: dict
"""
self.strict = strict
self.record = record.copy()
try:
self._extract_info()
self._strip_logging_junk()
self._fix_types()
except ValueError:
self.record = None
raise
def _extract_info(self):
"""Dissect the 'message' contents to extract event name and any
embedded key-value pairs.
"""
rec = self.record # alias
message = rec.get('message', rec.get('msg', None))
if message is None:
g_log.error("No 'message' or 'msg' field found in record: {}"
.format(rec))
message = "unknown;Message field not found"
# Split out event name
try:
event, msg = message.split(log_common.EVENT_MSG_SEP, 1)
except ValueError:
event, msg = 'event', message # assign generic event name
if self.strict:
raise ValueError("Cannot split event/msg in '{}'"
.format(message))
# Break into key=value pairs
text = parse_kvp(msg, rec)
# Anything not parsed goes back into message
rec['msg'] = text
# Event gets its own field, too
rec['event'] = event
# Levelname is too long
if 'levelname' in rec:
rec['level'] = rec['levelname']
del rec['levelname']
else:
rec['level'] = logging.getLevelName(logging.INFO)
def _strip_logging_junk(self):
"""Delete/rename fields from logging library."""
rec = self.record # alias
# not needed at all
for k in ('msg', 'threadName', 'thread', 'pathname', 'msecs',
'levelno', 'asctime', 'relativeCreated', 'filename',
'processName', 'process', 'module', 'lineno', 'funcName'):
if k in rec:
del rec[k]
# rename
for old_name, new_name in (('name', 'method'),):
if old_name in rec:
rec[new_name] = rec[old_name]
del rec[old_name]
# remove exception stuff if empty
if rec.get('exc_info', None) is None:
for k in 'exc_info', 'exc_text':
if k in rec:
del rec[k]
# remove args if empty
if 'args' in rec:
if not rec['args']:
del rec['args']
elif self.strict:
raise ValueError("missing 'args'")
def _fix_types(self):
"""Fix types, mainly of fields that were parsed out of the message."""
rec = self.record # alias
# duration
if 'dur' in rec:
rec['dur'] = float(rec['dur'])
# convert created to datetime type (converted on insert by pymongo)
if 'created' in rec:
ts = rec.get('created')
del rec['created']
else:
ts = 0
date = datetime.fromtimestamp(ts, tzlocal())
rec['ts'] = {'sec': ts, 'date': date, 'tz': date.tzname()}
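def _db_record_example():
    """Illustrative helper added for clarity; not used by the proxy.  It shows
    how a raw python-logging record dict is normalized by DBRecord.  The
    'narr=42' token is parsed out by parse_kvp(), so treating it as a separate
    field of the result is an assumption about that helper.
    """
    raw = {'message': 'open' + EVENT_MSG_SEP + 'narr=42 opened narrative',
           'levelname': 'INFO',
           'created': 1408747530.0,
           'name': 'biokbase.narrative',
           'args': (),
           'exc_info': None}
    rec = DBRecord(raw).record
    # rec now carries 'event'='open', 'level'='INFO',
    # 'method'='biokbase.narrative', and a 'ts' dict with sec/date/tz.
    return rec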
def run(args):
"""
Run the proxy
:param args: Object with the following attributes
conf - Configuration filename
:return:
"""
global m_fwd, g_log
g_log = logging.getLogger(LOGGER_NAME)
# Read configuration for destinations
try:
db_config = DBConfiguration(args.conf)
except (IOError, ValueError, KeyError) as err:
g_log.warn("Database configuration failed: {}".format(err))
db_config = None
try:
syslog_config = SyslogConfiguration(args.conf)
except (IOError, ValueError, KeyError) as err:
g_log.warn("Syslog configuration failed: {}".format(err))
syslog_config = None
# Read configuration for proxy
try:
pconfig = ProxyConfiguration(args.conf)
except (IOError, ValueError, KeyError) as err:
g_log.critical("Proxy configuration failed: {}".format(err))
return 2
# Create LogForwarder
try:
metadata = dict(args.meta) if args.meta else {}
m_fwd = LogForwarder(pconfig, db=db_config, syslog=syslog_config,
meta=metadata)
except pymongo.errors.ConnectionFailure as err:
g_log.warn("Could not connect to MongoDB server at '{}:{:d}': {}"
.format(db_config.db_host, db_config.db_port, err))
# Let user know what's up
g_log.info("Listening on {}:{:d}".format(pconfig.host, pconfig.port))
if db_config:
g_log.info("Connected to MongoDB server at {}:{:d}"
.format(db_config.db_host, db_config.db_port))
if syslog_config:
g_log.info("Connected to syslog at {}:{:d} ({})"
.format(syslog_config.host, syslog_config.port,
syslog_config.proto.upper()))
# Main loop
g_log.debug("Start main loop")
asyncore.loop()
g_log.debug("Stop main loop")
return 0
| mit | -8,789,387,670,317,029,000 | 34.80035 | 84 | 0.560366 | false |
openstack/manila | manila/tests/share/drivers/ibm/test_gpfs.py | 1 | 76153 | # Copyright (c) 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the IBM GPFS driver module."""
import re
import socket
from unittest import mock
import ddt
from oslo_config import cfg
from manila import context
from manila import exception
import manila.share.configuration as config
import manila.share.drivers.ibm.gpfs as gpfs
from manila.share import share_types
from manila import test
from manila.tests import fake_share
from manila import utils
CONF = cfg.CONF
@ddt.ddt
class GPFSShareDriverTestCase(test.TestCase):
"""Tests GPFSShareDriver."""
def setUp(self):
super(GPFSShareDriverTestCase, self).setUp()
self._context = context.get_admin_context()
self._gpfs_execute = mock.Mock(return_value=('', ''))
self.GPFS_PATH = '/usr/lpp/mmfs/bin/'
self._helper_fake = mock.Mock()
CONF.set_default('driver_handles_share_servers', False)
CONF.set_default('share_backend_name', 'GPFS')
self.fake_conf = config.Configuration(None)
self._driver = gpfs.GPFSShareDriver(execute=self._gpfs_execute,
configuration=self.fake_conf)
self._knfs_helper = gpfs.KNFSHelper(self._gpfs_execute,
self.fake_conf)
self._ces_helper = gpfs.CESHelper(self._gpfs_execute,
self.fake_conf)
self.fakedev = "/dev/gpfs0"
self.fakefspath = "/gpfs0"
self.fakesharepath = "/gpfs0/share-fakeid"
self.fakeexistingshare = "existingshare"
self.fakesnapshotpath = "/gpfs0/.snapshots/snapshot-fakesnapshotid"
self.fake_ces_exports = """
mmcesnfslsexport:nfsexports:HEADER:version:reserved:reserved:Path:Delegations:Clients:Access_Type:Protocols:Transports:Squash:Anonymous_uid:Anonymous_gid:SecType:PrivilegedPort:DefaultDelegations:Manage_Gids:NFS_Commit:
mmcesnfslsexport:nfsexports:0:1:::/gpfs0/share-fakeid:none:44.3.2.11:RW:3,4:TCP:ROOT_SQUASH:-2:-2:SYS:FALSE:none:FALSE:FALSE:
mmcesnfslsexport:nfsexports:0:1:::/gpfs0/share-fakeid:none:1:2:3:4:5:6:7:8:RW:3,4:TCP:ROOT_SQUASH:-2:-2:SYS:FALSE:none:FALSE:FALSE:
mmcesnfslsexport:nfsexports:0:1:::/gpfs0/share-fakeid:none:10.0.0.1:RW:3,4:TCP:ROOT_SQUASH:-2:-2:SYS:FALSE:none:FALSE:FALSE:
"""
self.fake_ces_exports_not_found = """
mmcesnfslsexport:nfsexports:HEADER:version:reserved:reserved:Path:Delegations:Clients:Access_Type:Protocols:Transports:Squash:Anonymous_uid:Anonymous_gid:SecType:PrivilegedPort:DefaultDelegations:Manage_Gids:NFS_Commit:
"""
self.mock_object(gpfs.os.path, 'exists', mock.Mock(return_value=True))
self._driver._helpers = {
'CES': self._helper_fake
}
self.share = fake_share.fake_share(share_proto='NFS',
host='fakehost@fakehost#GPFS')
self.server = {
'backend_details': {
'ip': '1.2.3.4',
'instance_id': 'fake'
}
}
self.access = fake_share.fake_access()
self.snapshot = fake_share.fake_snapshot()
self.local_ip = "192.11.22.1"
self.remote_ip = "192.11.22.2"
self.remote_ip2 = "2.2.2.2"
gpfs_nfs_server_list = [self.remote_ip, self.local_ip, self.remote_ip2,
"fake_location"]
self._knfs_helper.configuration.gpfs_nfs_server_list = (
gpfs_nfs_server_list)
self._ces_helper.configuration.gpfs_nfs_server_list = (
gpfs_nfs_server_list)
self._ces_helper.configuration.ganesha_config_path = (
"fake_ganesha_config_path")
self.sshlogin = "fake_login"
self.sshkey = "fake_sshkey"
self.gservice = "fake_ganesha_service"
self._ces_helper.configuration.gpfs_ssh_login = self.sshlogin
self._ces_helper.configuration.gpfs_ssh_private_key = self.sshkey
self._ces_helper.configuration.ganesha_service_name = self.gservice
self.mock_object(socket, 'gethostname',
mock.Mock(return_value="testserver"))
self.mock_object(socket, 'gethostbyname_ex', mock.Mock(
return_value=('localhost',
['localhost.localdomain', 'testserver'],
['127.0.0.1', self.local_ip])
))
def test__run_ssh(self):
cmd_list = ['fake', 'cmd']
expected_cmd = 'fake cmd'
ssh_pool = mock.Mock()
ssh = mock.Mock()
self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool))
ssh_pool.item = mock.Mock(return_value=ssh)
setattr(ssh, '__enter__', mock.Mock())
setattr(ssh, '__exit__', mock.Mock())
self.mock_object(self._driver, '_gpfs_ssh_execute')
self._driver._run_ssh(self.local_ip, cmd_list)
self._driver._gpfs_ssh_execute.assert_called_once_with(
mock.ANY, expected_cmd, check_exit_code=True,
ignore_exit_code=None)
def test__run_ssh_exception(self):
cmd_list = ['fake', 'cmd']
ssh_pool = mock.Mock()
ssh = mock.Mock()
self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool))
ssh_pool.item = mock.Mock(return_value=ssh)
self.mock_object(self._driver, '_gpfs_ssh_execute')
self.assertRaises(exception.GPFSException,
self._driver._run_ssh,
self.local_ip, cmd_list)
def test__gpfs_ssh_execute(self):
cmd = 'fake cmd'
expected_out = 'cmd successful'
expected_err = 'cmd error'
ssh = mock.Mock()
stdin_stream = mock.Mock()
stdout_stream = mock.Mock()
stderr_stream = mock.Mock()
ssh.exec_command = mock.Mock(return_value=(stdin_stream,
stdout_stream,
stderr_stream))
stdout_stream.channel.recv_exit_status = mock.Mock(return_value=-1)
stdout_stream.read = mock.Mock(return_value=expected_out)
stderr_stream.read = mock.Mock(return_value=expected_err)
stdin_stream.close = mock.Mock()
actual_out, actual_err = self._driver._gpfs_ssh_execute(ssh, cmd)
self.assertEqual(actual_out, expected_out)
self.assertEqual(actual_err, expected_err)
def test__gpfs_ssh_execute_exception(self):
cmd = 'fake cmd'
ssh = mock.Mock()
stdin_stream = mock.Mock()
stdout_stream = mock.Mock()
stderr_stream = mock.Mock()
ssh.exec_command = mock.Mock(return_value=(stdin_stream,
stdout_stream,
stderr_stream))
stdout_stream.channel.recv_exit_status = mock.Mock(return_value=1)
stdout_stream.read = mock.Mock()
stderr_stream.read = mock.Mock()
stdin_stream.close = mock.Mock()
self.assertRaises(exception.ProcessExecutionError,
self._driver._gpfs_ssh_execute,
ssh, cmd)
def test_get_share_stats_refresh_false(self):
self._driver._stats = {'fake_key': 'fake_value'}
result = self._driver.get_share_stats(False)
self.assertEqual(self._driver._stats, result)
def test_get_share_stats_refresh_true(self):
self.mock_object(
self._driver, '_get_available_capacity',
mock.Mock(return_value=(11111.0, 12345.0)))
result = self._driver.get_share_stats(True)
expected_keys = [
'qos', 'driver_version', 'share_backend_name',
'free_capacity_gb', 'total_capacity_gb',
'driver_handles_share_servers',
'reserved_percentage', 'vendor_name', 'storage_protocol',
]
for key in expected_keys:
self.assertIn(key, result)
self.assertFalse(result['driver_handles_share_servers'])
self.assertEqual('IBM', result['vendor_name'])
self._driver._get_available_capacity.assert_called_once_with(
self._driver.configuration.gpfs_mount_point_base)
def test_do_setup(self):
self.mock_object(self._driver, '_setup_helpers')
self._driver.do_setup(self._context)
self.assertEqual(self._driver._gpfs_execute,
self._driver._gpfs_remote_execute)
self._driver._setup_helpers.assert_called_once_with()
def test_do_setup_gpfs_local_execute(self):
self.mock_object(self._driver, '_setup_helpers')
self._driver.configuration.is_gpfs_node = True
self._driver.do_setup(self._context)
self.assertEqual(self._driver._gpfs_execute,
self._driver._gpfs_local_execute)
self._driver._setup_helpers.assert_called_once_with()
def test_setup_helpers(self):
self._driver._helpers = {}
CONF.set_default('gpfs_share_helpers', ['CES=fakenfs'])
self.mock_object(gpfs.importutils, 'import_class',
mock.Mock(return_value=self._helper_fake))
self._driver._setup_helpers()
gpfs.importutils.import_class.assert_has_calls(
[mock.call('fakenfs')]
)
self.assertEqual(len(self._driver._helpers), 1)
@ddt.data(fake_share.fake_share(),
fake_share.fake_share(share_proto='NFSBOGUS'))
def test__get_helper_with_wrong_proto(self, share):
self.assertRaises(exception.InvalidShare,
self._driver._get_helper, share)
def test__local_path(self):
sharename = 'fakesharename'
self._driver.configuration.gpfs_mount_point_base = (
self.fakefspath)
local_path = self._driver._local_path(sharename)
self.assertEqual(self.fakefspath + '/' + sharename,
local_path)
def test__get_share_path(self):
self._driver.configuration.gpfs_mount_point_base = (
self.fakefspath)
share_path = self._driver._get_share_path(self.share)
self.assertEqual(self.fakefspath + '/' + self.share['name'],
share_path)
def test__get_snapshot_path(self):
self._driver.configuration.gpfs_mount_point_base = (
self.fakefspath)
snapshot_path = self._driver._get_snapshot_path(self.snapshot)
self.assertEqual(self.fakefspath + '/' + self.snapshot['share_name'] +
'/.snapshots/' + self.snapshot['name'],
snapshot_path)
def test_check_for_setup_error_for_gpfs_state(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=False))
self.assertRaises(exception.GPFSException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_export_ip(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = None
self.assertRaises(exception.InvalidParameterValue,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_gpfs_mount_point_base(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver.configuration.gpfs_mount_point_base = 'test'
self.assertRaises(exception.GPFSException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_directory_check(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver.configuration.gpfs_mount_point_base = self.fakefspath
self.mock_object(self._driver, '_is_dir',
mock.Mock(return_value=False))
self.assertRaises(exception.GPFSException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_gpfs_path_check(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver.configuration.gpfs_mount_point_base = self.fakefspath
self.mock_object(self._driver, '_is_dir',
mock.Mock(return_value=True))
self.mock_object(self._driver, '_is_gpfs_path',
mock.Mock(return_value=False))
self.assertRaises(exception.GPFSException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_nfs_server_type(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver.configuration.gpfs_mount_point_base = self.fakefspath
self.mock_object(self._driver, '_is_dir',
mock.Mock(return_value=True))
self.mock_object(self._driver, '_is_gpfs_path',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_nfs_server_type = 'test'
self.assertRaises(exception.InvalidParameterValue,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_nfs_server_list(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver.configuration.gpfs_mount_point_base = self.fakefspath
self.mock_object(self._driver, '_is_dir',
mock.Mock(return_value=True))
self.mock_object(self._driver, '_is_gpfs_path',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_nfs_server_type = 'KNFS'
self._driver.configuration.gpfs_nfs_server_list = None
self.assertRaises(exception.InvalidParameterValue,
self._driver.check_for_setup_error)
def test__get_available_capacity(self):
path = self.fakefspath
mock_out = "Filesystem 1-blocks Used Available Capacity Mounted on\n\
/dev/gpfs0 100 30 70 30% /gpfs0"
self.mock_object(self._driver, '_gpfs_execute',
mock.Mock(return_value=(mock_out, '')))
available, size = self._driver._get_available_capacity(path)
self.assertEqual(70, available)
self.assertEqual(100, size)
def test_create_share(self):
self._helper_fake.create_export.return_value = 'fakelocation'
methods = ('_create_share', '_get_share_path')
for method in methods:
self.mock_object(self._driver, method)
result = self._driver.create_share(self._context, self.share,
share_server=self.server)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_share_path.assert_called_once_with(self.share)
self.assertEqual(result, 'fakelocation')
def test_create_share_from_snapshot(self):
self._helper_fake.create_export.return_value = 'fakelocation'
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._driver._create_share_from_snapshot = mock.Mock()
result = self._driver.create_share_from_snapshot(self._context,
self.share,
self.snapshot,
share_server=None)
self._driver._get_share_path.assert_called_once_with(self.share)
self._driver._create_share_from_snapshot.assert_called_once_with(
self.share, self.snapshot,
self.fakesharepath
)
self.assertEqual(result, 'fakelocation')
def test_create_snapshot(self):
self._driver._create_share_snapshot = mock.Mock()
self._driver.create_snapshot(self._context, self.snapshot,
share_server=None)
self._driver._create_share_snapshot.assert_called_once_with(
self.snapshot
)
def test_delete_share(self):
self._driver._get_share_path = mock.Mock(
return_value=self.fakesharepath
)
self._driver._delete_share = mock.Mock()
self._driver.delete_share(self._context, self.share,
share_server=None)
self._driver._get_share_path.assert_called_once_with(self.share)
self._driver._delete_share.assert_called_once_with(self.share)
self._helper_fake.remove_export.assert_called_once_with(
self.fakesharepath, self.share
)
def test_delete_snapshot(self):
self._driver._delete_share_snapshot = mock.Mock()
self._driver.delete_snapshot(self._context, self.snapshot,
share_server=None)
self._driver._delete_share_snapshot.assert_called_once_with(
self.snapshot
)
def test__delete_share_snapshot(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(return_value=0)
self._driver._delete_share_snapshot(self.snapshot)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmdelsnapshot', self.fakedev,
self.snapshot['name'], '-j', self.snapshot['share_name']
)
self._driver._get_gpfs_device.assert_called_once_with()
def test__delete_share_snapshot_exception(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._delete_share_snapshot, self.snapshot)
self._driver._get_gpfs_device.assert_called_once_with()
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmdelsnapshot', self.fakedev,
self.snapshot['name'], '-j', self.snapshot['share_name']
)
def test_extend_share(self):
self._driver._extend_share = mock.Mock()
self._driver.extend_share(self.share, 10)
self._driver._extend_share.assert_called_once_with(self.share, 10)
def test__extend_share(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(return_value=True)
self._driver._extend_share(self.share, 10)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' +
self.share['name'], '--block', '0:10G')
self._driver._get_gpfs_device.assert_called_once_with()
def test__extend_share_exception(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._extend_share, self.share, 10)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' +
self.share['name'], '--block', '0:10G')
self._driver._get_gpfs_device.assert_called_once_with()
def test_update_access_allow(self):
"""Test allow_access functionality via update_access."""
self._driver._get_share_path = mock.Mock(
return_value=self.fakesharepath
)
self._helper_fake.allow_access = mock.Mock()
self._driver.update_access(self._context,
self.share,
["ignored"],
[self.access],
[],
share_server=None)
self._helper_fake.allow_access.assert_called_once_with(
self.fakesharepath, self.share, self.access)
self.assertFalse(self._helper_fake.resync_access.called)
self._driver._get_share_path.assert_called_once_with(self.share)
def test_update_access_deny(self):
"""Test deny_access functionality via update_access."""
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._helper_fake.deny_access = mock.Mock()
self._driver.update_access(self._context,
self.share,
["ignored"],
[],
[self.access],
share_server=None)
self._helper_fake.deny_access.assert_called_once_with(
self.fakesharepath, self.share, self.access)
self.assertFalse(self._helper_fake.resync_access.called)
self._driver._get_share_path.assert_called_once_with(self.share)
def test_update_access_both(self):
"""Test update_access with allow and deny lists."""
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._helper_fake.deny_access = mock.Mock()
self._helper_fake.allow_access = mock.Mock()
self._helper_fake.resync_access = mock.Mock()
access_1 = fake_share.fake_access(access_to="1.1.1.1")
access_2 = fake_share.fake_access(access_to="2.2.2.2")
self._driver.update_access(self._context,
self.share,
["ignore"],
[access_1],
[access_2],
share_server=None)
self.assertFalse(self._helper_fake.resync_access.called)
self._helper_fake.allow_access.assert_called_once_with(
self.fakesharepath, self.share, access_1)
self._helper_fake.deny_access.assert_called_once_with(
self.fakesharepath, self.share, access_2)
self._driver._get_share_path.assert_called_once_with(self.share)
def test_update_access_resync(self):
"""Test recovery mode update_access."""
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._helper_fake.deny_access = mock.Mock()
self._helper_fake.allow_access = mock.Mock()
self._helper_fake.resync_access = mock.Mock()
access_1 = fake_share.fake_access(access_to="1.1.1.1")
access_2 = fake_share.fake_access(access_to="2.2.2.2")
self._driver.update_access(self._context,
self.share,
[access_1, access_2],
[],
[],
share_server=None)
self._helper_fake.resync_access.assert_called_once_with(
self.fakesharepath, self.share, [access_1, access_2])
self.assertFalse(self._helper_fake.allow_access.called)
self.assertFalse(self._helper_fake.allow_access.called)
self._driver._get_share_path.assert_called_once_with(self.share)
def test__check_gpfs_state_active(self):
fakeout = "mmgetstate::state:\nmmgetstate::active:"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
result = self._driver._check_gpfs_state()
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmgetstate', '-Y')
self.assertEqual(result, True)
def test__check_gpfs_state_down(self):
fakeout = "mmgetstate::state:\nmmgetstate::down:"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
result = self._driver._check_gpfs_state()
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmgetstate', '-Y')
self.assertEqual(result, False)
def test__check_gpfs_state_wrong_output_exception(self):
fakeout = "mmgetstate fake out"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
self.assertRaises(exception.GPFSException,
self._driver._check_gpfs_state)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmgetstate', '-Y')
def test__check_gpfs_state_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._check_gpfs_state)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmgetstate', '-Y')
def test__is_dir_success(self):
fakeoutput = "directory"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeoutput, ''))
result = self._driver._is_dir(self.fakefspath)
self._driver._gpfs_execute.assert_called_once_with(
'stat', '--format=%F', self.fakefspath, run_as_root=False
)
self.assertEqual(result, True)
def test__is_dir_failure(self):
fakeoutput = "regular file"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeoutput, ''))
result = self._driver._is_dir(self.fakefspath)
self._driver._gpfs_execute.assert_called_once_with(
'stat', '--format=%F', self.fakefspath, run_as_root=False
)
self.assertEqual(result, False)
def test__is_dir_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._is_dir, self.fakefspath)
self._driver._gpfs_execute.assert_called_once_with(
'stat', '--format=%F', self.fakefspath, run_as_root=False
)
def test__is_gpfs_path_ok(self):
self._driver._gpfs_execute = mock.Mock(return_value=0)
result = self._driver._is_gpfs_path(self.fakefspath)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmlsattr', self.fakefspath)
self.assertEqual(result, True)
def test__is_gpfs_path_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._is_gpfs_path,
self.fakefspath)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmlsattr', self.fakefspath)
def test__get_gpfs_device(self):
fakeout = "Filesystem\n" + self.fakedev
orig_val = self._driver.configuration.gpfs_mount_point_base
self._driver.configuration.gpfs_mount_point_base = self.fakefspath
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
result = self._driver._get_gpfs_device()
self._driver._gpfs_execute.assert_called_once_with('df',
self.fakefspath)
self.assertEqual(result, self.fakedev)
self._driver.configuration.gpfs_mount_point_base = orig_val
def test__get_gpfs_device_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.GPFSException,
self._driver._get_gpfs_device)
def test__create_share(self):
sizestr = '%sG' % self.share['size']
self._driver._gpfs_execute = mock.Mock(return_value=True)
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._create_share(self.share)
self._driver._gpfs_execute.assert_any_call(
self.GPFS_PATH + 'mmcrfileset', self.fakedev, self.share['name'],
'--inode-space', 'new')
self._driver._gpfs_execute.assert_any_call(
self.GPFS_PATH + 'mmlinkfileset', self.fakedev, self.share['name'],
'-J', self.fakesharepath)
self._driver._gpfs_execute.assert_any_call(
self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' +
self.share['name'], '--block', '0:' + sizestr)
self._driver._gpfs_execute.assert_any_call(
'chmod', '777', self.fakesharepath)
self._driver._local_path.assert_called_once_with(self.share['name'])
self._driver._get_gpfs_device.assert_called_once_with()
def test__create_share_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._create_share, self.share)
self._driver._get_gpfs_device.assert_called_once_with()
self._driver._local_path.assert_called_once_with(self.share['name'])
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmcrfileset', self.fakedev, self.share['name'],
'--inode-space', 'new')
def test__delete_share(self):
self._driver._gpfs_execute = mock.Mock(return_value=True)
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._delete_share(self.share)
self._driver._gpfs_execute.assert_any_call(
self.GPFS_PATH + 'mmunlinkfileset', self.fakedev,
self.share['name'], '-f', ignore_exit_code=[2])
self._driver._gpfs_execute.assert_any_call(
self.GPFS_PATH + 'mmdelfileset', self.fakedev, self.share['name'],
'-f', ignore_exit_code=[2])
self._driver._get_gpfs_device.assert_called_once_with()
def test__delete_share_exception(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._delete_share, self.share)
self._driver._get_gpfs_device.assert_called_once_with()
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmunlinkfileset', self.fakedev,
self.share['name'], '-f', ignore_exit_code=[2])
def test__create_share_snapshot(self):
self._driver._gpfs_execute = mock.Mock(return_value=True)
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._create_share_snapshot(self.snapshot)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmcrsnapshot', self.fakedev,
self.snapshot['name'], '-j', self.snapshot['share_name']
)
self._driver._get_gpfs_device.assert_called_once_with()
def test__create_share_snapshot_exception(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._create_share_snapshot, self.snapshot)
self._driver._get_gpfs_device.assert_called_once_with()
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmcrsnapshot', self.fakedev,
self.snapshot['name'], '-j', self.snapshot['share_name']
)
def test__create_share_from_snapshot(self):
self._driver._gpfs_execute = mock.Mock(return_value=True)
self._driver._create_share = mock.Mock(return_value=True)
self._driver._get_snapshot_path = mock.Mock(return_value=self.
fakesnapshotpath)
self._driver._create_share_from_snapshot(self.share, self.snapshot,
self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
'rsync', '-rp', self.fakesnapshotpath + '/', self.fakesharepath
)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_snapshot_path.assert_called_once_with(self.snapshot)
def test__create_share_from_snapshot_exception(self):
self._driver._create_share = mock.Mock(return_value=True)
self._driver._get_snapshot_path = mock.Mock(return_value=self.
fakesnapshotpath)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._create_share_from_snapshot,
self.share, self.snapshot, self.fakesharepath)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_snapshot_path.assert_called_once_with(self.snapshot)
self._driver._gpfs_execute.assert_called_once_with(
'rsync', '-rp', self.fakesnapshotpath + '/', self.fakesharepath
)
@ddt.data("mmlsfileset::allocInodes:\nmmlsfileset::100096:",
"mmlsfileset::allocInodes:\nmmlsfileset::0:")
def test__is_share_valid_with_quota(self, fakeout):
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
result = self._driver._is_share_valid(self.fakedev, self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmlsfileset', self.fakedev, '-J',
self.fakesharepath, '-L', '-Y')
if fakeout == "mmlsfileset::allocInodes:\nmmlsfileset::100096:":
self.assertTrue(result)
else:
self.assertFalse(result)
def test__is_share_valid_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.ManageInvalidShare,
self._driver._is_share_valid, self.fakedev,
self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmlsfileset', self.fakedev, '-J',
self.fakesharepath, '-L', '-Y')
def test__is_share_valid_no_share_exist_exception(self):
fakeout = "mmlsfileset::allocInodes:"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
self.assertRaises(exception.GPFSException,
self._driver._is_share_valid, self.fakedev,
self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmlsfileset', self.fakedev, '-J',
self.fakesharepath, '-L', '-Y')
def test__get_share_name(self):
fakeout = "mmlsfileset::filesetName:\nmmlsfileset::existingshare:"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
result = self._driver._get_share_name(self.fakedev, self.fakesharepath)
self.assertEqual('existingshare', result)
def test__get_share_name_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.ManageInvalidShare,
self._driver._get_share_name, self.fakedev,
self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmlsfileset', self.fakedev, '-J',
self.fakesharepath, '-L', '-Y')
def test__get_share_name_no_share_exist_exception(self):
fakeout = "mmlsfileset::filesetName:"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
self.assertRaises(exception.GPFSException,
self._driver._get_share_name, self.fakedev,
self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmlsfileset', self.fakedev, '-J',
self.fakesharepath, '-L', '-Y')
@ddt.data("mmlsquota::blockLimit:\nmmlsquota::1048577",
"mmlsquota::blockLimit:\nmmlsquota::1048576",
"mmlsquota::blockLimit:\nmmlsquota::0")
def test__manage_existing(self, fakeout):
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
self._helper_fake.create_export.return_value = 'fakelocation'
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
actual_size, actual_path = self._driver._manage_existing(
self.fakedev, self.share, self.fakeexistingshare)
self._driver._gpfs_execute.assert_any_call(
self.GPFS_PATH + 'mmunlinkfileset', self.fakedev,
self.fakeexistingshare, '-f')
self._driver._gpfs_execute.assert_any_call(
self.GPFS_PATH + 'mmchfileset', self.fakedev,
self.fakeexistingshare, '-j', self.share['name'])
self._driver._gpfs_execute.assert_any_call(
self.GPFS_PATH + 'mmlinkfileset', self.fakedev, self.share['name'],
'-J', self.fakesharepath)
self._driver._gpfs_execute.assert_any_call(
'chmod', '777', self.fakesharepath)
if fakeout == "mmlsquota::blockLimit:\nmmlsquota::1048577":
self._driver._gpfs_execute.assert_called_with(
self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' +
self.share['name'], '--block', '0:2G')
self.assertEqual(2, actual_size)
self.assertEqual('fakelocation', actual_path)
elif fakeout == "mmlsquota::blockLimit:\nmmlsquota::0":
self._driver._gpfs_execute.assert_called_with(
self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' +
self.share['name'], '--block', '0:1G')
self.assertEqual(1, actual_size)
self.assertEqual('fakelocation', actual_path)
else:
self.assertEqual(1, actual_size)
self.assertEqual('fakelocation', actual_path)
def test__manage_existing_fileset_unlink_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_called_once_with(self.share['name'])
self._driver._gpfs_execute.assert_called_once_with(
self.GPFS_PATH + 'mmunlinkfileset', self.fakedev,
self.fakeexistingshare, '-f')
def test__manage_existing_fileset_creation_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev,
self.fakeexistingshare, '-f'),
mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev,
self.fakeexistingshare, '-j', self.share['name'])])
def test__manage_existing_fileset_relink_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', '', exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev,
self.fakeexistingshare, '-f'),
mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev,
self.fakeexistingshare, '-j', self.share['name']),
mock.call(self.GPFS_PATH + 'mmlinkfileset', self.fakedev,
self.share['name'], '-J', self.fakesharepath)])
def test__manage_existing_permission_change_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', '', '', exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev,
self.fakeexistingshare, '-f'),
mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev,
self.fakeexistingshare, '-j', self.share['name']),
mock.call(self.GPFS_PATH + 'mmlinkfileset', self.fakedev,
self.share['name'], '-J', self.fakesharepath),
mock.call('chmod', '777', self.fakesharepath)])
def test__manage_existing_checking_quota_of_fileset_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', '', '', '', exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev,
self.fakeexistingshare, '-f'),
mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev,
self.fakeexistingshare, '-j', self.share['name']),
mock.call(self.GPFS_PATH + 'mmlinkfileset', self.fakedev,
self.share['name'], '-J', self.fakesharepath),
mock.call('chmod', '777', self.fakesharepath),
mock.call(self.GPFS_PATH + 'mmlsquota', '-j', self.share['name'],
'-Y', self.fakedev)])
def test__manage_existing_unable_to_get_quota_of_fileset_exception(self):
fakeout = "mmlsquota::blockLimit:"
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_any_call(
self.GPFS_PATH + 'mmunlinkfileset', self.fakedev,
self.fakeexistingshare, '-f')
self._driver._gpfs_execute.assert_any_call(
self.GPFS_PATH + 'mmchfileset', self.fakedev,
self.fakeexistingshare, '-j', self.share['name'])
self._driver._gpfs_execute.assert_any_call(
self.GPFS_PATH + 'mmlinkfileset', self.fakedev,
self.share['name'], '-J', self.fakesharepath)
self._driver._gpfs_execute.assert_any_call(
'chmod', '777', self.fakesharepath)
self._driver._gpfs_execute.assert_called_with(
self.GPFS_PATH + 'mmlsquota', '-j', self.share['name'],
'-Y', self.fakedev)
def test__manage_existing_set_quota_of_fileset_less_than_1G_exception(
self):
sizestr = '1G'
mock_out = "mmlsquota::blockLimit:\nmmlsquota::0:", None
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', '', '', '', mock_out,
exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev,
self.fakeexistingshare, '-f'),
mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev,
self.fakeexistingshare, '-j', self.share['name']),
mock.call(self.GPFS_PATH + 'mmlinkfileset', self.fakedev,
self.share['name'], '-J', self.fakesharepath),
mock.call('chmod', '777', self.fakesharepath),
mock.call(self.GPFS_PATH + 'mmlsquota', '-j', self.share['name'],
'-Y', self.fakedev),
mock.call(self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' +
self.share['name'], '--block', '0:' + sizestr)])
    def test__manage_existing_set_quota_of_fileset_greater_than_1G_exception(
self):
sizestr = '2G'
mock_out = "mmlsquota::blockLimit:\nmmlsquota::1048577:", None
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', '', '', '', mock_out,
exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev,
self.fakeexistingshare, '-f'),
mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev,
self.fakeexistingshare, '-j', self.share['name']),
mock.call(self.GPFS_PATH + 'mmlinkfileset', self.fakedev,
self.share['name'], '-J', self.fakesharepath),
mock.call('chmod', '777', self.fakesharepath),
mock.call(self.GPFS_PATH + 'mmlsquota', '-j', self.share['name'],
'-Y', self.fakedev),
mock.call(self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' +
self.share['name'], '--block', '0:' + sizestr)])
def test_manage_existing(self):
self._driver._manage_existing = mock.Mock(return_value=('1',
'fakelocation'))
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._is_share_valid = mock.Mock(return_value=True)
self._driver._get_share_name = mock.Mock(return_value=self.
fakeexistingshare)
self._helper_fake._has_client_access = mock.Mock(return_value=[])
result = self._driver.manage_existing(self.share, {})
self.assertEqual('1', result['size'])
self.assertEqual('fakelocation', result['export_locations'])
def test_manage_existing_incorrect_path_exception(self):
share = fake_share.fake_share(export_location="wrong_ip::wrong_path")
self.assertRaises(exception.ShareBackendException,
self._driver.manage_existing, share, {})
def test_manage_existing_incorrect_ip_exception(self):
share = fake_share.fake_share(export_location="wrong_ip:wrong_path")
self.assertRaises(exception.ShareBackendException,
self._driver.manage_existing, share, {})
def test__manage_existing_invalid_export_exception(self):
share = fake_share.fake_share(export_location="wrong_ip/wrong_path")
self.assertRaises(exception.ShareBackendException,
self._driver.manage_existing, share, {})
@ddt.data(True, False)
def test_manage_existing_invalid_share_exception(self, valid_share):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._is_share_valid = mock.Mock(return_value=valid_share)
if valid_share:
self._driver._get_share_name = mock.Mock(return_value=self.
fakeexistingshare)
self._helper_fake._has_client_access = mock.Mock()
else:
self.assertFalse(self._helper_fake._has_client_access.called)
self.assertRaises(exception.ManageInvalidShare,
self._driver.manage_existing, self.share, {})
def test__gpfs_local_execute(self):
self.mock_object(utils, 'execute', mock.Mock(return_value=True))
cmd = "testcmd"
self._driver._gpfs_local_execute(cmd, ignore_exit_code=[2])
utils.execute.assert_called_once_with(cmd, run_as_root=True,
check_exit_code=[2, 0])
def test__gpfs_remote_execute(self):
self._driver._run_ssh = mock.Mock(return_value=True)
cmd = "testcmd"
orig_value = self._driver.configuration.gpfs_share_export_ip
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver._gpfs_remote_execute(cmd, check_exit_code=True)
self._driver._run_ssh.assert_called_once_with(
self.local_ip, tuple([cmd]), None, True
)
self._driver.configuration.gpfs_share_export_ip = orig_value
def test_knfs_resync_access(self):
self._knfs_helper.allow_access = mock.Mock()
path = self.fakesharepath
to_remove = '3.3.3.3'
fake_exportfs_before = ('%(path)s\n\t\t%(ip)s\n'
'/other/path\n\t\t4.4.4.4\n' %
{'path': path, 'ip': to_remove})
fake_exportfs_after = '/other/path\n\t\t4.4.4.4\n'
self._knfs_helper._execute = mock.Mock(
return_value=(fake_exportfs_before, ''))
self._knfs_helper._publish_access = mock.Mock(
side_effect=[[(fake_exportfs_before, '')],
[(fake_exportfs_after, '')]])
access_1 = fake_share.fake_access(access_to="1.1.1.1")
access_2 = fake_share.fake_access(access_to="2.2.2.2")
self._knfs_helper.resync_access(path, self.share, [access_1, access_2])
self._knfs_helper.allow_access.assert_has_calls([
mock.call(path, self.share, access_1, error_on_exists=False),
mock.call(path, self.share, access_2, error_on_exists=False)])
self._knfs_helper._execute.assert_called_once_with(
'exportfs', run_as_root=True)
self._knfs_helper._publish_access.assert_has_calls([
mock.call('exportfs', '-u',
'%(ip)s:%(path)s' % {'ip': to_remove, 'path': path},
check_exit_code=[0, 1]),
mock.call('exportfs')])
@ddt.data('rw', 'ro')
def test_knfs_get_export_options(self, access_level):
mock_out = {"knfs:export_options": "no_root_squash"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = fake_share.fake_access(access_level=access_level)
out = self._knfs_helper.get_export_options(self.share, access, 'KNFS')
self.assertEqual("no_root_squash,%s" % access_level, out)
def test_knfs_get_export_options_default(self):
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value={}))
access = self.access
out = self._knfs_helper.get_export_options(self.share, access, 'KNFS')
self.assertEqual("rw", out)
def test_knfs_get_export_options_invalid_option_ro(self):
mock_out = {"knfs:export_options": "ro"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = self.access
share = fake_share.fake_share(share_type="fake_share_type")
self.assertRaises(exception.InvalidInput,
self._knfs_helper.get_export_options,
share, access, 'KNFS')
def test_knfs_get_export_options_invalid_option_rw(self):
mock_out = {"knfs:export_options": "rw"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = self.access
share = fake_share.fake_share(share_type="fake_share_type")
self.assertRaises(exception.InvalidInput,
self._knfs_helper.get_export_options,
share, access, 'KNFS')
@ddt.data(("/gpfs0/share-fakeid\t10.0.0.1", None),
("", None),
("/gpfs0/share-fakeid\t10.0.0.1", "10.0.0.1"),
("/gpfs0/share-fakeid\t10.0.0.1", "10.0.0.2"))
@ddt.unpack
def test_knfs__has_client_access(self, mock_out, access_to):
self._knfs_helper._execute = mock.Mock(return_value=[mock_out, 0])
result = self._knfs_helper._has_client_access(self.fakesharepath,
access_to)
        self._knfs_helper._execute.assert_called_once_with(
            'exportfs', check_exit_code=True, run_as_root=True)
if mock_out == "/gpfs0/share-fakeid\t10.0.0.1":
if access_to in (None, "10.0.0.1"):
self.assertTrue(result)
else:
self.assertFalse(result)
else:
self.assertFalse(result)
def test_knfs_allow_access(self):
self._knfs_helper._execute = mock.Mock(
return_value=['/fs0 <world>', 0]
)
self.mock_object(re, 'search', mock.Mock(return_value=None))
export_opts = None
self._knfs_helper.get_export_options = mock.Mock(
return_value=export_opts
)
self._knfs_helper._publish_access = mock.Mock()
access = self.access
local_path = self.fakesharepath
self._knfs_helper.allow_access(local_path, self.share, access)
self._knfs_helper._execute.assert_called_once_with('exportfs',
run_as_root=True)
self.assertTrue(re.search.called)
self._knfs_helper.get_export_options.assert_any_call(
self.share, access, 'KNFS')
cmd = ['exportfs', '-o', export_opts, ':'.join([access['access_to'],
local_path])]
self._knfs_helper._publish_access.assert_called_once_with(*cmd)
def test_knfs_allow_access_access_exists(self):
out = ['/fs0 <world>', 0]
self._knfs_helper._execute = mock.Mock(return_value=out)
self.mock_object(re, 'search', mock.Mock(return_value="fake"))
self._knfs_helper.get_export_options = mock.Mock()
access = self.access
local_path = self.fakesharepath
self.assertRaises(exception.ShareAccessExists,
self._knfs_helper.allow_access,
local_path, self.share, access)
self._knfs_helper._execute.assert_any_call('exportfs',
run_as_root=True)
self.assertTrue(re.search.called)
self.assertFalse(self._knfs_helper.get_export_options.called)
def test_knfs_allow_access_publish_exception(self):
self._knfs_helper.get_export_options = mock.Mock()
self._knfs_helper._publish_access = mock.Mock(
side_effect=exception.ProcessExecutionError('boom'))
self.assertRaises(exception.GPFSException,
self._knfs_helper.allow_access,
self.fakesharepath,
self.share,
self.access,
error_on_exists=False)
self.assertTrue(self._knfs_helper.get_export_options.called)
self.assertTrue(self._knfs_helper._publish_access.called)
def test_knfs_allow_access_invalid_access(self):
access = fake_share.fake_access(access_type='test')
self.assertRaises(exception.InvalidShareAccess,
self._knfs_helper.allow_access,
self.fakesharepath, self.share,
access)
def test_knfs_allow_access_exception(self):
self._knfs_helper._execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
access = self.access
local_path = self.fakesharepath
self.assertRaises(exception.GPFSException,
self._knfs_helper.allow_access,
local_path, self.share,
access)
self._knfs_helper._execute.assert_called_once_with('exportfs',
run_as_root=True)
def test_knfs__verify_denied_access_pass(self):
local_path = self.fakesharepath
ip = self.access['access_to']
fake_exportfs = ('/shares/share-1\n\t\t1.1.1.1\n'
'/shares/share-2\n\t\t2.2.2.2\n')
self._knfs_helper._publish_access = mock.Mock(
return_value=[(fake_exportfs, '')])
self._knfs_helper._verify_denied_access(local_path, self.share, ip)
self._knfs_helper._publish_access.assert_called_once_with('exportfs')
def test_knfs__verify_denied_access_fail(self):
local_path = self.fakesharepath
ip = self.access['access_to']
data = {'path': local_path, 'ip': ip}
fake_exportfs = ('/shares/share-1\n\t\t1.1.1.1\n'
'%(path)s\n\t\t%(ip)s\n'
'/shares/share-2\n\t\t2.2.2.2\n') % data
self._knfs_helper._publish_access = mock.Mock(
return_value=[(fake_exportfs, '')])
self.assertRaises(exception.GPFSException,
self._knfs_helper._verify_denied_access,
local_path,
self.share,
ip)
self._knfs_helper._publish_access.assert_called_once_with('exportfs')
def test_knfs__verify_denied_access_exception(self):
self._knfs_helper._publish_access = mock.Mock(
side_effect=exception.ProcessExecutionError
)
ip = self.access['access_to']
local_path = self.fakesharepath
self.assertRaises(exception.GPFSException,
self._knfs_helper._verify_denied_access,
local_path,
self.share,
ip)
self._knfs_helper._publish_access.assert_called_once_with('exportfs')
@ddt.data((None, False),
('', False),
(' ', False),
('Some error to log', True))
@ddt.unpack
def test_knfs__verify_denied_access_stderr(self, stderr, is_logged):
"""Stderr debug logging should only happen when not empty."""
outputs = [('', stderr)]
self._knfs_helper._publish_access = mock.Mock(return_value=outputs)
gpfs.LOG.debug = mock.Mock()
self._knfs_helper._verify_denied_access(
self.fakesharepath, self.share, self.remote_ip)
self._knfs_helper._publish_access.assert_called_once_with('exportfs')
self.assertEqual(is_logged, gpfs.LOG.debug.called)
def test_knfs_deny_access(self):
self._knfs_helper._publish_access = mock.Mock(return_value=[('', '')])
access = self.access
local_path = self.fakesharepath
self._knfs_helper.deny_access(local_path, self.share, access)
deny = ['exportfs', '-u', ':'.join([access['access_to'], local_path])]
self._knfs_helper._publish_access.assert_has_calls([
mock.call(*deny, check_exit_code=[0, 1]),
mock.call('exportfs')])
def test_knfs_deny_access_exception(self):
self._knfs_helper._publish_access = mock.Mock(
side_effect=exception.ProcessExecutionError
)
access = self.access
local_path = self.fakesharepath
cmd = ['exportfs', '-u', ':'.join([access['access_to'], local_path])]
self.assertRaises(exception.GPFSException,
self._knfs_helper.deny_access, local_path,
self.share, access)
self._knfs_helper._publish_access.assert_called_once_with(
*cmd, check_exit_code=[0, 1])
def test_knfs__publish_access(self):
self.mock_object(utils, 'execute')
fake_command = 'fakecmd'
cmd = [fake_command]
self._knfs_helper._publish_access(*cmd)
utils.execute.assert_any_call(*cmd, run_as_root=True,
check_exit_code=True)
remote_login = self.sshlogin + '@' + self.remote_ip
remote_login2 = self.sshlogin + '@' + self.remote_ip2
utils.execute.assert_has_calls([
mock.call('ssh', remote_login, fake_command,
check_exit_code=True, run_as_root=False),
mock.call(fake_command, check_exit_code=True, run_as_root=True),
mock.call('ssh', remote_login2, fake_command,
check_exit_code=True, run_as_root=False)])
self.assertTrue(socket.gethostbyname_ex.called)
self.assertTrue(socket.gethostname.called)
def test_knfs__publish_access_exception(self):
self.mock_object(
utils, 'execute',
mock.Mock(side_effect=(0, exception.ProcessExecutionError)))
fake_command = 'fakecmd'
cmd = [fake_command]
self.assertRaises(exception.ProcessExecutionError,
self._knfs_helper._publish_access, *cmd)
self.assertTrue(socket.gethostbyname_ex.called)
self.assertTrue(socket.gethostname.called)
remote_login = self.sshlogin + '@' + self.remote_ip
utils.execute.assert_has_calls([
mock.call('ssh', remote_login, fake_command,
check_exit_code=True, run_as_root=False),
mock.call(fake_command, check_exit_code=True, run_as_root=True)])
@ddt.data('rw', 'ro')
def test_ces_get_export_options(self, access_level):
mock_out = {"ces:export_options": "squash=no_root_squash"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = fake_share.fake_access(access_level=access_level)
out = self._ces_helper.get_export_options(self.share, access, 'CES')
self.assertEqual("squash=no_root_squash,access_type=%s" % access_level,
out)
def test_ces_get_export_options_default(self):
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value={}))
access = self.access
out = self._ces_helper.get_export_options(self.share, access,
'CES')
self.assertEqual("access_type=rw", out)
def test_ces_get_export_options_invalid_option_ro(self):
mock_out = {"ces:export_options": "access_type=ro"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = self.access
share = fake_share.fake_share(share_type="fake_share_type")
self.assertRaises(exception.InvalidInput,
self._ces_helper.get_export_options,
share, access, 'CES')
def test_ces_get_export_options_invalid_option_rw(self):
mock_out = {"ces:export_options": "access_type=rw"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = self.access
share = fake_share.fake_share(share_type="fake_share_type")
self.assertRaises(exception.InvalidInput,
self._ces_helper.get_export_options,
share, access, 'CES')
def test__get_nfs_client_exports_exception(self):
self._ces_helper._execute = mock.Mock(return_value=('junk', ''))
local_path = self.fakesharepath
self.assertRaises(exception.GPFSException,
self._ces_helper._get_nfs_client_exports,
local_path)
self._ces_helper._execute.assert_called_once_with(
self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y')
@ddt.data('44.3.2.11', '1:2:3:4:5:6:7:8')
def test__fix_export_data(self, ip):
data = None
for line in self.fake_ces_exports.splitlines():
if "HEADER" in line:
headers = line.split(':')
if ip in line:
data = line.split(':')
break
self.assertIsNotNone(
data, "Test data did not contain a line with the test IP.")
result_data = self._ces_helper._fix_export_data(data, headers)
self.assertEqual(ip, result_data[headers.index('Clients')])
@ddt.data((None, True),
('44.3.2.11', True),
('44.3.2.1', False),
('4.3.2.1', False),
('4.3.2.11', False),
('1.2.3.4', False),
('', False),
('*', False),
('.', False),
('1:2:3:4:5:6:7:8', True))
@ddt.unpack
def test_ces__has_client_access(self, ip, has_access):
mock_out = self.fake_ces_exports
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
local_path = self.fakesharepath
self.assertEqual(has_access,
self._ces_helper._has_client_access(local_path, ip))
self._ces_helper._execute.assert_called_once_with(
self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y')
def test_ces_remove_export_no_exports(self):
mock_out = self.fake_ces_exports_not_found
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
local_path = self.fakesharepath
self._ces_helper.remove_export(local_path, self.share)
self._ces_helper._execute.assert_called_once_with(
self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y')
def test_ces_remove_export_existing_exports(self):
mock_out = self.fake_ces_exports
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
local_path = self.fakesharepath
self._ces_helper.remove_export(local_path, self.share)
self._ces_helper._execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n',
local_path, '-Y'),
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'remove',
local_path),
])
def test_ces_remove_export_exception(self):
local_path = self.fakesharepath
self._ces_helper._execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.GPFSException,
self._ces_helper.remove_export,
local_path, self.share)
def test_ces_allow_access(self):
mock_out = self.fake_ces_exports_not_found
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
export_opts = "access_type=rw"
self._ces_helper.get_export_options = mock.Mock(
return_value=export_opts)
access = self.access
local_path = self.fakesharepath
self._ces_helper.allow_access(local_path, self.share, access)
self._ces_helper._execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n',
local_path, '-Y'),
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'add', local_path,
'-c', access['access_to'] + '(' + export_opts + ')')])
def test_ces_allow_access_existing_exports(self):
mock_out = self.fake_ces_exports
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
export_opts = "access_type=rw"
self._ces_helper.get_export_options = mock.Mock(
return_value=export_opts)
access = self.access
local_path = self.fakesharepath
self._ces_helper.allow_access(self.fakesharepath, self.share,
self.access)
self._ces_helper._execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n',
local_path, '-Y'),
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'change', local_path,
'--nfsadd', access['access_to'] + '(' +
export_opts + ')')])
def test_ces_allow_access_invalid_access_type(self):
access = fake_share.fake_access(access_type='test')
self.assertRaises(exception.InvalidShareAccess,
self._ces_helper.allow_access,
self.fakesharepath, self.share,
access)
def test_ces_allow_access_exception(self):
access = self.access
local_path = self.fakesharepath
self._ces_helper._execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.GPFSException,
self._ces_helper.allow_access, local_path,
self.share, access)
def test_ces_deny_access(self):
mock_out = self.fake_ces_exports
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
access = self.access
local_path = self.fakesharepath
self._ces_helper.deny_access(local_path, self.share, access)
self._ces_helper._execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n',
local_path, '-Y'),
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'change', local_path,
'--nfsremove', access['access_to'])])
def test_ces_deny_access_exception(self):
access = self.access
local_path = self.fakesharepath
self._ces_helper._execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.GPFSException,
self._ces_helper.deny_access, local_path,
self.share, access)
def test_ces_resync_access_add(self):
mock_out = self.fake_ces_exports_not_found
self._ces_helper._execute = mock.Mock(return_value=(mock_out, ''))
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value={}))
access_rules = [self.access]
local_path = self.fakesharepath
self._ces_helper.resync_access(local_path, self.share, access_rules)
self._ces_helper._execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n',
local_path, '-Y'),
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'add', local_path,
'-c', self.access['access_to'] + '(' + "access_type=rw" +
')')
])
share_types.get_extra_specs_from_share.assert_called_once_with(
self.share)
def test_ces_resync_access_change(self):
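        # Descriptive note (added): the nested SortedMatch helper below makes
        # the mock assertion order-insensitive; two comma-separated client
        # strings compare equal when they contain the same elements, since the
        # driver may join the removed clients in any order.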
class SortedMatch(object):
def __init__(self, f, expected):
self.assertEqual = f
self.expected = expected
def __eq__(self, actual):
expected_list = self.expected.split(',')
actual_list = actual.split(',')
self.assertEqual(sorted(expected_list), sorted(actual_list))
return True
mock_out = self.fake_ces_exports
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value={}))
access_rules = [fake_share.fake_access(access_to='1.1.1.1'),
fake_share.fake_access(
access_to='10.0.0.1', access_level='ro')]
local_path = self.fakesharepath
self._ces_helper.resync_access(local_path, self.share, access_rules)
share_types.get_extra_specs_from_share.assert_called_once_with(
self.share)
to_remove = '1:2:3:4:5:6:7:8,44.3.2.11'
to_add = access_rules[0]['access_to'] + '(' + "access_type=rw" + ')'
to_change = access_rules[1]['access_to'] + '(' + "access_type=ro" + ')'
self._ces_helper._execute.assert_has_calls([
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n',
local_path, '-Y'),
mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'change', local_path,
'--nfsremove', SortedMatch(self.assertEqual, to_remove),
'--nfsadd', to_add,
'--nfschange', to_change)
])
def test_ces_resync_nothing(self):
"""Test that hits the add-no-rules case."""
mock_out = self.fake_ces_exports_not_found
self._ces_helper._execute = mock.Mock(return_value=(mock_out, ''))
local_path = self.fakesharepath
self._ces_helper.resync_access(local_path, None, [])
self._ces_helper._execute.assert_called_once_with(
self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y')
| apache-2.0 | 3,697,016,132,036,704,000 | 45.548289 | 219 | 0.580463 | false |
dchud/sentinel | test/stats.py | 1 | 3210 | # $Id$
from unittest import TestCase
from canary.context import Context
from canary.stats import *
from canary.search import RecordSearcher
class StatsTests (TestCase):
context = Context()
def setUp (self):
# each test gets a new collector
self.collector = StatCollector(self.context)
# get some records for statistics generation once
searcher = RecordSearcher(self.context)
self.records = searcher.search('environment')
def test_curators (self):
handler = ArticleTypeHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
def test_article_types (self):
handler = ArticleTypeHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
def test_methodology_samplings (self):
handler = MethodologySamplingHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
def test_methology_types (self):
handler = MethodologyTypeHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
def test_methology_timings (self):
handler = MethodologyTimingHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
def test_methology_controls (self):
handler = MethodologyControlHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
def test_exposure_routes (self):
handler = ExposureRouteHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
def test_exposures (self):
handler = ExposureHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
def test_risk_factors (self):
handler = RiskFactorHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
def test_outcomes (self):
handler = OutcomeHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
def test_species (self):
handler = SpeciesHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
def test_locations (self):
handler = LocationHandler()
self.collector.add_handler(handler)
self.collector.process(self.records)
self.assertEquals(len(handler.stats.keys()) > 0, True)
| mit | -5,261,885,974,166,921,000 | 34.666667 | 62 | 0.665732 | false |
balloob/home-assistant | tests/components/freebox/test_config_flow.py | 6 | 5212 | """Tests for the Freebox config flow."""
from aiofreepybox.exceptions import (
AuthorizationError,
HttpRequestError,
InvalidTokenError,
)
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.freebox.const import DOMAIN
from homeassistant.config_entries import SOURCE_DISCOVERY, SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_PORT
from tests.async_mock import AsyncMock, patch
from tests.common import MockConfigEntry
HOST = "myrouter.freeboxos.fr"
PORT = 1234
@pytest.fixture(name="connect")
def mock_controller_connect():
"""Mock a successful connection."""
with patch("homeassistant.components.freebox.router.Freepybox") as service_mock:
service_mock.return_value.open = AsyncMock()
service_mock.return_value.system.get_config = AsyncMock(
return_value={
"mac": "abcd",
"model_info": {"pretty_name": "Pretty Model"},
"firmware_version": "123",
}
)
service_mock.return_value.lan.get_hosts_list = AsyncMock()
service_mock.return_value.connection.get_status = AsyncMock()
service_mock.return_value.close = AsyncMock()
yield service_mock
async def test_user(hass):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with all provided
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_import(hass):
"""Test import step."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_discovery(hass):
"""Test discovery step."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_DISCOVERY},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_link(hass, connect):
"""Test linking."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == HOST
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == PORT
async def test_abort_if_already_setup(hass):
"""Test we abort if component is already setup."""
MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: HOST, CONF_PORT: PORT}, unique_id=HOST
).add_to_hass(hass)
# Should fail, same HOST (import)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same HOST (flow)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_on_link_failed(hass):
"""Test when we have errors during linking the router."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=AuthorizationError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "register_failed"}
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=HttpRequestError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=InvalidTokenError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
| apache-2.0 | -3,516,609,556,182,169,600 | 33.746667 | 86 | 0.644474 | false |
kevinzhang1986/codezero | tools/cml2header.py | 6 | 3635 | #!/usr/bin/python
import os
import sys
import re
import StringIO
import getopt
class cml2header_translator:
'''
Translates a cml2 configuration file to a C header file
that can be included in source code.
'''
def __init__(self):
self.isnotset = re.compile("^# (.*) is not set")
def linetrans(self, instream, outstream, trailer=None):
'''
Translates a stream line-by-line
'''
if not hasattr(instream, "readline"):
instream = open(instream, "r")
if not hasattr(outstream, "readline"):
outstream = open(outstream, "w")
while 1:
line = instream.readline()
if not line:
break
new = self.write_include(line)
if new:
outstream.write(new)
instream.close()
if trailer:
outstream.write(trailer)
outstream.close()
def write_include(self, line):
'''
Translate a line of CML2 config file statement into
C preprocessor #define format.
'''
if line.find("PRIVATE") > -1 or line[:2] == "$$":
return ""
match = self.isnotset.match(line)
if match:
return "#undef %s\n" % match.group(1)
if line == "#\n":
return None
elif line[0] == "#":
return "/* " + line[1:].strip() + " */\n"
eq = line.find("=")
if eq == -1:
return line
else:
line = line.split('#')[0]
symbol = line[:eq]
value = line[eq+1 :].strip()
if value == 'y':
return "#define %s 1\n" % symbol
elif value == 'm':
return "#undef %s\n#define %s_MODULE 1\n" % (symbol, symbol)
elif value == 'n':
return "#undef %s\n" % symbol
else:
return "#define %s %s\n" % (symbol, value)
def cml_configfile_exists(self, path):
if not os.path.exists(path):
print "CML2 configuration file not found."
print "Given path:", path, "does not exist."
return 0
else:
return 1
def translate(self, configfile_path = None, headerfile_path = None):
if configfile_path == None:
configfile_path = os.getcwd() + '/' + "config.out"
if headerfile_path == None:
headerfile_path = os.getcwd() + '/' + "config.h"
'''
Takes a configuration file generated by the cml2 configurator,
and converts it to a C header file. This header file is then
included by the kernel sources and the preprocessor definitions
in it are used during the compilation of the kernel.
'''
if not self.cml_configfile_exists(configfile_path):
print "Failed to translate cml configuration file", configfile_path, 'to', headerfile_path
return -1
self.linetrans(configfile_path, headerfile_path)
return 0
def usage(self):
helpstr = \
'''\
Description:
cml2header.py takes a cml2 configuration file as input and generates
a valid C header file to be included in C source code.
Usage:
cml2header.py -i inputfile -o outputfile [-h]
Options:
-i The CML2 configuration file (config.out by default)
-o The name of the header file (config.h by default)
-h This help message
'''
print helpstr
def main():
translator = cml2header_translator()
try:
opts, args = getopt.getopt(sys.argv[1:], "i:o:h:")
except getopt.GetoptError:
# print help information and exit:
translator.usage()
sys.exit(2)
ofile = None
ifile = None
# Print help message if requested and quit
    if '-h' in [switch for switch, _ in opts]:
translator.usage()
sys.exit()
# Get the input and output filenames.
for switch, val in opts:
if switch == '-i':
ifile = val
if switch == '-o':
ofile = val
# Translate
if translator.translate(ifile, ofile) < 0:
print "Translation of", ifile, 'to', ofile, 'failed.'
# Run the main routine only if this file is explicitly executed.
# (i.e. not referred from another module)
if __name__ == "__main__":
main()
| gpl-3.0 | 6,780,618,843,396,138,000 | 24.243056 | 93 | 0.650894 | false |
liorvh/golismero | tools/sqlmap/thirdparty/multipart/multipartpost.py | 15 | 3866 | #!/usr/bin/env python
"""
02/2006 Will Holcomb <[email protected]>
Reference: http://odin.himinbi.org/MultipartPostHandler.py
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import mimetools
import mimetypes
import os
import stat
import StringIO
import sys
import urllib
import urllib2
from lib.core.exception import SqlmapDataException
class Callable:
def __init__(self, anycallable):
self.__call__ = anycallable
# Controls how sequences are encoded. If true, elements may be given
# multiple values by assigning a sequence.
doseq = 1
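# Minimal usage sketch (added; not part of the original module). The handler
# below is chained into a urllib2 opener so that dict data containing file
# objects is POSTed as multipart/form-data. The URL and field names here are
# made-up examples:
#
#   opener = urllib2.build_opener(MultipartPostHandler)
#   params = {"name": "value", "file": open("example.txt", "rb")}
#   response = opener.open("http://example.com/upload", params)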
class MultipartPostHandler(urllib2.BaseHandler):
handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
data = request.get_data()
if data is not None and type(data) != str:
v_files = []
v_vars = []
try:
for(key, value) in data.items():
if isinstance(value, file) or hasattr(value, 'file') or isinstance(value, StringIO.StringIO):
v_files.append((key, value))
else:
v_vars.append((key, value))
except TypeError:
systype, value, traceback = sys.exc_info()
raise SqlmapDataException, "not a valid non-string sequence or mapping object", traceback
if len(v_files) == 0:
data = urllib.urlencode(v_vars, doseq)
else:
boundary, data = self.multipart_encode(v_vars, v_files)
contenttype = 'multipart/form-data; boundary=%s' % boundary
#if (request.has_header('Content-Type') and request.get_header('Content-Type').find('multipart/form-data') != 0):
# print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
request.add_unredirected_header('Content-Type', contenttype)
request.add_data(data)
return request
def multipart_encode(vars, files, boundary = None, buf = None):
if boundary is None:
boundary = mimetools.choose_boundary()
if buf is None:
buf = ''
for (key, value) in vars:
buf += '--%s\r\n' % boundary
buf += 'Content-Disposition: form-data; name="%s"' % key
buf += '\r\n\r\n' + value + '\r\n'
for (key, fd) in files:
file_size = os.fstat(fd.fileno())[stat.ST_SIZE] if isinstance(fd, file) else fd.len
filename = fd.name.split('/')[-1] if '/' in fd.name else fd.name.split('\\')[-1]
contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
buf += '--%s\r\n' % boundary
buf += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename)
buf += 'Content-Type: %s\r\n' % contenttype
# buf += 'Content-Length: %s\r\n' % file_size
fd.seek(0)
buf = str(buf)
buf += '\r\n%s\r\n' % fd.read()
buf += '--%s--\r\n\r\n' % boundary
return boundary, buf
multipart_encode = Callable(multipart_encode)
https_request = http_request
| gpl-2.0 | 2,612,569,231,227,378,000 | 35.130841 | 129 | 0.612519 | false |
yala/introdeeplearning | lab1_utils.py | 1 | 7912 | import csv
import pandas as pd
import re
import special_tokens
import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
MIN_WORD_COUNT = 10
def flatten(nested):
return [x for sublist in nested for x in sublist]
def load_sentiment_data_bow():
sentences = []
data = pd.read_csv("./data/sentiment-tweets.csv")
sentences = list(data['text'])
sentiment_labels = data['airline_sentiment']
sentences_of_words = [split_into_words(sentence) for sentence in sentences]
word_counts_initial = get_word_counts(sentences_of_words)
sentences_of_words = filter_words_by_count(sentences_of_words, word_counts_initial, MIN_WORD_COUNT)
word_counts = get_word_counts(sentences_of_words)
all_words = word_counts.keys()
vocab_size = len(all_words)
index_to_word = {index:word for index, word in enumerate(all_words)}
word_to_index = {word: index for index, word in enumerate(all_words)}
n_sentences = len(sentences_of_words)
classes = set(sentiment_labels)
n_classes = len(classes)
X = np.zeros(shape=(n_sentences, vocab_size))
y = np.zeros(shape=(n_sentences, n_classes))
label_to_index = {'negative': -1, 'neutral': 0, 'positive':1}
for sentence_index in range(n_sentences):
current_sentence = sentences_of_words[sentence_index]
for current_word_position in range(len(current_sentence)):
word = sentences_of_words[sentence_index][current_word_position]
token_index = word_to_index[word]
X[sentence_index][token_index] += 1
sentiment_label = sentiment_labels[sentence_index]
sentiment_label_index = label_to_index[sentiment_label]
y[sentence_index][sentiment_label_index] = 1
return X, y, index_to_word, sentences
def load_sentiment_data(max_len):
sentences = []
data = pd.read_csv("./data/sentiment-tweets.csv")
sentences = list(data['text'])
sentiment_labels = data['airline_sentiment']
sentences_of_words = [split_into_words(sentence) for sentence in sentences]
word_counts_initial = get_word_counts(sentences_of_words)
sentences_of_words = filter_words_by_count(sentences_of_words, word_counts_initial, MIN_WORD_COUNT)
word_counts = get_word_counts(sentences_of_words)
all_words = word_counts.keys()
vocab_size = len(all_words)
index_to_word = {index:word for index, word in enumerate(all_words)}
word_to_index = {word: index for index, word in enumerate(all_words)}
n_sentences = len(sentences_of_words)
classes = set(sentiment_labels)
n_classes = len(classes)
X = np.zeros(shape=(n_sentences, max_len, vocab_size), dtype='float32')
y = np.zeros(shape=(n_sentences, n_classes), dtype='float32')
label_to_index = {'negative': 0, 'neutral': 1, 'positive':2}
for sentence_index in range(n_sentences):
current_sentence = sentences_of_words[sentence_index]
for current_word_position in range(min(max_len, len(current_sentence))):
word = sentences_of_words[sentence_index][current_word_position]
token_index = word_to_index[word]
X[sentence_index][current_word_position][token_index] = 1
sentiment_label = sentiment_labels[sentence_index]
sentiment_label_index = label_to_index[sentiment_label]
y[sentence_index][sentiment_label_index] = 1
return X, y, index_to_word, sentences
def filter_words_by_count(sentences, word_counts, cutoff=5):
new_sentences = []
for s_i in range(len(sentences)):
sentence = sentences[s_i]
new_sentence = []
for w_i in range(len(sentence)):
word = sentence[w_i]
new_word = word
if word_counts[word] < cutoff:
new_word = special_tokens._UNK
new_sentence.append(new_word)
new_sentences.append(new_sentence)
return new_sentences
def get_sentence_length_stats(sentences_of_words):
print(np.mean([len(sentence) for sentence in sentences_of_words]))
def get_word_counts(sentences_of_words):
    word_counts = {special_word: 0 for special_word in special_tokens._START_VOCAB}
for sentence in sentences_of_words:
for word in sentence:
word_counts[word] = word_counts.get(word, 0) + 1
return word_counts
def split_into_words(sentence):
"""Basic word splitting"""
_WORD_SPLIT = re.compile("([.,!?\"':;)(])")
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(_WORD_SPLIT.split(space_separated_fragment))
return [w.lower() for w in words if w]
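# Illustrative example (added; not in the original lab code): with the
# _WORD_SPLIT pattern above, split_into_words("Hello, world!") returns
# ['hello', ',', 'world', '!']; punctuation becomes separate lowercase tokens.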
def split_data(X, y, train_split=0.8, dev_split=0.1, test_split=0.1, random=False):
"""Splits data"""
num_examples = len(X)
indices = range(X.shape[0])
if random:
random.seed(42)
random.shuffle(indices)
boundary = int(num_examples*train_split)
training_idx, test_idx = indices[:boundary], indices[boundary:]
X_train, X_test = X[training_idx,:], X[test_idx,:]
y_train, y_test = y[training_idx,:], y[test_idx,:]
return X_train, y_train, X_test, y_test
def get_random_minibatch_indices(n_examples, batch_size):
indices = range(n_examples)
random.shuffle(indices)
num_batches = n_examples/batch_size
minibatch_indices = np.zeros(shape=(num_batches, batch_size), dtype='int32')
for b_i in range(num_batches):
for ex_i in range(batch_size):
minibatch_indices[b_i] = indices[b_i*batch_size:(b_i+1)*batch_size]
return minibatch_indices
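# Illustrative note (added): with n_examples=10 and batch_size=3 this returns a
# (3, 3) array of shuffled example indices; the leftover 10th example is simply
# dropped because num_batches uses integer division.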
def unpack_sequence(tensor):
"""Split the single tensor of a sequence into a list of frames."""
return tf.unpack(tf.transpose(tensor, perm=[1, 0, 2]))
def pack_sequence(sequence):
"""Combine a list of the frames into a single tensor of the sequence."""
return tf.transpose(tf.pack(sequence), perm=[1, 0, 2])
def bow_to_dict(bow_row, index_to_word):
words = {}
for i in range(len(bow_row)):
word_count = bow_row[i]
if word_count > 0:
word = index_to_word[i]
words[word] = words.get(word, 0)+word_count
return words
def label_to_desc(label):
return ["negative", "neutral", "positive"][np.argmax(label)]
def classify_and_plot(data, labels, x, out, session):
outputs = []
for i in range(len(data)):
x_input = [data[i]]
feed_dict = {x: x_input}
output = session.run([out], feed_dict=feed_dict)
outputs.append(output[0])
plt.figure()
plt.axis([0, 10, 0, 10])
for i in range(len(outputs)):
x_input = data[i]
# print(outputs[i].shape)
# print(float(outputs[i]))
decision = 1 if float(outputs[i]) > 0.5 else 0
label = labels[i]
# print('ec', int(decision), int(label))
# print('ec2', decision, label)
# print(int(decision) is int(label))
m_text = 'g' if int(decision) == int(label) else 'r'
m_text += '_' if label == 0 else '+'
plt.plot(x_input[0], x_input[1], m_text, markersize=10)
plt.show()
x_min, x_max = data[:, 0].min() - 1, data[:, 0].max() + 1
y_min, y_max = data[:, 1].min() - 1, data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max),
np.arange(y_min, y_max))
xx = np.arange(0, 100)/10.0
yy = np.arange(0, 100)/10.0
mesh = np.array([[j, i] for i in range(100) for j in range(100)])/10.0
# here "model" is your model's prediction (classification) function
Z = session.run([out], feed_dict={x: mesh})[0]
# print(Z)
Z = np.array(Z)
Z += 0.5
Z = Z.astype(int)
# Z = session.run([out], feed_dict={x_in:}) model(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((100, 100))
plt.contourf(xx, yy, Z)
plt.show()
print('predictions', outputs)
def one_hot(i, end):
v = np.zeros(end)
v[i] = 1
return v
| mit | 4,097,584,472,053,008,000 | 35.127854 | 103 | 0.63132 | false |
mx3L/subssupport | test/test_processing.py | 1 | 2936 | import os
import sys
import unittest
test = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(test, '..', 'plugin'))
from process import SubsLoader, DecodeError, LoadError, \
ParseError, ParserNotFoundError
from parsers import SubRipParser, MicroDVDParser
PARSERS = [SubRipParser, MicroDVDParser]
ENCODINGS = ['utf-8',
'utf-16'
'windows-1252',
'windows-1256',
'windows-1250',
'iso-8859-2',
'maclatin2',
'IBM852',
'iso-8859-15',
'macroman',
'ibm1140',
'IBM850',
'windows-1251',
'cyrillic',
'maccyrillic',
'koi8_r',
'IBM866']
SUBS_PATH = os.path.join(os.path.dirname(__file__), 'subfiles')
TESTFILES = [os.path.join(SUBS_PATH, 'test_arabic.srt'),
os.path.join(SUBS_PATH, 'test_random.srt'),
os.path.join(SUBS_PATH, 'test_tags.srt'),
os.path.join(SUBS_PATH, 'test_null_chars.srt'),
os.path.join(SUBS_PATH, 'test_utf16.srt'),
os.path.join(SUBS_PATH, 'test_microdvd.txt')]
class LoaderTestCase(unittest.TestCase):
def setUp(self):
self.subsloader = SubsLoader(PARSERS, ENCODINGS)
def test_loader(self):
for subpath in TESTFILES:
sublist, encoding = self.subsloader.load(subpath, fps=25)
self.assertTrue(len(sublist) > 1, 'parsed subtitle list has to have at least 2 entries')
self.assertTrue(encoding != '', 'cannot detect encoding')
print '\n'
def test_utf16(self):
self.subsloader.change_encodings(['utf-8', 'utf-16'])
sublist, encoding = self.subsloader.load(os.path.join(SUBS_PATH, 'test_utf16.srt'))
self.assertTrue(len(sublist) > 1, 'parsed subtitle list has to have at least 2 entries')
self.assertTrue(encoding != '', 'cannot detect encoding')
self.assertTrue(encoding == 'utf-16', 'utf-16 file has to be decoded with utf-16 encoding')
def test_invalid_path_local(self):
self.assertRaises(LoadError, self.subsloader.load, 'dsadsa')
def test_invalid_path_remote(self):
self.assertRaises(LoadError, self.subsloader.load, 'http://dsakldmskla.srt')
def test_invalid_encoding(self):
self.subsloader.change_encodings(['utf-8'])
self.assertRaises(DecodeError, self.subsloader.load, os.path.join(SUBS_PATH, 'test_arabic.srt'))
def test_invalid_subtitles(self):
#self.assertRaises((ParseError,ParserNotFoundError), self.subsloader.load, os.path.join(SUBS_PATH,'test_invalid_file.srt'))
self.assertRaises((ParseError, ParserNotFoundError), self.subsloader.load, os.path.join(SUBS_PATH, 'test_vobsub.idx'))
def test_not_supported_size(self):
self.assertRaises(LoadError, self.subsloader.load, os.path.join(SUBS_PATH, 'test_vobsub.sub'))
| gpl-2.0 | 6,163,755,777,991,773,000 | 38.146667 | 131 | 0.625 | false |
nugget/home-assistant | homeassistant/components/android_ip_webcam/sensor.py | 2 | 2272 | """Support for Android IP Webcam sensors."""
from homeassistant.components.android_ip_webcam import (
KEY_MAP, ICON_MAP, DATA_IP_WEBCAM, AndroidIPCamEntity, CONF_HOST,
CONF_NAME, CONF_SENSORS)
from homeassistant.helpers.icon import icon_for_battery_level
DEPENDENCIES = ['android_ip_webcam']
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the IP Webcam Sensor."""
if discovery_info is None:
return
host = discovery_info[CONF_HOST]
name = discovery_info[CONF_NAME]
sensors = discovery_info[CONF_SENSORS]
ipcam = hass.data[DATA_IP_WEBCAM][host]
all_sensors = []
for sensor in sensors:
all_sensors.append(IPWebcamSensor(name, host, ipcam, sensor))
async_add_entities(all_sensors, True)
class IPWebcamSensor(AndroidIPCamEntity):
"""Representation of a IP Webcam sensor."""
def __init__(self, name, host, ipcam, sensor):
"""Initialize the sensor."""
super().__init__(host, ipcam)
self._sensor = sensor
self._mapped_name = KEY_MAP.get(self._sensor, self._sensor)
self._name = '{} {}'.format(name, self._mapped_name)
self._state = None
self._unit = None
@property
def name(self):
"""Return the name of the sensor, if any."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
@property
def state(self):
"""Return the state of the sensor."""
return self._state
async def async_update(self):
"""Retrieve latest state."""
if self._sensor in ('audio_connections', 'video_connections'):
if not self._ipcam.status_data:
return
self._state = self._ipcam.status_data.get(self._sensor)
self._unit = 'Connections'
else:
self._state, self._unit = self._ipcam.export_sensor(self._sensor)
@property
def icon(self):
"""Return the icon for the sensor."""
if self._sensor == 'battery_level' and self._state is not None:
return icon_for_battery_level(int(self._state))
return ICON_MAP.get(self._sensor, 'mdi:eye')
| apache-2.0 | 7,423,872,147,240,623,000 | 30.555556 | 77 | 0.618838 | false |
kojo1/wolfssl | wrapper/python/wolfcrypt/test/test_random.py | 2 | 1174 | # test_random.py
#
# Copyright (C) 2006-2020 wolfSSL Inc.
#
# This file is part of wolfSSL.
#
# wolfSSL is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# wolfSSL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
#/
import unittest
from wolfcrypt.random import *
class TestRandom(unittest.TestCase):
def setUp(self):
self.random = Random()
def test_byte(self):
assert len(self.random.byte()) == 1
def test_bytes(self):
assert len(self.random.bytes(1)) == 1
assert len(self.random.bytes(10)) == 10
assert len(self.random.bytes(100)) == 100
| gpl-2.0 | -5,940,509,680,656,191,000 | 29.102564 | 79 | 0.711244 | false |
AustinRoy7/Pomodoro-timer | main.py | 1 | 3039 | #!/usr/bin/env python
"""
This example uses docopt with the built in cmd module to demonstrate an
interactive command application.
Usage:
pomodoro start <task-title>
  pomodoro time <duration-in-minutes>
pomodoro list <date>
pomodoro list_all
pomodoro clear
pomodoro (-i | --interactive)
pomodoro (-h | --help | --version)
pomodoro quit
Options:
-i, --interactive Interactive Mode
-h, --help Show this screen and exit.
"""
from pomodoro_timer import *
from termcolor import cprint
import sys
import os
import cmd
from docopt import docopt, DocoptExit
import datetime
import sqlite3
import time
from pyfiglet import Figlet,figlet_format
#creates database and cursor
conn = sqlite3.connect('pomodoro.db')
c = conn.cursor()
def docopt_cmd(func):
"""
This decorator is used to simplify the try/except block and pass the result
of the docopt parsing to the called action.
"""
def fn(self, arg):
try:
opt = docopt(fn.__doc__, arg)
except DocoptExit as e:
# The DocoptExit is thrown when the args do not match.
# We print a message to the user and the usage block.
print('Invalid Command!')
print(e)
return
except SystemExit:
# The SystemExit exception prints the usage for --help
# We do not need to do the print here.
return
return func(self, opt)
fn.__name__ = func.__name__
fn.__doc__ = func.__doc__
fn.__dict__.update(func.__dict__)
return fn
class MyInteractive (cmd.Cmd):
f = Figlet(font = 'block')
print(cprint(figlet_format("My Pomo Timer", font = 'block'),'green','on_grey'))
intro = 'Welcome to pomodoro timer!' \
+ ' (type help for a list of commands.)'+ """
This example uses docopt with the built in cmd module to demonstrate an
interactive command application.
Usage:
pomodoro start <task-title>
pomodoro list <date>
pomodoro list_all
pomodoro delete_all
pomodoro (-i | --interactive)
pomodoro (-h | --help | --version)
Options:
-i, --interactive Interactive Mode
-h, --help Show this screen and exit.
--baud=<n> Baudrate [default: 9600]
"""
prompt = 'pomodoro '
file = None
def do_quit(self, arg):
"""Usage: quit"""
print('Good Bye!')
exit()
@docopt_cmd
def do_start(self, arg):
"""Usage: start <task-title>"""
new_task(arg['<task-title>'])
@docopt_cmd
def do_list(self, arg):
"""Usage: list <date>"""
list_tasks(arg['<date>'])
@docopt_cmd
def do_list_all(self, arg):
"""Usage: list_all"""
list_all_tasks()
@docopt_cmd
def do_delete_all(self, arg):
"""Usage: delete_all"""
delete_all()
@docopt_cmd
def do_stop_counter(self,arg):
"""Usage: stop"""
stop_task()
opt = docopt(__doc__, sys.argv[1:])
if opt['--interactive']:
MyInteractive().cmdloop()
print(opt)
| mit | 4,122,409,750,263,172,000 | 21.679104 | 83 | 0.602501 | false |
ChristosChristofidis/h2o-3 | h2o-py/tests/testdir_algos/deeplearning/pyunit_demoDeeplearning.py | 1 | 1337 | import sys
sys.path.insert(1,"../../../")
import h2o
def deepLearningDemo(ip, port):
h2o.init(ip, port)
# Training data
train_data = h2o.import_frame(path=h2o.locate("smalldata/gbm_test/ecology_model.csv"))
train_data = train_data.drop('Site')
train_data['Angaus'] = train_data['Angaus'].asfactor()
print train_data.describe()
train_data.head()
# Testing data
test_data = h2o.import_frame(path=h2o.locate("smalldata/gbm_test/ecology_eval.csv"))
test_data['Angaus'] = test_data['Angaus'].asfactor()
print test_data.describe()
test_data.head()
# Run GBM
gbm = h2o.gbm(x = train_data[1:],
y = train_data['Angaus'],
validation_x= test_data [1:] ,
validation_y= test_data ['Angaus'],
ntrees=100,
distribution="bernoulli")
gbm.show()
# Run DeepLearning
dl = h2o.deeplearning(x = train_data[1:],
y = train_data['Angaus'],
validation_x= test_data [1:] ,
validation_y= test_data ['Angaus'],
loss = 'CrossEntropy',
epochs = 1000,
hidden = [20, 20, 20])
dl.show()
if __name__ == "__main__":
h2o.run_test(sys.argv, deepLearningDemo)
| apache-2.0 | 2,299,426,901,288,439,000 | 27.446809 | 88 | 0.529544 | false |
chrisseto/elasticsearch-py | elasticsearch/connection/thrift.py | 10 | 3872 | from __future__ import absolute_import
from socket import timeout as SocketTimeout
from socket import error as SocketError
import time
import logging
try:
from .esthrift import Rest
from .esthrift.ttypes import Method, RestRequest
from thrift.transport import TTransport, TSocket, TSSLSocket
from thrift.protocol import TBinaryProtocol
from thrift.Thrift import TException
THRIFT_AVAILABLE = True
except ImportError:
THRIFT_AVAILABLE = False
from ..exceptions import ConnectionError, ImproperlyConfigured, ConnectionTimeout
from .pooling import PoolingConnection
logger = logging.getLogger('elasticsearch')
class ThriftConnection(PoolingConnection):
"""
This connection class is deprecated and may be removed in future versions.
Connection using the `thrift` protocol to communicate with elasticsearch.
See https://github.com/elasticsearch/elasticsearch-transport-thrift for additional info.
"""
transport_schema = 'thrift'
def __init__(self, host='localhost', port=9500, framed_transport=False, use_ssl=False, **kwargs):
"""
:arg framed_transport: use `TTransport.TFramedTransport` instead of
`TTransport.TBufferedTransport`
"""
if not THRIFT_AVAILABLE:
raise ImproperlyConfigured("Thrift is not available.")
super(ThriftConnection, self).__init__(host=host, port=port, **kwargs)
self._framed_transport = framed_transport
self._tsocket_class = TSocket.TSocket
if use_ssl:
self._tsocket_class = TSSLSocket.TSSLSocket
self._tsocket_args = (host, port)
def _make_connection(self):
socket = self._tsocket_class(*self._tsocket_args)
socket.setTimeout(self.timeout * 1000.0)
if self._framed_transport:
transport = TTransport.TFramedTransport(socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
client = Rest.Client(protocol)
client.transport = transport
transport.open()
return client
def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
request = RestRequest(method=Method._NAMES_TO_VALUES[method.upper()], uri=url,
parameters=params, body=body)
start = time.time()
tclient = None
try:
tclient = self._get_connection()
response = tclient.execute(request)
duration = time.time() - start
except SocketTimeout as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
raise ConnectionTimeout('TIMEOUT', str(e), e)
except (TException, SocketError) as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
if tclient:
try:
# try closing transport socket
tclient.transport.close()
except Exception as e:
logger.warning(
                        'Exception %s occurred when closing a failed thrift connection.',
e, exc_info=True
)
raise ConnectionError('N/A', str(e), e)
self._release_connection(tclient)
if not (200 <= response.status < 300) and response.status not in ignore:
self.log_request_fail(method, url, body, duration, response.status)
self._raise_error(response.status, response.body)
self.log_request_success(method, url, url, body, response.status,
response.body, duration)
headers = {}
if response.headers:
headers = dict((k.lower(), v) for k, v in response.headers.items())
return response.status, headers, response.body or ''
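

# Minimal usage sketch (not part of the original module). It assumes a local
# Elasticsearch node with the thrift transport plugin listening on port 9500;
# the host, port and the '/' path below are illustrative assumptions only.
if __name__ == '__main__':
    connection = ThriftConnection(host='localhost', port=9500)
    status, headers, raw_body = connection.perform_request('GET', '/')
    print(status, raw_body)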
| apache-2.0 | -299,925,285,532,759,600 | 37.336634 | 101 | 0.639721 | false |
freeflightsim/ffs-app-engine | freeflightsim.appspot.com/distlib/werkzeug/contrib/jsrouting.py | 25 | 8282 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.jsrouting
~~~~~~~~~~~~~~~~~~~~~~~~~~
    Addon module that allows you to create a JavaScript function from a map
that generates rules.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from simplejson import dumps
except ImportError:
def dumps(*args):
raise RuntimeError('simplejson required for jsrouting')
from inspect import getmro
from werkzeug.templates import Template
from werkzeug.routing import NumberConverter
_javascript_routing_template = Template(u'''\
<% if name_parts %>\
<% for idx in xrange(0, len(name_parts) - 1) %>\
if (typeof ${'.'.join(name_parts[:idx + 1])} === 'undefined') \
${'.'.join(name_parts[:idx + 1])} = {};
<% endfor %>\
${'.'.join(name_parts)} = <% endif %>\
(function (server_name, script_name, subdomain, url_scheme) {
var converters = ${', '.join(converters)};
var rules = $rules;
function in_array(array, value) {
if (array.indexOf != undefined) {
return array.indexOf(value) != -1;
}
for (var i = 0; i < array.length; i++) {
if (array[i] == value) {
return true;
}
}
return false;
}
function array_diff(array1, array2) {
array1 = array1.slice();
for (var i = array1.length-1; i >= 0; i--) {
if (in_array(array2, array1[i])) {
array1.splice(i, 1);
}
}
return array1;
}
function split_obj(obj) {
var names = [];
var values = [];
for (var name in obj) {
if (typeof(obj[name]) != 'function') {
names.push(name);
values.push(obj[name]);
}
}
return {names: names, values: values, original: obj};
}
function suitable(rule, args) {
var default_args = split_obj(rule.defaults || {});
var diff_arg_names = array_diff(rule.arguments, default_args.names);
for (var i = 0; i < diff_arg_names.length; i++) {
if (!in_array(args.names, diff_arg_names[i])) {
return false;
}
}
if (array_diff(rule.arguments, args.names).length == 0) {
if (rule.defaults == null) {
return true;
}
for (var i = 0; i < default_args.names.length; i++) {
var key = default_args.names[i];
var value = default_args.values[i];
if (value != args.original[key]) {
return false;
}
}
}
return true;
}
function build(rule, args) {
var tmp = [];
var processed = rule.arguments.slice();
for (var i = 0; i < rule.trace.length; i++) {
var part = rule.trace[i];
if (part.is_dynamic) {
var converter = converters[rule.converters[part.data]];
var data = converter(args.original[part.data]);
if (data == null) {
return null;
}
tmp.push(data);
processed.push(part.name);
} else {
tmp.push(part.data);
}
}
tmp = tmp.join('');
var pipe = tmp.indexOf('|');
var subdomain = tmp.substring(0, pipe);
var url = tmp.substring(pipe+1);
var unprocessed = array_diff(args.names, processed);
var first_query_var = true;
for (var i = 0; i < unprocessed.length; i++) {
if (first_query_var) {
url += '?';
} else {
url += '&';
}
first_query_var = false;
url += encodeURIComponent(unprocessed[i]);
url += '=';
url += encodeURIComponent(args.original[unprocessed[i]]);
}
return {subdomain: subdomain, path: url};
}
function lstrip(s, c) {
while (s && s.substring(0, 1) == c) {
s = s.substring(1);
}
return s;
}
function rstrip(s, c) {
while (s && s.substring(s.length-1, s.length) == c) {
s = s.substring(0, s.length-1);
}
return s;
}
return function(endpoint, args, force_external) {
args = split_obj(args);
var rv = null;
for (var i = 0; i < rules.length; i++) {
var rule = rules[i];
if (rule.endpoint != endpoint) continue;
if (suitable(rule, args)) {
rv = build(rule, args);
if (rv != null) {
break;
}
}
}
if (rv == null) {
return null;
}
if (!force_external && rv.subdomain == subdomain) {
return rstrip(script_name, '/') + '/' + lstrip(rv.path, '/');
} else {
return url_scheme + '://'
+ (rv.subdomain ? rv.subdomain + '.' : '')
+ server_name + rstrip(script_name, '/')
+ '/' + lstrip(rv.path, '/');
}
};
})''')
def generate_map(map, name='url_map'):
"""
Generates a JavaScript function containing the rules defined in
this map, to be used with a MapAdapter's generate_javascript
method. If you don't pass a name the returned JavaScript code is
an expression that returns a function. Otherwise it's a standalone
script that assigns the function with that name. Dotted names are
    resolved (so you can use a name like 'obj.url_for')
In order to use JavaScript generation, simplejson must be installed.
Note that using this feature will expose the rules
defined in your map to users. If your rules contain sensitive
information, don't use JavaScript generation!
"""
map.update()
rules = []
converters = []
for rule in map.iter_rules():
trace = [{
'is_dynamic': is_dynamic,
'data': data
} for is_dynamic, data in rule._trace]
rule_converters = {}
for key, converter in rule._converters.iteritems():
js_func = js_to_url_function(converter)
try:
index = converters.index(js_func)
except ValueError:
converters.append(js_func)
index = len(converters) - 1
rule_converters[key] = index
rules.append({
u'endpoint': rule.endpoint,
u'arguments': list(rule.arguments),
u'converters': rule_converters,
u'trace': trace,
u'defaults': rule.defaults
})
return _javascript_routing_template.render({
'name_parts': name and name.split('.') or [],
'rules': dumps(rules),
'converters': converters
})
def generate_adapter(adapter, name='url_for', map_name='url_map'):
"""Generates the url building function for a map."""
values = {
u'server_name': dumps(adapter.server_name),
u'script_name': dumps(adapter.script_name),
u'subdomain': dumps(adapter.subdomain),
u'url_scheme': dumps(adapter.url_scheme),
u'name': name,
u'map_name': map_name
}
return u'''\
var %(name)s = %(map_name)s(
%(server_name)s,
%(script_name)s,
%(subdomain)s,
%(url_scheme)s
);''' % values
def js_to_url_function(converter):
"""Get the JavaScript converter function from a rule."""
if hasattr(converter, 'js_to_url_function'):
data = converter.js_to_url_function()
else:
for cls in getmro(type(converter)):
if cls in js_to_url_functions:
data = js_to_url_functions[cls](converter)
break
else:
return 'encodeURIComponent'
return '(function(value) { %s })' % data
def NumberConverter_js_to_url(conv):
if conv.fixed_digits:
return u'''\
var result = value.toString();
while (result.length < %s)
result = '0' + result;
return result;''' % conv.fixed_digits
return u'return value.toString();'
js_to_url_functions = {
NumberConverter: NumberConverter_js_to_url
}
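

# Illustrative sketch (not part of the original module): it builds a trivial
# URL map and prints the generated JavaScript. The rule '/page/<int:page>' and
# the endpoint name 'index' are made-up examples; simplejson must be installed
# for generate_map() to work.
if __name__ == '__main__':
    from werkzeug.routing import Map, Rule
    url_map = Map([Rule('/page/<int:page>', endpoint='index')])
    # With a name, the result is a standalone script assigning the function.
    print(generate_map(url_map, name='url_for'))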
| gpl-2.0 | 744,973,487,823,528,100 | 31.100775 | 76 | 0.515938 | false |
nkcr/WebIndex | app/venv/lib/python3.5/site-packages/nltk/stem/lancaster.py | 7 | 11234 | # Natural Language Toolkit: Stemmers
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Steven Tomcavage <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A word stemmer based on the Lancaster stemming algorithm.
Paice, Chris D. "Another Stemmer." ACM SIGIR Forum 24.3 (1990): 56-61.
"""
from __future__ import unicode_literals
import re
from nltk.stem.api import StemmerI
from nltk.compat import python_2_unicode_compatible
@python_2_unicode_compatible
class LancasterStemmer(StemmerI):
"""
Lancaster Stemmer
>>> from nltk.stem.lancaster import LancasterStemmer
>>> st = LancasterStemmer()
>>> st.stem('maximum') # Remove "-um" when word is intact
'maxim'
>>> st.stem('presumably') # Don't remove "-um" when word is not intact
'presum'
>>> st.stem('multiply') # No action taken if word ends with "-ply"
'multiply'
>>> st.stem('provision') # Replace "-sion" with "-j" to trigger "j" set of rules
'provid'
>>> st.stem('owed') # Word starting with vowel must contain at least 2 letters
'ow'
>>> st.stem('ear') # ditto
'ear'
>>> st.stem('saying') # Words starting with consonant must contain at least 3
'say'
>>> st.stem('crying') # letters and one of those letters must be a vowel
'cry'
>>> st.stem('string') # ditto
'string'
>>> st.stem('meant') # ditto
'meant'
>>> st.stem('cement') # ditto
'cem'
"""
# The rule list is static since it doesn't change between instances
rule_tuple = (
"ai*2.", # -ia > - if intact
"a*1.", # -a > - if intact
"bb1.", # -bb > -b
"city3s.", # -ytic > -ys
"ci2>", # -ic > -
"cn1t>", # -nc > -nt
"dd1.", # -dd > -d
"dei3y>", # -ied > -y
"deec2ss.", # -ceed >", -cess
"dee1.", # -eed > -ee
"de2>", # -ed > -
"dooh4>", # -hood > -
"e1>", # -e > -
"feil1v.", # -lief > -liev
"fi2>", # -if > -
"gni3>", # -ing > -
"gai3y.", # -iag > -y
"ga2>", # -ag > -
"gg1.", # -gg > -g
"ht*2.", # -th > - if intact
"hsiug5ct.", # -guish > -ct
"hsi3>", # -ish > -
"i*1.", # -i > - if intact
"i1y>", # -i > -y
"ji1d.", # -ij > -id -- see nois4j> & vis3j>
"juf1s.", # -fuj > -fus
"ju1d.", # -uj > -ud
"jo1d.", # -oj > -od
"jeh1r.", # -hej > -her
"jrev1t.", # -verj > -vert
"jsim2t.", # -misj > -mit
"jn1d.", # -nj > -nd
"j1s.", # -j > -s
"lbaifi6.", # -ifiabl > -
"lbai4y.", # -iabl > -y
"lba3>", # -abl > -
"lbi3.", # -ibl > -
"lib2l>", # -bil > -bl
"lc1.", # -cl > c
"lufi4y.", # -iful > -y
"luf3>", # -ful > -
"lu2.", # -ul > -
"lai3>", # -ial > -
"lau3>", # -ual > -
"la2>", # -al > -
"ll1.", # -ll > -l
"mui3.", # -ium > -
"mu*2.", # -um > - if intact
"msi3>", # -ism > -
"mm1.", # -mm > -m
"nois4j>", # -sion > -j
"noix4ct.", # -xion > -ct
"noi3>", # -ion > -
"nai3>", # -ian > -
"na2>", # -an > -
"nee0.", # protect -een
"ne2>", # -en > -
"nn1.", # -nn > -n
"pihs4>", # -ship > -
"pp1.", # -pp > -p
"re2>", # -er > -
"rae0.", # protect -ear
"ra2.", # -ar > -
"ro2>", # -or > -
"ru2>", # -ur > -
"rr1.", # -rr > -r
"rt1>", # -tr > -t
"rei3y>", # -ier > -y
"sei3y>", # -ies > -y
"sis2.", # -sis > -s
"si2>", # -is > -
"ssen4>", # -ness > -
"ss0.", # protect -ss
"suo3>", # -ous > -
"su*2.", # -us > - if intact
"s*1>", # -s > - if intact
"s0.", # -s > -s
"tacilp4y.", # -plicat > -ply
"ta2>", # -at > -
"tnem4>", # -ment > -
"tne3>", # -ent > -
"tna3>", # -ant > -
"tpir2b.", # -ript > -rib
"tpro2b.", # -orpt > -orb
"tcud1.", # -duct > -duc
"tpmus2.", # -sumpt > -sum
"tpec2iv.", # -cept > -ceiv
"tulo2v.", # -olut > -olv
"tsis0.", # protect -sist
"tsi3>", # -ist > -
"tt1.", # -tt > -t
"uqi3.", # -iqu > -
"ugo1.", # -ogu > -og
"vis3j>", # -siv > -j
"vie0.", # protect -eiv
"vi2>", # -iv > -
"ylb1>", # -bly > -bl
"yli3y>", # -ily > -y
"ylp0.", # protect -ply
"yl2>", # -ly > -
"ygo1.", # -ogy > -og
"yhp1.", # -phy > -ph
"ymo1.", # -omy > -om
"ypo1.", # -opy > -op
"yti3>", # -ity > -
"yte3>", # -ety > -
"ytl2.", # -lty > -l
"yrtsi5.", # -istry > -
"yra3>", # -ary > -
"yro3>", # -ory > -
"yfi3.", # -ify > -
"ycn2t>", # -ncy > -nt
"yca3>", # -acy > -
"zi2>", # -iz > -
"zy1s." # -yz > -ys
)
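
    # Each rule string above encodes, in order: the word ending spelled in
    # reverse, an optional '*' (apply only if the word is still intact, i.e.
    # unstemmed), the number of trailing characters to remove, an optional
    # string to append, and a final '>' (continue stemming) or '.' (stop).
    # These are the same five groups matched by the regex used in parseRules
    # and __doStemming below. For example, "sei3y>" matches words ending in
    # "-ies", removes three characters, appends "y" and keeps stemming, so
    # 'ladies' becomes 'lady'.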
def __init__(self):
"""Create an instance of the Lancaster stemmer.
"""
# Setup an empty rule dictionary - this will be filled in later
self.rule_dictionary = {}
def parseRules(self, rule_tuple):
"""Validate the set of rules used in this stemmer.
"""
valid_rule = re.compile("^[a-z]+\*?\d[a-z]*[>\.]?$")
# Empty any old rules from the rule set before adding new ones
self.rule_dictionary = {}
for rule in rule_tuple:
if not valid_rule.match(rule):
raise ValueError("The rule %s is invalid" % rule)
first_letter = rule[0:1]
if first_letter in self.rule_dictionary:
self.rule_dictionary[first_letter].append(rule)
else:
self.rule_dictionary[first_letter] = [rule]
def stem(self, word):
"""Stem a word using the Lancaster stemmer.
"""
# Lower-case the word, since all the rules are lower-cased
word = word.lower()
# Save a copy of the original word
intact_word = word
# If the user hasn't supplied any rules, setup the default rules
if len(self.rule_dictionary) == 0:
self.parseRules(LancasterStemmer.rule_tuple)
return self.__doStemming(word, intact_word)
def __doStemming(self, word, intact_word):
"""Perform the actual word stemming
"""
valid_rule = re.compile("^([a-z]+)(\*?)(\d)([a-z]*)([>\.]?)$")
proceed = True
while proceed:
# Find the position of the last letter of the word to be stemmed
last_letter_position = self.__getLastLetter(word)
# Only stem the word if it has a last letter and a rule matching that last letter
if last_letter_position < 0 or word[last_letter_position] not in self.rule_dictionary:
proceed = False
else:
rule_was_applied = False
# Go through each rule that matches the word's final letter
for rule in self.rule_dictionary[word[last_letter_position]]:
rule_match = valid_rule.match(rule)
if rule_match:
(ending_string,
intact_flag,
remove_total,
append_string,
cont_flag) = rule_match.groups()
# Convert the number of chars to remove when stemming
# from a string to an integer
remove_total = int(remove_total)
# Proceed if word's ending matches rule's word ending
if word.endswith(ending_string[::-1]):
if intact_flag:
if (word == intact_word and
self.__isAcceptable(word, remove_total)):
word = self.__applyRule(word,
remove_total,
append_string)
rule_was_applied = True
if cont_flag == '.':
proceed = False
break
elif self.__isAcceptable(word, remove_total):
word = self.__applyRule(word,
remove_total,
append_string)
rule_was_applied = True
if cont_flag == '.':
proceed = False
break
# If no rules apply, the word doesn't need any more stemming
if rule_was_applied == False:
proceed = False
return word
def __getLastLetter(self, word):
"""Get the zero-based index of the last alphabetic character in this string
"""
last_letter = -1
for position in range(len(word)):
if word[position].isalpha():
last_letter = position
else:
break
return last_letter
def __isAcceptable(self, word, remove_total):
"""Determine if the word is acceptable for stemming.
"""
word_is_acceptable = False
# If the word starts with a vowel, it must be at least 2
# characters long to be stemmed
if word[0] in "aeiouy":
if (len(word) - remove_total >= 2):
word_is_acceptable = True
# If the word starts with a consonant, it must be at least 3
# characters long (including one vowel) to be stemmed
elif (len(word) - remove_total >= 3):
if word[1] in "aeiouy":
word_is_acceptable = True
elif word[2] in "aeiouy":
word_is_acceptable = True
return word_is_acceptable
def __applyRule(self, word, remove_total, append_string):
"""Apply the stemming rule to the word
"""
# Remove letters from the end of the word
new_word_length = len(word) - remove_total
word = word[0:new_word_length]
# And add new letters to the end of the truncated word
if append_string:
word += append_string
return word
def __repr__(self):
return '<LancasterStemmer>'
| mit | -1,176,239,062,176,536,300 | 35.23871 | 98 | 0.422735 | false |
deKupini/erp | addons/account/report/account_general_ledger.py | 8 | 15349 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005-2006 CamptoCamp
# Copyright (c) 2006-2010 OpenERP S.A
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class general_ledger(report_sxw.rml_parse, common_report_header):
_name = 'report.account.general.ledger'
def set_context(self, objects, data, ids, report_type=None):
new_ids = ids
obj_move = self.pool.get('account.move.line')
self.sortby = data['form'].get('sortby', 'sort_date')
self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context',{}))
ctx2 = data['form'].get('used_context',{}).copy()
self.init_balance = data['form'].get('initial_balance', True)
if self.init_balance:
ctx2.update({'initial_bal': True})
self.init_query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx2)
self.display_account = data['form']['display_account']
self.target_move = data['form'].get('target_move', 'all')
ctx = self.context.copy()
ctx['fiscalyear'] = data['form']['fiscalyear_id']
if data['form']['filter'] == 'filter_period':
ctx['periods'] = data['form']['periods']
elif data['form']['filter'] == 'filter_date':
ctx['date_from'] = data['form']['date_from']
ctx['date_to'] = data['form']['date_to']
ctx['state'] = data['form']['target_move']
self.context.update(ctx)
if (data['model'] == 'ir.ui.menu'):
new_ids = [data['form']['chart_account_id']]
objects = self.pool.get('account.account').browse(self.cr, self.uid, new_ids)
return super(general_ledger, self).set_context(objects, data, new_ids, report_type=report_type)
def __init__(self, cr, uid, name, context=None):
if context is None:
context = {}
super(general_ledger, self).__init__(cr, uid, name, context=context)
self.query = ""
self.tot_currency = 0.0
self.period_sql = ""
self.sold_accounts = {}
self.sortby = 'sort_date'
self.localcontext.update( {
'time': time,
'lines': self.lines,
'sum_debit_account': self._sum_debit_account,
'sum_credit_account': self._sum_credit_account,
'sum_balance_account': self._sum_balance_account,
'sum_currency_amount_account': self._sum_currency_amount_account,
'get_children_accounts': self.get_children_accounts,
'get_fiscalyear': self._get_fiscalyear,
'get_journal': self._get_journal,
'get_account': self._get_account,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_filter': self._get_filter,
'get_sortby': self._get_sortby,
'get_start_date':self._get_start_date,
'get_end_date':self._get_end_date,
'get_target_move': self._get_target_move,
})
self.context = context
def _sum_currency_amount_account(self, account):
self.cr.execute('SELECT sum(l.amount_currency) AS tot_currency \
FROM account_move_line l \
WHERE l.account_id = %s AND %s' %(account.id, self.query))
sum_currency = self.cr.fetchone()[0] or 0.0
if self.init_balance:
self.cr.execute('SELECT sum(l.amount_currency) AS tot_currency \
FROM account_move_line l \
WHERE l.account_id = %s AND %s '%(account.id, self.init_query))
sum_currency += self.cr.fetchone()[0] or 0.0
return sum_currency
def get_children_accounts(self, account):
res = []
currency_obj = self.pool.get('res.currency')
ids_acc = self.pool.get('account.account')._get_children_and_consol(self.cr, self.uid, account.id)
currency = account.currency_id and account.currency_id or account.company_id.currency_id
for child_account in self.pool.get('account.account').browse(self.cr, self.uid, ids_acc, context=self.context):
sql = """
SELECT count(id)
FROM account_move_line AS l
WHERE %s AND l.account_id = %%s
""" % (self.query)
self.cr.execute(sql, (child_account.id,))
num_entry = self.cr.fetchone()[0] or 0
sold_account = self._sum_balance_account(child_account)
self.sold_accounts[child_account.id] = sold_account
if self.display_account == 'movement':
if child_account.type != 'view' and num_entry <> 0:
res.append(child_account)
elif self.display_account == 'not_zero':
if child_account.type != 'view' and num_entry <> 0:
if not currency_obj.is_zero(self.cr, self.uid, currency, sold_account):
res.append(child_account)
else:
res.append(child_account)
if not res:
return [account]
return res
def lines(self, account):
""" Return all the account_move_line of account with their account code counterparts """
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted', '']
# First compute all counterpart strings for every move_id where this account appear.
# Currently, the counterpart info is used only in landscape mode
sql = """
SELECT m1.move_id,
array_to_string(ARRAY(SELECT DISTINCT a.code
FROM account_move_line m2
LEFT JOIN account_account a ON (m2.account_id=a.id)
WHERE m2.move_id = m1.move_id
AND m2.account_id<>%%s), ', ') AS counterpart
FROM (SELECT move_id
FROM account_move_line l
LEFT JOIN account_move am ON (am.id = l.move_id)
WHERE am.state IN %s and %s AND l.account_id = %%s GROUP BY move_id) m1
"""% (tuple(move_state), self.query)
self.cr.execute(sql, (account.id, account.id))
counterpart_res = self.cr.dictfetchall()
counterpart_accounts = {}
for i in counterpart_res:
counterpart_accounts[i['move_id']] = i['counterpart']
del counterpart_res
# Then select all account_move_line of this account
if self.sortby == 'sort_journal_partner':
sql_sort='j.code, p.name, l.move_id'
else:
sql_sort='l.date, l.move_id'
sql = """
SELECT l.id AS lid, l.date AS ldate, j.code AS lcode, l.currency_id,l.amount_currency,l.ref AS lref, l.name AS lname, COALESCE(l.debit,0) AS debit, COALESCE(l.credit,0) AS credit, l.period_id AS lperiod_id, l.partner_id AS lpartner_id,
m.name AS move_name, m.id AS mmove_id,per.code as period_code,
c.symbol AS currency_code,
i.id AS invoice_id, i.type AS invoice_type, i.number AS invoice_number,
p.name AS partner_name
FROM account_move_line l
JOIN account_move m on (l.move_id=m.id)
LEFT JOIN res_currency c on (l.currency_id=c.id)
LEFT JOIN res_partner p on (l.partner_id=p.id)
LEFT JOIN account_invoice i on (m.id =i.move_id)
LEFT JOIN account_period per on (per.id=l.period_id)
JOIN account_journal j on (l.journal_id=j.id)
WHERE %s AND m.state IN %s AND l.account_id = %%s ORDER by %s
""" %(self.query, tuple(move_state), sql_sort)
self.cr.execute(sql, (account.id,))
res_lines = self.cr.dictfetchall()
res_init = []
if res_lines and self.init_balance:
#FIXME: replace the label of lname with a string translatable
sql = """
SELECT 0 AS lid, '' AS ldate, '' AS lcode, COALESCE(SUM(l.amount_currency),0.0) AS amount_currency, '' AS lref, 'Initial Balance' AS lname, COALESCE(SUM(l.debit),0.0) AS debit, COALESCE(SUM(l.credit),0.0) AS credit, '' AS lperiod_id, '' AS lpartner_id,
'' AS move_name, '' AS mmove_id, '' AS period_code,
'' AS currency_code,
NULL AS currency_id,
'' AS invoice_id, '' AS invoice_type, '' AS invoice_number,
'' AS partner_name
FROM account_move_line l
LEFT JOIN account_move m on (l.move_id=m.id)
LEFT JOIN res_currency c on (l.currency_id=c.id)
LEFT JOIN res_partner p on (l.partner_id=p.id)
LEFT JOIN account_invoice i on (m.id =i.move_id)
JOIN account_journal j on (l.journal_id=j.id)
WHERE %s AND m.state IN %s AND l.account_id = %%s
""" %(self.init_query, tuple(move_state))
self.cr.execute(sql, (account.id,))
res_init = self.cr.dictfetchall()
res = res_init + res_lines
account_sum = 0.0
for l in res:
l['move'] = l['move_name'] != '/' and l['move_name'] or ('*'+str(l['mmove_id']))
l['partner'] = l['partner_name'] or ''
account_sum += l['debit'] - l['credit']
l['progress'] = account_sum
l['line_corresp'] = l['mmove_id'] == '' and ' ' or counterpart_accounts[l['mmove_id']].replace(', ',',')
# Modification of amount Currency
if l['credit'] > 0:
if l['amount_currency'] != None:
l['amount_currency'] = abs(l['amount_currency']) * -1
if l['amount_currency'] != None:
self.tot_currency = self.tot_currency + l['amount_currency']
return res
def _sum_debit_account(self, account):
if account.type == 'view':
return account.debit
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted','']
self.cr.execute('SELECT sum(debit) \
FROM account_move_line l \
JOIN account_move am ON (am.id = l.move_id) \
WHERE (l.account_id = %s) \
AND (am.state IN %s) \
AND '+ self.query +' '
,(account.id, tuple(move_state)))
sum_debit = self.cr.fetchone()[0] or 0.0
if self.init_balance:
self.cr.execute('SELECT sum(debit) \
FROM account_move_line l \
JOIN account_move am ON (am.id = l.move_id) \
WHERE (l.account_id = %s) \
AND (am.state IN %s) \
AND '+ self.init_query +' '
,(account.id, tuple(move_state)))
# Add initial balance to the result
sum_debit += self.cr.fetchone()[0] or 0.0
return sum_debit
def _sum_credit_account(self, account):
if account.type == 'view':
return account.credit
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted','']
self.cr.execute('SELECT sum(credit) \
FROM account_move_line l \
JOIN account_move am ON (am.id = l.move_id) \
WHERE (l.account_id = %s) \
AND (am.state IN %s) \
AND '+ self.query +' '
,(account.id, tuple(move_state)))
sum_credit = self.cr.fetchone()[0] or 0.0
if self.init_balance:
self.cr.execute('SELECT sum(credit) \
FROM account_move_line l \
JOIN account_move am ON (am.id = l.move_id) \
WHERE (l.account_id = %s) \
AND (am.state IN %s) \
AND '+ self.init_query +' '
,(account.id, tuple(move_state)))
# Add initial balance to the result
sum_credit += self.cr.fetchone()[0] or 0.0
return sum_credit
def _sum_balance_account(self, account):
if account.type == 'view':
return account.balance
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted','']
self.cr.execute('SELECT (sum(debit) - sum(credit)) as tot_balance \
FROM account_move_line l \
JOIN account_move am ON (am.id = l.move_id) \
WHERE (l.account_id = %s) \
AND (am.state IN %s) \
AND '+ self.query +' '
,(account.id, tuple(move_state)))
sum_balance = self.cr.fetchone()[0] or 0.0
if self.init_balance:
self.cr.execute('SELECT (sum(debit) - sum(credit)) as tot_balance \
FROM account_move_line l \
JOIN account_move am ON (am.id = l.move_id) \
WHERE (l.account_id = %s) \
AND (am.state IN %s) \
AND '+ self.init_query +' '
,(account.id, tuple(move_state)))
# Add initial balance to the result
sum_balance += self.cr.fetchone()[0] or 0.0
return sum_balance
def _get_account(self, data):
if data['model'] == 'account.account':
return self.pool.get('account.account').browse(self.cr, self.uid, data['form']['id']).company_id.name
return super(general_ledger ,self)._get_account(data)
def _get_sortby(self, data):
if self.sortby == 'sort_date':
return self._translate('Date')
elif self.sortby == 'sort_journal_partner':
return self._translate('Journal & Partner')
return self._translate('Date')
class report_generalledger(osv.AbstractModel):
_name = 'report.account.report_generalledger'
_inherit = 'report.abstract_report'
_template = 'account.report_generalledger'
_wrapped_report_class = general_ledger
| agpl-3.0 | 8,732,506,112,579,240,000 | 47.882166 | 268 | 0.550459 | false |
leilihh/cinder | cinder/volume/drivers/netapp/common.py | 9 | 4451 | # Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Alex Meade. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unified driver for NetApp storage systems.
Supports multiple storage systems of different families and protocols.
"""
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _, _LI
from cinder.volume import driver
from cinder.volume.drivers.netapp import options
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
DATAONTAP_PATH = 'cinder.volume.drivers.netapp.dataontap'
ESERIES_PATH = 'cinder.volume.drivers.netapp.eseries'
# Add new drivers here, no other code changes required.
NETAPP_UNIFIED_DRIVER_REGISTRY = {
'ontap_cluster':
{
'iscsi': DATAONTAP_PATH + '.iscsi_cmode.NetAppCmodeISCSIDriver',
'nfs': DATAONTAP_PATH + '.nfs_cmode.NetAppCmodeNfsDriver',
'fc': DATAONTAP_PATH + '.fc_cmode.NetAppCmodeFibreChannelDriver'
},
'ontap_7mode':
{
'iscsi': DATAONTAP_PATH + '.iscsi_7mode.NetApp7modeISCSIDriver',
'nfs': DATAONTAP_PATH + '.nfs_7mode.NetApp7modeNfsDriver',
'fc': DATAONTAP_PATH + '.fc_7mode.NetApp7modeFibreChannelDriver'
},
'eseries':
{
'iscsi': ESERIES_PATH + '.iscsi_driver.NetAppEseriesISCSIDriver',
'fc': ESERIES_PATH + '.fc_driver.NetAppEseriesFibreChannelDriver'
}}
class NetAppDriver(driver.ProxyVD):
"""NetApp unified block storage driver.
Acts as a factory to create NetApp storage drivers based on the
storage family and protocol configured.
"""
REQUIRED_FLAGS = ['netapp_storage_family', 'netapp_storage_protocol']
def __new__(cls, *args, **kwargs):
config = kwargs.get('configuration', None)
if not config:
raise exception.InvalidInput(
reason=_('Required configuration not found'))
config.append_config_values(options.netapp_proxy_opts)
na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)
na_utils.check_netapp_lib()
app_version = na_utils.OpenStackInfo().info()
LOG.info(_LI('OpenStack OS Version Info: %(info)s'),
{'info': app_version})
kwargs['app_version'] = app_version
return NetAppDriver.create_driver(config.netapp_storage_family,
config.netapp_storage_protocol,
*args, **kwargs)
@staticmethod
def create_driver(storage_family, storage_protocol, *args, **kwargs):
"""Creates an appropriate driver based on family and protocol."""
storage_family = storage_family.lower()
storage_protocol = storage_protocol.lower()
fmt = {'storage_family': storage_family,
'storage_protocol': storage_protocol}
LOG.info(_LI('Requested unified config: %(storage_family)s and '
'%(storage_protocol)s.'), fmt)
family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
if family_meta is None:
raise exception.InvalidInput(
reason=_('Storage family %s is not supported.')
% storage_family)
driver_loc = family_meta.get(storage_protocol)
if driver_loc is None:
raise exception.InvalidInput(
reason=_('Protocol %(storage_protocol)s is not supported '
'for storage family %(storage_family)s.') % fmt)
kwargs = kwargs or {}
kwargs['netapp_mode'] = 'proxy'
driver = importutils.import_object(driver_loc, *args, **kwargs)
LOG.info(_LI('NetApp driver of family %(storage_family)s and protocol '
'%(storage_protocol)s loaded.'), fmt)
return driver
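

# Illustrative note (not part of the original driver): create_driver() only
# resolves a (storage family, protocol) pair to a class path in the registry
# above and imports it. The hypothetical lookup below simply prints the
# resolved cDOT NFS driver path.
if __name__ == '__main__':
    print(NETAPP_UNIFIED_DRIVER_REGISTRY['ontap_cluster']['nfs'])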
| apache-2.0 | -1,998,569,655,063,882,000 | 37.37069 | 79 | 0.649742 | false |
malcolmhutchinson/nztrips | trips/migrations/0001_initial.py | 1 | 19727 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-11 06:49
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='PointsOfInterest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('age_of_dgps_data', models.TextField(blank=True, null=True)),
('comment', models.TextField(blank=True, null=True)),
('description', models.TextField(blank=True, null=True)),
('dgps_id', models.TextField(blank=True, null=True)),
('elevation', models.TextField(blank=True, null=True)),
('extensions', models.TextField(blank=True, null=True)),
('geoid_height', models.TextField(blank=True, null=True)),
('horizontal_dilution', models.TextField(blank=True, null=True)),
('latitude', models.TextField(blank=True, null=True)),
('link', models.TextField(blank=True, null=True)),
('link_text', models.TextField(blank=True, null=True)),
('link_type', models.TextField(blank=True, null=True)),
('longitude', models.TextField(blank=True, null=True)),
('magnetic_variation', models.TextField(blank=True, null=True)),
('name', models.TextField(blank=True, null=True)),
('position_dilution', models.TextField(blank=True, null=True)),
('satellites', models.TextField(blank=True, null=True)),
('source', models.TextField(blank=True, null=True)),
('symbol', models.TextField(blank=True, null=True)),
('time', models.TextField(blank=True, null=True)),
('gtype', models.TextField(blank=True, null=True)),
('type_of_gpx_fix', models.TextField(blank=True, null=True)),
('vertical_dilution', models.TextField(blank=True, null=True)),
('owner', models.CharField(blank=True, max_length=255, null=True)),
('group', models.CharField(blank=True, max_length=255, null=True)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('provenance', models.CharField(blank=True, max_length=255, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
],
),
migrations.CreateModel(
name='Route',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(blank=True, null=True)),
('description', models.TextField(blank=True, null=True)),
('extensions', models.TextField(blank=True, null=True)),
('gtype', models.TextField(blank=True, null=True)),
('link', models.TextField(blank=True, null=True)),
('link_text', models.TextField(blank=True, null=True)),
('link_type', models.TextField(blank=True, null=True)),
('name', models.TextField(blank=True, null=True)),
('number', models.TextField(blank=True, null=True)),
('points', models.TextField(blank=True, null=True)),
('source', models.TextField(blank=True, null=True)),
('owner', models.CharField(blank=True, max_length=255, null=True)),
('group', models.CharField(blank=True, max_length=255, null=True)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('provenance', models.CharField(blank=True, max_length=255, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='RoutePoint',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('age_of_dgps_data', models.TextField(blank=True, null=True)),
('comment', models.TextField(blank=True, null=True)),
('description', models.TextField(blank=True, null=True)),
('dgps_id', models.TextField(blank=True, null=True)),
('elevation', models.FloatField(blank=True, null=True)),
('extensions', models.TextField(blank=True, null=True)),
('geoid_height', models.TextField(blank=True, null=True)),
('gtype', models.TextField(blank=True, null=True)),
('horizontal_dilution', models.TextField(blank=True, null=True)),
('latitude', models.FloatField(blank=True, null=True)),
('link', models.TextField(blank=True, null=True)),
('link_text', models.TextField(blank=True, null=True)),
('link_type', models.TextField(blank=True, null=True)),
('longitude', models.FloatField(blank=True, null=True)),
('magnetic_variation', models.TextField(blank=True, null=True)),
('name', models.TextField(blank=True, null=True)),
('position_dilution', models.TextField(blank=True, null=True)),
('satellites', models.TextField(blank=True, null=True)),
('source', models.TextField(blank=True, null=True)),
('symbol', models.TextField(blank=True, null=True)),
('time', models.DateTimeField(blank=True, null=True)),
('type_of_gpx_fix', models.TextField(blank=True, null=True)),
('vertical_dilution', models.TextField(blank=True, null=True)),
('ordinal', models.IntegerField(default=0)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('provenance', models.CharField(blank=True, max_length=255, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
('route', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trips.Route')),
],
),
migrations.CreateModel(
name='Template',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('trip_type', models.CharField(choices=[('air', 'air'), ('boat', 'boat'), ('cycle', 'cycle'), ('road', 'road'), ('tramping', 'tramping')], default='tramping', max_length=64)),
('name', models.CharField(blank=True, max_length=255, null=True)),
('subject', models.CharField(blank=True, max_length=255, null=True)),
('description', models.TextField(blank=True, null=True)),
('location', models.CharField(blank=True, max_length=255, null=True)),
('days_length', models.IntegerField(default=1)),
('owner', models.CharField(blank=True, max_length=255, null=True)),
('group', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TemplateNote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('owner', models.CharField(blank=True, max_length=255, null=True)),
('group', models.CharField(blank=True, max_length=255, null=True)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('content', models.TextField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('template', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trips.Template')),
],
),
migrations.CreateModel(
name='Track',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(blank=True, null=True)),
('description', models.TextField(blank=True, null=True)),
('extensions', models.TextField(blank=True, null=True)),
('gtype', models.TextField(blank=True, null=True)),
('link', models.TextField(blank=True, null=True)),
('link_text', models.TextField(blank=True, null=True)),
('link_type', models.TextField(blank=True, null=True)),
('name', models.TextField(blank=True, null=True)),
('number', models.TextField(blank=True, null=True)),
('source', models.TextField(blank=True, null=True)),
('owner', models.CharField(blank=True, max_length=255, null=True)),
('group', models.CharField(blank=True, max_length=255, null=True)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('provenance', models.CharField(blank=True, max_length=255, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('geom', django.contrib.gis.db.models.fields.MultiLineStringField(blank=True, null=True, srid=4326)),
],
),
migrations.CreateModel(
name='TrackPoint',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('age_of_dgps_data', models.TextField(blank=True, null=True)),
('comment', models.TextField(blank=True, null=True)),
('course', models.TextField(blank=True, null=True)),
('description', models.TextField(blank=True, null=True)),
('dgps_id', models.TextField(blank=True, null=True)),
('elevation', models.FloatField(blank=True, null=True)),
('extensions', models.TextField(blank=True, null=True)),
('geoid_height', models.TextField(blank=True, null=True)),
('gtype', models.TextField(blank=True, null=True)),
('horizontal_dilution', models.TextField(blank=True, null=True)),
('latitude', models.FloatField(blank=True, null=True)),
('link', models.TextField(blank=True, null=True)),
('link_text', models.TextField(blank=True, null=True)),
('link_type', models.TextField(blank=True, null=True)),
('longitude', models.FloatField(blank=True, null=True)),
('magnetic_variation', models.TextField(blank=True, null=True)),
('name', models.TextField(blank=True, null=True)),
('position_dilution', models.TextField(blank=True, null=True)),
('satellites', models.TextField(blank=True, null=True)),
('source', models.TextField(blank=True, null=True)),
('speed', models.TextField(blank=True, null=True)),
('symbol', models.TextField(blank=True, null=True)),
('time', models.DateTimeField(blank=True, null=True)),
('type_of_gpx_fix', models.TextField(blank=True, null=True)),
('vertical_dilution', models.TextField(blank=True, null=True)),
('ordinal', models.IntegerField(default=0)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('provenance', models.CharField(blank=True, max_length=255, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
],
options={
'ordering': ['segment', 'ordinal'],
},
),
migrations.CreateModel(
name='TrackSegment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('extensions', models.TextField(blank=True, null=True)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('provenance', models.CharField(blank=True, max_length=255, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('ordinal', models.IntegerField(default=0)),
('geom', django.contrib.gis.db.models.fields.LineStringField(blank=True, null=True, srid=4326)),
('track', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trips.Track')),
],
),
migrations.CreateModel(
name='Trip',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('trip_type', models.CharField(choices=[('air', 'air'), ('boat', 'boat'), ('cycle', 'cycle'), ('road', 'road'), ('tramping', 'tramping')], default='tramping', max_length=64)),
('name', models.CharField(blank=True, max_length=255, null=True)),
('subject', models.CharField(blank=True, max_length=255, null=True)),
('description', models.TextField(blank=True, null=True)),
('location', models.CharField(blank=True, max_length=255, null=True)),
('days_length', models.IntegerField(default=1)),
('owner', models.CharField(blank=True, max_length=255, null=True)),
('group', models.CharField(blank=True, max_length=255, null=True)),
('start_date_planned', models.DateField(blank=True, null=True)),
('end_date_planned', models.DateField(blank=True, null=True)),
('start_date_actual', models.DateField(blank=True, null=True)),
('end_date_actual', models.DateField(blank=True, null=True)),
('templates', models.ManyToManyField(to='trips.Template')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TripNote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('owner', models.CharField(blank=True, max_length=255, null=True)),
('group', models.CharField(blank=True, max_length=255, null=True)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('content', models.TextField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('trip', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notes', to='trips.Trip')),
],
),
migrations.CreateModel(
name='TripReport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('working', 'working'), ('pending', 'pending'), ('publshed', 'published'), ('withdrawn', 'withdrawn')], default='Unclassified', max_length=64)),
('date_pub', models.DateTimeField()),
('author', models.CharField(blank=True, max_length=255, null=True)),
('report_text', models.TextField(blank=True, null=True)),
('trip', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trips.Trip')),
],
),
migrations.CreateModel(
name='Waypoint',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('age_of_dgps_data', models.TextField(blank=True, null=True)),
('comment', models.TextField(blank=True, null=True)),
('description', models.TextField(blank=True, null=True)),
('dgps_id', models.TextField(blank=True, null=True)),
('elevation', models.FloatField(blank=True, null=True)),
('extensions', models.TextField(blank=True, null=True)),
('geoid_height', models.TextField(blank=True, null=True)),
('horizontal_dilution', models.TextField(blank=True, null=True)),
('latitude', models.FloatField(blank=True, null=True)),
('link', models.TextField(blank=True, null=True)),
('link_text', models.TextField(blank=True, null=True)),
('link_type', models.TextField(blank=True, null=True)),
('longitude', models.FloatField(blank=True, null=True)),
('magnetic_variation', models.TextField(blank=True, null=True)),
('name', models.TextField(blank=True, null=True)),
('position_dilution', models.TextField(blank=True, null=True)),
('satellites', models.TextField(blank=True, null=True)),
('source', models.TextField(blank=True, null=True)),
('symbol', models.TextField(blank=True, null=True)),
('time', models.DateTimeField(blank=True, null=True)),
('gtype', models.TextField(blank=True, null=True)),
('type_of_gpx_fix', models.TextField(blank=True, null=True)),
('vertical_dilution', models.TextField(blank=True, null=True)),
('owner', models.CharField(blank=True, max_length=255, null=True)),
('group', models.CharField(blank=True, max_length=255, null=True)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('provenance', models.CharField(blank=True, max_length=255, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
('trip', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trips.Trip')),
],
),
migrations.AddField(
model_name='trackpoint',
name='segment',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trips.TrackSegment'),
),
migrations.AddField(
model_name='track',
name='trip',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trips.Trip'),
),
migrations.AddField(
model_name='route',
name='templates',
field=models.ManyToManyField(to='trips.Template'),
),
migrations.AddField(
model_name='route',
name='trips',
field=models.ManyToManyField(to='trips.Trip'),
),
migrations.AddField(
model_name='pointsofinterest',
name='template',
field=models.ManyToManyField(related_name='pois', to='trips.Template'),
),
migrations.AddField(
model_name='pointsofinterest',
name='trips',
field=models.ManyToManyField(related_name='pois', to='trips.Trip'),
),
]
| gpl-3.0 | -1,325,670,313,824,942,000 | 59.698462 | 197 | 0.564911 | false |
PDuckworth/strands_qsr_lib | qsr_prob_rep/src/qsrrep_pf/pf_model.py | 5 | 3043 | # -*- coding: utf-8 -*-
import numpy as np
from probability_density_functions import PredictionPdf, ObservationPdf
import json
class PfModel(object):
def __init__(self):
self._model = {}
def add_model(self, name, pred, obs):
"""Add a model to the particle filter model representation.
:param name: the name of the model, used to idenify it afterwards
:param pred: the prediction MxM matrix as a list or numpy array
:param obs: the observation scoring MxM matrix as a list or numpy array
"""
if name in self._model:
raise KeyError("Key '%s' already in model." % name)
pred = np.array(pred)
obs = np.array(obs)
if pred.shape != obs.shape:
raise IndexError("Prediction and observation matrix have to have the same shape.")
self._model[name] = {
PredictionPdf.key: pred.tolist(),
ObservationPdf.key: obs.tolist()
}
def add_prediction_matrix(self, name, pred):
"""Add a prediction matrix to an existing model. If model does not exists
yet, a new one will be created. Overrides existing prediction matrix if
model with the same name exists.
        :param name: the name of the model, used to identify it afterwards
:param pred: the prediction MxM matrix as a list or numpy array
"""
if not name in self._model:
self._model[name] = {}
pred = np.array(pred)
self._model[name][PredictionPdf.key] = pred.tolist()
def add_observation_matrix(self, name, obs):
"""Add an observation matrix to an existing model. If model does not exists
yet, a new one will be created. Overrides existing observation matrix if
model with the same name exists.
        :param name: the name of the model, used to identify it afterwards
:param obs: the observation scoring MxM matrix as a list or numpy array
"""
if not name in self._model:
self._model[name] = {}
obs = np.array(obs)
self._model[name][ObservationPdf.key] = obs.tolist()
def __check_model(self):
for k,v in self._model.items():
if PredictionPdf.key not in v:
raise KeyError("'%s' matrix not in dictionary for model '%s'" % (PredictionPdf.key, k))
if ObservationPdf.key not in v:
raise KeyError("'%s' matrix not in dictionary for model '%s'" % (ObservationPdf.key, k))
pred = np.array(v[PredictionPdf.key])
obs = np.array(v[ObservationPdf.key])
if pred.shape != obs.shape:
raise IndexError("Prediction and observation matrix have to have the same shape for model '%s'." %k)
def to_string(self):
"""
:return: json string of model
"""
self.__check_model()
return json.dumps(self._model)
def get(self):
"""
:return: the model dict
"""
self.__check_model()
return self._model
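

# Minimal usage sketch (not part of the original module). The model name 'qtc'
# and the 2x2 matrices below are made-up illustrative values.
if __name__ == '__main__':
    example = PfModel()
    example.add_model(
        'qtc',
        pred=[[0.7, 0.3], [0.4, 0.6]],  # prediction (state transition) matrix
        obs=[[0.9, 0.1], [0.2, 0.8]]    # observation scoring matrix
    )
    print(example.to_string())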
| mit | 8,792,007,464,509,104,000 | 34.383721 | 116 | 0.600723 | false |
fabsx00/joern-old | sourceutils/pythonCFGs/CSVToCFG.py | 1 | 14333 | import pickle, os
from sourceutils.csvASTs.CSVProcessor import CSVProcessor
from sourceutils.csvASTs.CSVRowAccessors import getCSVRowType, getCSVRowLevel
from sourceutils.pythonCFGs.CFG import CFG, BasicBlock
labelNode = 'label'
returnNode = 'return'
breakNode = 'break'
continueNode = 'continue'
gotoNode = 'goto'
breakOrContinue = set([breakNode, continueNode])
controlStatementNodes = set([returnNode, gotoNode]) | breakOrContinue
ifNode = 'if'
conditionNode = 'cond'
switchNode = 'switch'
elseNode = 'else'
doNode = 'do'
loopNodes = set(['for', 'while', doNode])
scopeIncreaseNodes = set([ifNode, switchNode, conditionNode, elseNode]) | loopNodes
class CSV2CFG(CSVProcessor):
def __init__(self):
CSVProcessor.__init__(self)
self.currentCFG = None
self.resetStacks()
self.defaultHandler = self.handleNode
self.functionLevel = -1
def resetStacks(self):
self.scopeStack = []
self.returnStack = []
self.breakContinueStack = []
self.ifBodyStack = []
self.gotoList = []
def _initCFG(self, row):
self.currentCFG = CFG()
self.currentCFG.addNode(BasicBlock(row))
self.currentLevel = int(getCSVRowLevel(row)) + 1
self.functionLevel = self.currentLevel - 1
self.functionName = row[5]
self.functionPos = row[1]
self.resetStacks()
def handleNode(self, row):
lineType = getCSVRowType(row)
if self._isInFunction():
self.handleNodeInFunction(row)
else:
if lineType == 'func':
self._initCFG(row)
def handleNodeInFunction(self, row):
level = int(getCSVRowLevel(row))
if self.isInScope(level):
self.handleNodeInScope(row, level)
else:
self.leaveScope(row, level)
def handleNodeInScope(self, row, level):
if self.isScopeIncreaseNode(row):
self.handleScopeIncreaseNode(row, level)
elif self.isControlStatementNode(row):
self.handleControlStatement(row, level)
elif self.isLabelNode(row):
self.handleLabelNode(row, level)
else:
self.defaultNodeHandler(row, level)
def isInScope(self, level):
return level >= self.getCurrentLevel()
def isScopeIncreaseNode(self, row):
return getCSVRowType(row) in scopeIncreaseNodes
def isControlStatementNode(self, row):
return getCSVRowType(row) in controlStatementNodes
def isLabelNode(self, row):
return getCSVRowType(row) == labelNode
# loops, if, else, switch
def handleScopeIncreaseNode(self, row, level):
currentNodeId = self.currentCFG.getCurrentNodeId()
nodeType = getCSVRowType(row)
currentNode = self.currentCFG.getNodeById(currentNodeId)
if nodeType == elseNode:
self.handleElseNode(currentNodeId, row)
return
if currentNode.rows != [] and nodeType != conditionNode:
self.createAndConnectNode(row)
else:
self.currentCFG.appendToLatestNode(row)
self.enterScope(row)
def getCondition(self, predNodeId):
predNode = self.currentCFG.getNodeById(predNodeId)
return predNode.getCondition()
def handleElseNode(self, currentNodeId, row):
(predNodeId, unused1, predNodeLevel) = self.scopeStack[-1]
conditionStr = self.getCondition(predNodeId)
conditionStr += ' == False'
self.currentCFG.addEdge(predNodeId, currentNodeId + 1, conditionStr)
self.currentCFG.addNode(BasicBlock(row))
self.ifBodyStack.append((currentNodeId, predNodeLevel))
self.enterScope(row)
def createAndConnectNode(self, row = None, conditionStr=None):
newBasicBlock = BasicBlock(row)
if row: newBasicBlock.blockType = row[0]
newNodeId = self.currentCFG.addNode(newBasicBlock)
self.currentCFG.addEdge(newNodeId - 1, newNodeId, conditionStr)
def enterScope(self, row):
# TODO: simplify: just push nodeId and row
currentNodeId = self.currentCFG.getCurrentNodeId()
level = int(getCSVRowLevel(row))
nodeType = getCSVRowType(row)
self.scopeStack.append((currentNodeId, nodeType, level))
self.currentLevel = level + 1
# print 'enter scope: %s' % (self.scopeStack)
def handleControlStatement(self, row, level):
currentNodeId = self.currentCFG.getCurrentNodeId()
rowType = getCSVRowType(row)
if rowType == returnNode:
self.returnStack.append(currentNodeId)
elif rowType in breakOrContinue:
self.breakContinueStack.append((currentNodeId, rowType, level))
elif rowType == gotoNode:
self.gotoList.append((currentNodeId, row))
self.currentCFG.appendToLatestNode(row)
def handleLabelNode(self, row, level):
currentNodeId = self.currentCFG.getCurrentNodeId()
currentNode = self.currentCFG.getNodeById(currentNodeId)
if currentNode.rows != []:
self.currentCFG.addNode(BasicBlock(row))
currentNodeId = self.currentCFG.getCurrentNodeId()
previousNode = self.currentCFG.getNodeById(currentNodeId -1)
lastInstrType = previousNode.getLastInstrType()
if not lastInstrType in controlStatementNodes:
self.currentCFG.addEdge(currentNodeId -1, currentNodeId)
else:
self.currentCFG.appendToLatestNode(row)
self.currentCFG.labeledNodes.append((currentNodeId,row))
def defaultNodeHandler(self, row, level):
self.currentCFG.appendToLatestNode(row)
#####
def connectPredicateToExitNode(self, predicateNodeId):
conditionStr = self.getCondition(predicateNodeId)
conditionStr += ' == False'
self.currentCFG.addEdge(predicateNodeId,
self.currentCFG.getCurrentNodeId(),
conditionStr)
def onCFGFinished(self):
labelDict = self._genLabelDict()
for (nodeId, row) in self.gotoList:
dstLabel = row[4].strip()
self.currentCFG.removeEdgesFrom(nodeId)
            if dstLabel not in labelDict:
print 'can\'t resolve label : ' + dstLabel
continue
dstNodeId = labelDict[dstLabel]
self.currentCFG.addEdge(nodeId, dstNodeId)
self.createAndConnectNode()
exitNodeId = self.currentCFG.getCurrentNodeId()
for nodeId in self.returnStack:
self.currentCFG.removeEdge(nodeId, nodeId + 1)
self.currentCFG.addEdge(nodeId, exitNodeId)
self.currentCFG.registerSuccessors()
def _genLabelDict(self):
d = dict()
for (nodeId, row) in self.currentCFG.labeledNodes:
label = row[4][:-1].strip()
d[label] = nodeId
return d
def leaveScope(self, row, level):
while level < self.currentLevel:
if self.scopeStack == []:
if level > self.functionLevel:
print 'Error: scopeStack empty but level > functionLevel: %d > %d' % (level, self.functionLevel)
self.onCFGFinished()
self.outputAndReset()
self.currentLevel = -1
if row[0] == 'func':
self._initCFG(row)
return
previousLevel = self.currentLevel
(predicateNodeId, predicateNodeType) = self.exitScope()
if self.leavingIfScope(predicateNodeType):
self.leaveIfScope(previousLevel, predicateNodeId)
elif self.leavingSwitchScope(predicateNodeType):
self.leaveSwitchScope(predicateNodeId)
elif self.leavingConditionScope(predicateNodeType):
conditionStr = self.getCondition(predicateNodeId)
conditionStr += ' == True'
self.createAndConnectNode(None, conditionStr)
if predicateNodeType in (loopNodes | set([switchNode])):
self.leaveLoopOrSwitch(predicateNodeType, predicateNodeId)
# Now all scopes, which were closed by this row are closed.
self.handleNodeInScope(row, level)
def leaveIfScope(self, previousLevel, predicateNodeId):
self.createAndConnectNode()
exitNodeId = self.currentCFG.getCurrentNodeId()
if self.ifBodyStack != []:
(ifBodyId, ifBodyLevel) = self.ifBodyStack.pop()
if ifBodyLevel != previousLevel -1:
self.ifBodyStack.append((ifBodyId, ifBodyLevel))
self.connectPredicateToExitNode(predicateNodeId)
else:
self.currentCFG.addEdge(ifBodyId, exitNodeId)
else:
self.connectPredicateToExitNode(predicateNodeId)
def leaveSwitchScope(self, predicateNodeId):
self.createAndConnectNode()
labeledNodesCopy = self.currentCFG.labeledNodes[:]
while labeledNodesCopy != []:
(labelNodeId, labelRow) = labeledNodesCopy.pop()
labelLevel = int(getCSVRowLevel(labelRow))
if labelLevel <= self.getCurrentLevel():
break
conditionStr = self.getCondition(predicateNodeId)
conditionStr += ' == ' + labelRow[4]
self.currentCFG.addEdge(predicateNodeId, labelNodeId, conditionStr)
def leaveLoopOrSwitch(self, predicateNodeType, predicateNodeId):
if predicateNodeType == doNode:
exitNodeId = self.leaveDoScope(predicateNodeId)
elif predicateNodeType == switchNode:
exitNodeId = self.currentCFG.getCurrentNodeId()
else:
self.currentCFG.addNode(BasicBlock(None))
exitNodeId = self.currentCFG.getCurrentNodeId()
conditionStr = self.getCondition(predicateNodeId) + ' == False'
self.currentCFG.addEdge(predicateNodeId, exitNodeId, conditionStr)
self.currentCFG.addEdge(exitNodeId - 1, predicateNodeId)
self.attachBreakAndContinueNodes(exitNodeId, predicateNodeId)
def leaveDoScope(self, predicateNodeId):
self.createAndConnectNode()
condNodeId = self.currentCFG.getCurrentNodeId()
conditionStr1 = self.getCondition(predicateNodeId)
conditionStr = conditionStr1 + ' == False'
self.createAndConnectNode(None, conditionStr)
exitNodeId = self.currentCFG.getCurrentNodeId()
conditionStr = conditionStr1 + ' == True'
self.currentCFG.addEdge(condNodeId, predicateNodeId, conditionStr)
self.currentCFG.removeEdgesFrom(predicateNodeId)
self.currentCFG.addEdge(predicateNodeId, predicateNodeId + 1)
predicateNode = self.currentCFG.getNodeById(predicateNodeId)
condition = predicateNode.rows[:]
predicateNode.rows = []
self.currentCFG.getNodeById(condNodeId).rows.extend(condition)
predicateNode.blockType = 'do'
return exitNodeId
def attachBreakAndContinueNodes(self, currentNodeId, predicateNodeId):
while self.breakContinueStack != []:
(breakNodeId, breakNodeType, breakNodeLevel) = self.breakContinueStack.pop()
if breakNodeLevel <= self.currentLevel:
self.breakContinueStack.append((breakNodeId, breakNodeType, breakNodeLevel))
break
self.currentCFG.removeEdge(breakNodeId, breakNodeId + 1)
if breakNodeType == breakNode:
self.currentCFG.addEdge(breakNodeId, currentNodeId)
elif breakNodeType == continueNode:
self.currentCFG.addEdge(breakNodeId, predicateNodeId)
def exitScope(self):
(predicateNodeId, predicateNodeType, unusedNodeLevel) = self.scopeStack.pop()
self.adjustIfBodyStack()
try:
self.currentLevel = self.scopeStack[-1][2] + 1
except:
self.currentLevel = self.functionLevel + 1
# print 'exitScope %s %d' %(self.scopeStack, self.currentLevel)
return (predicateNodeId, predicateNodeType)
def terminateFunction(self):
row = ['exitNode', '0:0', '0:0', '%d' % (self.functionLevel)]
self.handleNode(row)
def _isInFunction(self):
return self.currentCFG != None
def _resetCFG(self):
self.currentCFG = None
self.functionLevel = -1
def outputAndReset(self):
self.save()
self._resetCFG()
def leavingScope(self, level):
return (level < self.getCurrentLevel())
def leavingConditionScope(self, predicateNodeType):
return (predicateNodeType == conditionNode)
def leavingElseScope(self, predicateNodeType):
return (predicateNodeType == elseNode)
def leavingIfScope(self, predicateNodeType):
return (predicateNodeType == ifNode)
def leavingSwitchScope(self, predicatedNodeType):
return (predicatedNodeType == switchNode)
def enteringElseScope(self, currentNodeType):
return (currentNodeType == elseNode)
def getCurrentLevel(self):
return self.currentLevel
def adjustIfBodyStack(self):
self.ifBodyStack = [i for i in self.ifBodyStack if i[1] <= self.currentLevel]
def save(self):
        outputDir = '/'.join(self.currentFile.split('/')[:-1])
outputDir += '/' + self.functionName + '_' + self.functionPos.replace(':', '_')
if not os.path.exists(outputDir):
os.mkdir(outputDir)
outputFilename = outputDir + '/cfg.pickle'
f = open(outputFilename, 'wb')
pickle.dump(self.currentCFG, f)
f.close()
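
# --- Editor's note (illustrative, not part of the original module) ---
# The CSV rows handled above are plain lists; from the accesses in this file
# (getCSVRowType -> row[0], row[1] used as the position, getCSVRowLevel ->
# row[3], row[4] as code/label text and row[5] as the function name on
# 'func' rows) a 'func' row looks roughly like:
#     ['func', '10:0', '25:1', '0', 'int main (...)', 'main']
# The exact column semantics are defined by the CSV exporter in
# sourceutils.csvASTs and may differ; treat this as an orientation aid only.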
| gpl-3.0 | -4,552,906,977,551,252,000 | 35.19697 | 116 | 0.612433 | false |
arbn/pysaml2 | src/saml2/mdstore.py | 1 | 23913 | import logging
import sys
import json
from hashlib import sha1
from saml2.httpbase import HTTPBase
from saml2.extension.idpdisc import BINDING_DISCO
from saml2.extension.idpdisc import DiscoveryResponse
from saml2.mdie import to_dict
from saml2 import md
from saml2 import samlp
from saml2 import SAMLError
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_SOAP
from saml2.s_utils import UnsupportedBinding, UnknownPrincipal
from saml2.sigver import split_len
from saml2.validate import valid_instance
from saml2.time_util import valid
from saml2.validate import NotValid
from saml2.sigver import security_context
__author__ = 'rolandh'
logger = logging.getLogger(__name__)
class ToOld(Exception):
pass
REQ2SRV = {
# IDP
"authn_request": "single_sign_on_service",
"name_id_mapping_request": "name_id_mapping_service",
# AuthnAuthority
"authn_query": "authn_query_service",
# AttributeAuthority
"attribute_query": "attribute_service",
# PDP
"authz_decision_query": "authz_service",
# AuthnAuthority + IDP + PDP + AttributeAuthority
"assertion_id_request": "assertion_id_request_service",
# IDP + SP
"logout_request": "single_logout_service",
"manage_name_id_request": "manage_name_id_service",
"artifact_query": "artifact_resolution_service",
# SP
"assertion_response": "assertion_consumer_service",
"attribute_response": "attribute_consuming_service",
"discovery_service_request": "discovery_response"
}
ENTITYATTRIBUTES = "urn:oasis:names:tc:SAML:metadata:attribute&EntityAttributes"
# ---------------------------------------------------
def destinations(srvs):
return [s["location"] for s in srvs]
def attribute_requirement(entity):
res = {"required": [], "optional": []}
for acs in entity["attribute_consuming_service"]:
for attr in acs["requested_attribute"]:
if "is_required" in attr and attr["is_required"] == "true":
res["required"].append(attr)
else:
res["optional"].append(attr)
return res
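
# Editor's note (illustrative): for an SP entity with one mandatory and one
# optional <RequestedAttribute>, the function above returns a dict of the form
#     {"required": [<requested_attribute dict>], "optional": [<requested_attribute dict>]}
# where each entry is the parsed RequestedAttribute element from the metadata.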
def name(ent, langpref="en"):
try:
org = ent["organization"]
except KeyError:
return None
for info in ["organization_display_name",
"organization_name",
"organization_url"]:
try:
for item in org[info]:
if item["lang"] == langpref:
return item["text"]
except KeyError:
pass
return None
def repack_cert(cert):
part = cert.split("\n")
if len(part) == 1:
part = part[0].strip()
return "\n".join(split_len(part, 64))
else:
return "\n".join([s.strip() for s in part])
class MetaData(object):
def __init__(self, onts, attrc, metadata=""):
self.onts = onts
self.attrc = attrc
self.entity = {}
self.metadata = metadata
def items(self):
return self.entity.items()
def keys(self):
return self.entity.keys()
def values(self):
return self.entity.values()
def __contains__(self, item):
return item in self.entity
def __getitem__(self, item):
return self.entity[item]
def do_entity_descriptor(self, entity_descr):
try:
if not valid(entity_descr.valid_until):
logger.info("Entity descriptor (entity id:%s) to old" % (
entity_descr.entity_id,))
return
except AttributeError:
pass
        # have I seen this entity_id before? If so, warn and ignore it
if entity_descr.entity_id in self.entity:
print >> sys.stderr,\
"Duplicated Entity descriptor (entity id: '%s')" %\
entity_descr.entity_id
return
_ent = to_dict(entity_descr, self.onts)
flag = 0
# verify support for SAML2
for descr in ["spsso", "idpsso", "role", "authn_authority",
"attribute_authority", "pdp", "affiliation"]:
_res = []
try:
_items = _ent["%s_descriptor" % descr]
except KeyError:
continue
if descr == "affiliation": # Not protocol specific
flag += 1
continue
for item in _items:
for prot in item["protocol_support_enumeration"].split(" "):
if prot == samlp.NAMESPACE:
item["protocol_support_enumeration"] = prot
_res.append(item)
break
if not _res:
del _ent["%s_descriptor" % descr]
else:
flag += 1
if flag:
self.entity[entity_descr.entity_id] = _ent
def parse(self, xmlstr):
self.entities_descr = md.entities_descriptor_from_string(xmlstr)
if not self.entities_descr:
self.entity_descr = md.entity_descriptor_from_string(xmlstr)
if self.entity_descr:
self.do_entity_descriptor(self.entity_descr)
else:
try:
valid_instance(self.entities_descr)
except NotValid, exc:
logger.error(exc.args[0])
return
try:
if not valid(self.entities_descr.valid_until):
raise ToOld("Metadata not valid anymore")
except AttributeError:
pass
for entity_descr in self.entities_descr.entity_descriptor:
self.do_entity_descriptor(entity_descr)
def load(self):
self.parse(self.metadata)
def _service(self, entity_id, typ, service, binding=None):
""" Get me all services with a specified
entity ID and type, that supports the specified version of binding.
:param entity_id: The EntityId
:param typ: Type of service (idp, attribute_authority, ...)
:param service: which service that is sought for
:param binding: A binding identifier
:return: list of service descriptions.
        Or, if no binding was specified, a dict mapping each supported binding
        to a list of service descriptions.
"""
logger.debug("_service(%s, %s, %s, %s)" % (entity_id, typ, service,
binding))
try:
srvs = []
for t in self[entity_id][typ]:
try:
srvs.extend(t[service])
except KeyError:
pass
except KeyError:
return None
if not srvs:
return srvs
if binding:
res = []
for srv in srvs:
if srv["binding"] == binding:
res.append(srv)
else:
res = {}
for srv in srvs:
try:
res[srv["binding"]].append(srv)
except KeyError:
res[srv["binding"]] = [srv]
logger.debug("_service => %s" % res)
return res
def _ext_service(self, entity_id, typ, service, binding):
try:
srvs = self[entity_id][typ]
except KeyError:
return None
if not srvs:
return srvs
res = []
for srv in srvs:
if "extensions" in srv:
for elem in srv["extensions"]["extension_elements"]:
if elem["__class__"] == service:
if elem["binding"] == binding:
res.append(elem)
return res
def any(self, typ, service, binding=None):
"""
Return any entity that matches the specification
:param typ:
:param service:
:param binding:
:return:
"""
res = {}
for ent in self.keys():
bind = self._service(ent, typ, service, binding)
if bind:
res[ent] = bind
return res
def bindings(self, entity_id, typ, service):
"""
Get me all the bindings that are registered for a service entity
:param entity_id:
:param service:
:return:
"""
return self._service(entity_id, typ, service)
def attribute_requirement(self, entity_id, index=0):
""" Returns what attributes the SP requires and which are optional
if any such demands are registered in the Metadata.
:param entity_id: The entity id of the SP
:param index: which of the attribute consumer services its all about
:return: 2-tuple, list of required and list of optional attributes
"""
res = {"required": [], "optional": []}
try:
for sp in self[entity_id]["spsso_descriptor"]:
_res = attribute_requirement(sp)
res["required"].extend(_res["required"])
res["optional"].extend(_res["optional"])
except KeyError:
return None
return res
def dumps(self):
return json.dumps(self.items(), indent=2)
def with_descriptor(self, descriptor):
res = {}
desc = "%s_descriptor" % descriptor
for eid, ent in self.items():
if desc in ent:
res[eid] = ent
return res
def __str__(self):
return "%s" % self.items()
def construct_source_id(self):
res = {}
for eid, ent in self.items():
for desc in ["spsso_descriptor", "idpsso_descriptor"]:
try:
for srv in ent[desc]:
if "artifact_resolution_service" in srv:
s = sha1(eid)
res[s.digest()] = ent
except KeyError:
pass
return res
def entity_categories(self, entity_id):
res = []
if "extensions" in self[entity_id]:
for elem in self[entity_id]["extensions"]["extension_elements"]:
if elem["__class__"] == ENTITYATTRIBUTES:
for attr in elem["attribute"]:
res.append(attr["text"])
return res
class MetaDataFile(MetaData):
"""
Handles Metadata file on the same machine. The format of the file is
the SAML Metadata format.
"""
def __init__(self, onts, attrc, filename, cert=None):
MetaData.__init__(self, onts, attrc)
self.filename = filename
self.cert = cert
def load(self):
_txt = open(self.filename).read()
if self.cert:
node_name = "%s:%s" % (md.EntitiesDescriptor.c_namespace,
md.EntitiesDescriptor.c_tag)
if self.security.verify_signature(_txt,
node_name=node_name,
cert_file=self.cert):
self.parse(_txt)
return True
else:
self.parse(_txt)
return True
class MetaDataExtern(MetaData):
"""
    Class that handles metadata stored somewhere on the net.
    Accessible by HTTP GET.
"""
def __init__(self, onts, attrc, url, security, cert, http):
"""
:params onts:
:params attrc:
:params url:
:params security: SecurityContext()
:params cert:
:params http:
"""
MetaData.__init__(self, onts, attrc)
self.url = url
self.security = security
self.cert = cert
self.http = http
def load(self):
""" Imports metadata by the use of HTTP GET.
If the fingerprint is known the file will be checked for
compliance before it is imported.
"""
response = self.http.send(self.url)
if response.status_code == 200:
node_name = "%s:%s" % (md.EntitiesDescriptor.c_namespace,
md.EntitiesDescriptor.c_tag)
_txt = response.text.encode("utf-8")
if self.cert:
if self.security.verify_signature(_txt,
node_name=node_name,
cert_file=self.cert):
self.parse(_txt)
return True
else:
self.parse(_txt)
return True
else:
logger.info("Response status: %s" % response.status)
return False
class MetaDataMD(MetaData):
"""
    Handles locally stored metadata; the file format is a JSON serialization
    of the parsed (Python) representation of the metadata.
"""
def __init__(self, onts, attrc, filename):
MetaData.__init__(self, onts, attrc)
self.filename = filename
def load(self):
for key, item in json.loads(open(self.filename).read()):
self.entity[key] = item
class MetadataStore(object):
def __init__(self, onts, attrc, config, ca_certs=None,
disable_ssl_certificate_validation=False):
"""
:params onts:
:params attrc:
:params config: Config()
:params ca_certs:
:params disable_ssl_certificate_validation:
"""
self.onts = onts
self.attrc = attrc
self.http = HTTPBase(verify=disable_ssl_certificate_validation,
ca_bundle=ca_certs)
self.security = security_context(config)
self.ii = 0
self.metadata = {}
def load(self, typ, *args, **kwargs):
if typ == "local":
key = args[0]
md = MetaDataFile(self.onts, self.attrc, args[0])
elif typ == "inline":
self.ii += 1
key = self.ii
md = MetaData(self.onts, self.attrc, args[0])
elif typ == "remote":
key = kwargs["url"]
md = MetaDataExtern(self.onts, self.attrc,
kwargs["url"], self.security,
kwargs["cert"], self.http)
elif typ == "mdfile":
key = args[0]
md = MetaDataMD(self.onts, self.attrc, args[0])
else:
raise SAMLError("Unknown metadata type '%s'" % typ)
md.load()
self.metadata[key] = md
def imp(self, spec):
for key, vals in spec.items():
for val in vals:
if isinstance(val, dict):
self.load(key, **val)
else:
self.load(key, val)
def _service(self, entity_id, typ, service, binding=None):
known_principal = False
for key, md in self.metadata.items():
srvs = md._service(entity_id, typ, service, binding)
if srvs:
return srvs
elif srvs is None:
pass
else:
known_principal = True
if known_principal:
logger.error("Unsupported binding: %s (%s)" % (binding, entity_id))
raise UnsupportedBinding(binding)
else:
logger.error("Unknown principal: %s" % entity_id)
raise UnknownPrincipal(entity_id)
def _ext_service(self, entity_id, typ, service, binding=None):
known_principal = False
for key, md in self.metadata.items():
srvs = md._ext_service(entity_id, typ, service, binding)
if srvs:
return srvs
elif srvs is None:
pass
else:
known_principal = True
if known_principal:
raise UnsupportedBinding(binding)
else:
raise UnknownPrincipal(entity_id)
def single_sign_on_service(self, entity_id, binding=None, typ="idpsso"):
# IDP
if binding is None:
binding = BINDING_HTTP_REDIRECT
return self._service(entity_id, "idpsso_descriptor",
"single_sign_on_service", binding)
def name_id_mapping_service(self, entity_id, binding=None, typ="idpsso"):
# IDP
if binding is None:
binding = BINDING_HTTP_REDIRECT
return self._service(entity_id, "idpsso_descriptor",
"name_id_mapping_service", binding)
def authn_query_service(self, entity_id, binding=None,
typ="authn_authority"):
# AuthnAuthority
if binding is None:
binding = BINDING_SOAP
return self._service(entity_id, "authn_authority_descriptor",
"authn_query_service", binding)
def attribute_service(self, entity_id, binding=None,
typ="attribute_authority"):
# AttributeAuthority
if binding is None:
binding = BINDING_HTTP_REDIRECT
return self._service(entity_id, "attribute_authority_descriptor",
"attribute_service", binding)
def authz_service(self, entity_id, binding=None, typ="pdp"):
# PDP
if binding is None:
binding = BINDING_SOAP
return self._service(entity_id, "pdp_descriptor",
"authz_service", binding)
def assertion_id_request_service(self, entity_id, binding=None, typ=None):
# AuthnAuthority + IDP + PDP + AttributeAuthority
if typ is None:
raise AttributeError("Missing type specification")
if binding is None:
binding = BINDING_SOAP
return self._service(entity_id, "%s_descriptor" % typ,
"assertion_id_request_service", binding)
def single_logout_service(self, entity_id, binding=None, typ=None):
# IDP + SP
if typ is None:
raise AttributeError("Missing type specification")
if binding is None:
binding = BINDING_HTTP_REDIRECT
return self._service(entity_id, "%s_descriptor" % typ,
"single_logout_service", binding)
def manage_name_id_service(self, entity_id, binding=None, typ=None):
# IDP + SP
if binding is None:
binding = BINDING_HTTP_REDIRECT
return self._service(entity_id, "%s_descriptor" % typ,
"manage_name_id_service", binding)
def artifact_resolution_service(self, entity_id, binding=None, typ=None):
# IDP + SP
if binding is None:
binding = BINDING_HTTP_REDIRECT
return self._service(entity_id, "%s_descriptor" % typ,
"artifact_resolution_service", binding)
def assertion_consumer_service(self, entity_id, binding=None, _="spsso"):
# SP
if binding is None:
binding = BINDING_HTTP_POST
return self._service(entity_id, "spsso_descriptor",
"assertion_consumer_service", binding)
def attribute_consuming_service(self, entity_id, binding=None, _="spsso"):
# SP
if binding is None:
binding = BINDING_HTTP_REDIRECT
return self._service(entity_id, "spsso_descriptor",
"attribute_consuming_service", binding)
def discovery_response(self, entity_id, binding=None, _="spsso"):
if binding is None:
binding = BINDING_DISCO
return self._ext_service(entity_id, "spsso_descriptor",
"%s&%s" % (DiscoveryResponse.c_namespace,
DiscoveryResponse.c_tag),
binding)
def attribute_requirement(self, entity_id, index=0):
for md in self.metadata.values():
if entity_id in md:
return md.attribute_requirement(entity_id, index)
def keys(self):
res = []
for md in self.metadata.values():
res.extend(md.keys())
return res
def __getitem__(self, item):
for md in self.metadata.values():
try:
return md[item]
except KeyError:
pass
raise KeyError(item)
def __setitem__(self, key, value):
self.metadata[key] = value
def entities(self):
num = 0
for md in self.metadata.values():
num += len(md.items())
return num
def __len__(self):
return len(self.metadata)
def with_descriptor(self, descriptor):
res = {}
for md in self.metadata.values():
res.update(md.with_descriptor(descriptor))
return res
def name(self, entity_id, langpref="en"):
for md in self.metadata.values():
if entity_id in md.items():
return name(md[entity_id], langpref)
return None
def certs(self, entity_id, descriptor, use="signing"):
ent = self.__getitem__(entity_id)
if descriptor == "any":
res = []
for descr in ["spsso", "idpsso", "role", "authn_authority",
"attribute_authority", "pdp"]:
try:
srvs = ent["%s_descriptor" % descr]
except KeyError:
continue
for srv in srvs:
for key in srv["key_descriptor"]:
if "use" in key and key["use"] == use:
for dat in key["key_info"]["x509_data"]:
cert = repack_cert(
dat["x509_certificate"]["text"])
if cert not in res:
res.append(cert)
elif not "use" in key:
for dat in key["key_info"]["x509_data"]:
cert = repack_cert(
dat["x509_certificate"]["text"])
if cert not in res:
res.append(cert)
else:
srvs = ent["%s_descriptor" % descriptor]
res = []
for srv in srvs:
for key in srv["key_descriptor"]:
if "use" in key and key["use"] == use:
for dat in key["key_info"]["x509_data"]:
res.append(dat["x509_certificate"]["text"])
elif not "use" in key:
for dat in key["key_info"]["x509_data"]:
res.append(dat["x509_certificate"]["text"])
return res
def vo_members(self, entity_id):
ad = self.__getitem__(entity_id)["affiliation_descriptor"]
return [m["text"] for m in ad["affiliate_member"]]
def entity_categories(self, entity_id):
ext = self.__getitem__(entity_id)["extensions"]
res = []
for elem in ext["extension_elements"]:
if elem["__class__"] == ENTITYATTRIBUTES:
for attr in elem["attribute"]:
if attr["name"] == "http://macedir.org/entity-category":
res.extend([v["text"] for v in attr["attribute_value"]])
return res
def bindings(self, entity_id, typ, service):
for md in self.metadata.values():
if entity_id in md.items():
return md.bindings(entity_id, typ, service)
return None
def __str__(self):
_str = ["{"]
for key, val in self.metadata.items():
_str.append("%s: %s" % (key, val))
_str.append("}")
return "\n".join(_str)
def construct_source_id(self):
res = {}
for md in self.metadata.values():
res.update(md.construct_source_id())
return res
def items(self):
res = {}
for md in self.metadata.values():
res.update(md.items())
return res.items()
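
# --- Editor's illustrative sketch (not part of the original module) ---
# Rough outline of how a MetadataStore is typically populated and queried.
# The config object, schema modules (onts), attribute converters (attrc) and
# the metadata file name below are placeholders, not working values.
#
#     mds = MetadataStore(onts, attrc, config)
#     mds.imp({"local": ["/path/to/idp-metadata.xml"]})
#     # Resolve the SSO endpoint(s) of an IdP for the HTTP-Redirect binding:
#     srvs = mds.single_sign_on_service("https://idp.example.org/idp.xml",
#                                       binding=BINDING_HTTP_REDIRECT)
#     locations = destinations(srvs)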
| bsd-2-clause | -8,767,561,212,267,403,000 | 31.802469 | 80 | 0.522603 | false |
AndiDog/git-cola | share/doc/git-cola/conf.py | 3 | 1507 | # -*- coding: utf-8 -*-
import os
import sys
# Add the cola source directory to sys.path
abspath = os.path.abspath(os.path.realpath(__file__))
docdir = os.path.dirname(os.path.dirname(abspath))
srcdir = os.path.dirname(os.path.dirname(docdir))
extrasdir = os.path.join(srcdir, 'extras')
sys.path.insert(1, extrasdir)
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinxtogithub']
templates_path = ['_templates']
source_suffix = '.rst'
source_encoding = 'utf-8'
master_doc = 'index'
project = 'git-cola'
copyright = '2007-2017, David Aguilar and contributors'
authors = 'David Aguilar and contributors'
versionfile = os.path.join(srcdir, 'cola', '_version.py')
scope = {}
with open(versionfile) as f:
exec(f.read(), scope)
# The short X.Y version.
version = scope['VERSION']
# The full version, including alpha/beta/rc tags.
release = version
exclude_trees = ['_build']
add_function_parentheses = True
pygments_style = 'default'
html_theme = 'default'
html_theme_path = ['_themes']
html_static_path = ['_static']
html_show_sourcelink = True
htmlhelp_basename = 'git-cola-doc'
man_pages = [
('git-cola', 'git-cola', 'The highly caffeinated Git GUI',
authors, '1'),
('git-dag', 'git-dag', 'The sleek and powerful Git history browser',
authors, '1'),
]
latex_documents = [
('index', 'git-cola.tex', 'git-cola Documentation',
'David Aguilar and contributors', 'manual'),
]
| gpl-2.0 | -3,992,718,401,411,175,000 | 25.910714 | 70 | 0.667551 | false |
killerwhile/polaris-gslb | polaris_health/state/pool.py | 1 | 14844 | # -*- coding: utf-8 -*-
import logging
import ipaddress
import random
from polaris_health import Error, config, monitors
from polaris_health.util import topology
__all__ = [ 'PoolMember', 'Pool' ]
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
MAX_POOL_MEMBER_NAME_LEN = 256
MAX_POOL_MEMBER_WEIGHT = 99
MAX_POOL_NAME_LEN = 256
MAX_REGION_LEN = 256
MAX_MAX_ADDRS_RETURNED = 32
def pprint_status(status):
"""Convert bool status into a health status string"""
if status is True:
return 'UP'
elif status is False:
return 'DOWN'
elif status is None:
return 'NEW/UNKNOWN'
else:
raise Error('Invalid status "{}"'.format(status))
class PoolMember:
"""A backend server, member of a pool"""
def __init__(self, ip, name, weight, region=None):
"""
args:
ip: string, IP address
name: string, name of the server
weight: int, weight of the server, if set to 0 the server
is disabled
region: string, id of the region, used in topology-based
distribution
"""
### ip
try:
_ip = ipaddress.ip_address(ip)
except ValueError:
log_msg = ('"{}" does not appear to be a valid IP address'
.format(ip))
LOG.error(log_msg)
raise Error(log_msg)
if _ip.version != 4:
log_msg = 'only v4 IP addresses are currently supported'
LOG.error(log_msg)
raise Error(log_msg)
self.ip = ip
### name
if (not isinstance(name, str) or len(name) > MAX_POOL_MEMBER_NAME_LEN):
log_msg = ('"{}" name must be a str, {} chars max'.
format(name, MAX_POOL_MEMBER_NAME_LEN))
LOG.error(log_msg)
raise Error(log_msg)
else:
self.name = name
### weight
if (not isinstance(weight, int) or weight < 0
or weight > MAX_POOL_MEMBER_WEIGHT):
log_msg = ('"{}" weight "{}" must be an int between 0 and {}'.
format(name, weight, MAX_POOL_MEMBER_WEIGHT))
raise Error(log_msg)
else:
self.weight = weight
### region
if (not region is None
and (not isinstance(region, (str))
or len(region) > MAX_REGION_LEN)):
log_msg = ('"{}" region "{}" must be a str, {} chars max'.
                       format(name, region, MAX_REGION_LEN))
LOG.error(log_msg)
raise Error(log_msg)
else:
self.region = region
# curent status of the server
# None = new, True = up, False = down
self.status = None
# reason why this status has been set
self.status_reason = None
# timestamp when the probe was issued last time
# used to determine when to send a new probe
self.last_probe_issued_time = None
# this is used by tracker to determine how many more
# probing requests to attempt before declaring the member down
# set to the parent's pool monitor retries value initially
self.retries_left = None
class Pool:
"""A pool of backend servers"""
LB_METHOD_OPTIONS = [ 'wrr', 'twrr' ]
FALLBACK_OPTIONS = [ 'any', 'refuse' ]
def __init__(self, name, monitor, members, lb_method,
fallback='any', max_addrs_returned=1):
"""
args:
name: string, name of the pool
monitor: obj derived from monitors.BaseMonitor
members: dict where keys are IP addresses of members,
values are PoolMember objects
lb_method: string, distribution method name
            fallback: string, one of "any", "refuse"
resolution behaviour when all members of the pool are DOWN
"any": perform distribution amongst all configured
members(ignore health status)
"refuse": refuse queries
max_addrs_returned: int, max number of A records to return in
response
"""
### name
self.name = name
if (not isinstance(name, str)
or len(name) > MAX_POOL_NAME_LEN):
log_msg = ('"{}" name must be a str, {} chars max'.
format(name, MAX_POOL_NAME_LEN))
LOG.error(log_msg)
raise Error(log_msg)
### monitor
self.monitor = monitor
### members
self.members = members
### lb_method
self.lb_method = lb_method
if (not isinstance(lb_method, str)
or lb_method not in self.LB_METHOD_OPTIONS):
_lb_methods = ', '.join(self.LB_METHOD_OPTIONS)
log_msg = ('lb_method "{}" must be a str one of {}'.
format(lb_method, _lb_methods))
LOG.error(log_msg)
raise Error(log_msg)
### fallback
self.fallback = fallback
if (not isinstance(fallback, str)
or fallback not in self.FALLBACK_OPTIONS):
_fallbacks = ', '.join(self.FALLBACK_OPTIONS)
log_msg = ('fallback "{}" must be a str one of {}'.
format(fallback, _fallbacks))
LOG.error(log_msg)
raise Error(log_msg)
# max_addrs_returned
self.max_addrs_returned = max_addrs_returned
if (not isinstance(max_addrs_returned, int) or max_addrs_returned < 1
or max_addrs_returned > MAX_MAX_ADDRS_RETURNED):
log_msg = ('"{}" max_addrs_returned "{}" must be an int '
'between 1 and {}'
.format(name, max_addrs_returned,
MAX_MAX_ADDRS_RETURNED))
raise Error(log_msg)
# last known status None, True, False
self.last_status = None
########################
### public interface ###
########################
@property
def status(self):
"""Return health status of the pool.
Read-only property based on health status of the pool members.
        Return True if any member of the pool with non-0 weight is UP,
False otherwise.
"""
for member_ip in self.members:
if self.members[member_ip].weight > 0 \
and self.members[member_ip].status:
return True
return False
@classmethod
def from_config_dict(cls, name, obj):
"""Build a Pool object from a config dict
args:
name: string, name of the pool
obj: dict, config dict
"""
############################
### mandatory parameters ###
############################
### monitor
if obj['monitor'] not in monitors.registered:
log_msg = 'unknown monitor "{}"'.format(obj['monitor'])
LOG.error(log_msg)
raise Error(log_msg)
else:
monitor_name = obj['monitor']
if 'monitor_params' in obj:
if not obj['monitor_params']:
log_msg = 'monitor_params should not be empty'
LOG.error(log_msg)
raise Error(log_msg)
monitor_params = obj['monitor_params']
else:
monitor_params = {}
monitor = monitors.registered[monitor_name](**monitor_params)
### lb_method
lb_method = obj['lb_method']
### members
members = {}
# validate "members" key is present and not empty
if not 'members' in obj or not obj['members']:
log_msg = ('configuration dictionary must contain '
'a non-empty "members" key')
LOG.error(log_msg)
raise Error(log_msg)
for member_ip in obj['members']:
member_name = obj['members'][member_ip]['name']
weight = obj['members'][member_ip]['weight']
region = None
# if topology round robin method is used
# set region on the pool member
if lb_method == 'twrr':
region = topology.get_region(
member_ip, config.TOPOLOGY_MAP)
if not region:
log_msg = ('Unable to determine region for pool '
'{0} member {1}({2})'.
format(name, member_ip, member_name))
LOG.error(log_msg)
raise Error(log_msg)
members[member_ip] = PoolMember(ip=member_ip,
name=member_name,
weight=weight,
region=region)
###########################
### optional parameters ###
###########################
pool_optional_params = {}
### fallback
if 'fallback' in obj:
pool_optional_params['fallback'] = obj['fallback']
### max_addrs_returned
if 'max_addrs_returned' in obj:
pool_optional_params['max_addrs_returned'] = \
obj['max_addrs_returned']
# return Pool object
return cls(name=name,
monitor=monitor,
lb_method=lb_method,
members=members,
**pool_optional_params)
def to_dist_dict(self):
"""Return dict representation of the Pool required by Polaris PDNS
to perform query distribution.
"_default" distribution table is always built.
Region-specific distribution tables are built only if the pool is using
a topology lb method and has an UP member.
Example:
{
'status' : True,
'lb_method': 'twrr',
'fallback': 'any',
'max_addrs_returned': 1,
'dist_tables': {
'_default': {
'rotation': [ '192.168.1.1', '192.168.1.2' ],
'num_unique_addrs': 2,
'index': 1
},
'region1': {
'rotation': [ '192.168.1.1' ],
'num_unique_addrs': 1,
'index': 0
},
'region2': {
'rotation': [ '192.168.1.2' ],
'num_unique_addrs': 1,
'index': 0
},
}
}
"""
obj = {}
### status
obj['status'] = self.status
### lb_method
obj['lb_method'] = self.lb_method
### fallback
obj['fallback'] = self.fallback
### max_addrs_returned
obj['max_addrs_returned'] = self.max_addrs_returned
### distribution tables
dist_tables = {}
# always build the _default distribution table
dist_tables['_default'] = {}
dist_tables['_default']['rotation'] = []
dist_tables['_default']['num_unique_addrs'] = 0
##################
### pool is UP ###
##################
if self.status:
for member_ip in self.members:
member = self.members[member_ip]
# do not add members with the weight of 0 - member is disabled
if member.weight == 0:
continue
# do not add members in DOWN state
if not member.status:
continue
#
# add to the _default table
#
# add the member IP times it's weight into
# the _default distribution table
for i in range(member.weight):
dist_tables['_default']['rotation'].append(member_ip)
# increase the number of unique addresses in the _default by 1
dist_tables['_default']['num_unique_addrs'] += 1
#
# add to a regional table
#
# if a topology lb method is used, add the member's IP
# to a corresponding regional table
if self.lb_method == 'twrr':
# create the regional table if it does not exist
if member.region not in dist_tables:
dist_tables[member.region] = {}
dist_tables[member.region]['rotation'] = []
dist_tables[member.region]['num_unique_addrs'] = 0
# add the member IP times it's weight
# into the regional table
for i in range(member.weight):
dist_tables[member.region]['rotation'].append(member_ip)
# increase the number of unique addresses in the table by 1
dist_tables[member.region]['num_unique_addrs'] += 1
####################
### pool is DOWN ###
####################
else:
for member_ip in self.members:
member = self.members[member_ip]
# do not add members with weight of 0 - member is disabled
if member.weight == 0:
continue
# add to the _default table if fallback is set to 'any'
if self.fallback == 'any':
# add the member IP times it's weight into
# the _default distribution table
for i in range(member.weight):
dist_tables['_default']['rotation'].append(member_ip)
# increase the number of unique addresses
# in the _default by 1
dist_tables['_default']['num_unique_addrs'] += 1
##################################
### post tables creation tasks ###
##################################
for name in dist_tables:
# randomly shuffle the rotation list
random.shuffle(dist_tables[name]['rotation'])
# create index used by ppdns for distribution,
# set it to a random position, when ppdns is
# syncing its internal state from shared memory, indexes gets
# reset, we want to avoid starting from 0 every time
dist_tables[name]['index'] = \
int(random.random() * len(dist_tables[name]['rotation']))
obj['dist_tables'] = dist_tables
return obj
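
# --- Editor's illustrative sketch (not part of the original module) ---
# Shape of the configuration dictionary consumed by Pool.from_config_dict();
# the pool name, monitor name, parameters and member addresses below are
# invented for illustration and must match what is registered in
# polaris_health.monitors and (for 'twrr') the configured topology map.
#
#     pool_config = {
#         'monitor': 'tcp',                  # must be a key in monitors.registered
#         'monitor_params': {'port': 80},    # optional, passed to the monitor
#         'lb_method': 'wrr',                # 'wrr' or 'twrr'
#         'fallback': 'any',                 # optional: 'any' or 'refuse'
#         'max_addrs_returned': 2,           # optional, 1..32
#         'members': {
#             '192.0.2.10': {'name': 'web1', 'weight': 1},
#             '192.0.2.11': {'name': 'web2', 'weight': 2},
#         },
#     }
#     pool = Pool.from_config_dict('www-pool', pool_config)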
| bsd-3-clause | -8,665,529,963,169,246,000 | 32.967963 | 80 | 0.48464 | false |
vintasoftware/django-templated-email | tests/test_get_templated_mail.py | 2 | 2466 | from django.test import TestCase
from mock import patch
from templated_email import get_templated_mail
class GetTemplatedMailTestCase(TestCase):
TEST_ARGS = ['a_template_name', {'context': 'content'}]
TEST_KWARGS = {
'from_email': '[email protected]',
'to': ['[email protected]'],
'cc': ['[email protected]'],
'bcc': ['[email protected]'],
'headers': {'A_HEADER': 'foo'},
'template_prefix': 'prefix',
'template_suffix': 'suffix',
'template_dir': 'dirp',
'file_extension': 'ext',
'create_link': False,
}
@patch('templated_email.TemplateBackend')
def test_get_templated_mail_returns_response_of_get_email_message(
self, mocked_backend):
ret = get_templated_mail(*self.TEST_ARGS)
self.assertTrue(
ret is mocked_backend.return_value.get_email_message.return_value)
@patch('templated_email.TemplateBackend')
def test_called_get_email_message_from_vanilla_backend(self, mocked_backend):
get_templated_mail(*self.TEST_ARGS)
mocked_backend.return_value.get_email_message.assert_called_once()
@patch('templated_email.TemplateBackend')
def test_arguments_get_passsed_to_get_email_message(self, mocked_backend):
get_templated_mail(*self.TEST_ARGS, **self.TEST_KWARGS)
mocked_backend.assert_called_with(template_prefix='prefix',
template_suffix='suffix')
get_email_message = mocked_backend.return_value.get_email_message
kwargs = dict(self.TEST_KWARGS)
del kwargs['template_dir']
del kwargs['file_extension']
get_email_message.assert_called_with(*self.TEST_ARGS, **kwargs)
@patch('templated_email.TemplateBackend')
def test_arguments_get_email_message_fallback(self, mocked_backend):
kwargs = dict(self.TEST_KWARGS)
del kwargs['template_prefix']
del kwargs['template_suffix']
get_templated_mail(*self.TEST_ARGS, **kwargs)
mocked_backend.assert_called_with(template_prefix=kwargs['template_dir'],
template_suffix=kwargs['file_extension'])
get_email_message = mocked_backend.return_value.get_email_message
kwargs['template_prefix'] = kwargs.pop('template_dir')
kwargs['template_suffix'] = kwargs.pop('file_extension')
get_email_message.assert_called_with(*self.TEST_ARGS, **kwargs)
| mit | -1,729,526,627,829,810,200 | 37.53125 | 83 | 0.637064 | false |
ArcEye/MK-Qt5 | lib/python/gladevcp/hal_widgets.py | 17 | 13019 | # vim: sts=4 sw=4 et
# GladeVcp Widgets
#
# Copyright (c) 2010 Chris Morley, Pavel Shramov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import gobject
import gtk
import hal
hal_pin_changed_signal = ('hal-pin-changed', (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_OBJECT,)))
""" Set of base classes """
class _HalWidgetBase:
def hal_init(self, comp, name):
self.hal, self.hal_name = comp, name
self._hal_init()
def _hal_init(self):
""" Child HAL initialization functions """
pass
def hal_update(self):
""" Update HAL state """
pass
class _HalToggleBase(_HalWidgetBase):
def _hal_init(self):
self.set_active(False)
self.hal_pin = self.hal.newpin(self.hal_name, hal.HAL_BIT, hal.HAL_OUT)
self.hal_pin_not = self.hal.newpin(self.hal_name + "-not", hal.HAL_BIT, hal.HAL_OUT)
self.connect("toggled", self.hal_update)
def hal_update(self, *a):
active = bool(self.get_active())
self.hal_pin.set(active)
self.hal_pin_not.set(not active)
class _HalScaleBase(_HalWidgetBase):
def _hal_init(self):
self.hal_pin = self.hal.newpin(self.hal_name, hal.HAL_FLOAT, hal.HAL_OUT)
self.connect("value-changed", self.hal_update)
def hal_update(self, *a):
self.hal_pin.set(self.get_value())
class _HalIOScaleBase(_HalWidgetBase):
def _hal_init(self):
self.hal_pin = self.hal.newpin(self.hal_name, hal.HAL_FLOAT, hal.HAL_IO)
self.connect("value-changed", self.hal_update)
self.hal_pin.connect('value-changed', lambda s: self.emit('hal-pin-changed', s))
self.hal_current = self.hal_pin.get()
def hal_update(self, *a):
hval = self.hal_pin.get()
if self.hal_current != hval:
self.hal_current = hval
self.set_value(hval)
return
wval = self.get_value()
if wval != hval:
self.hal_pin.set(wval)
class _HalSensitiveBase(_HalWidgetBase):
def _hal_init(self):
self.hal_pin = self.hal.newpin(self.hal_name, hal.HAL_BIT, hal.HAL_IN)
self.hal_pin.connect('value-changed', lambda s: self.set_sensitive(s.value))
self.hal_pin.connect('value-changed', lambda s: self.emit('hal-pin-changed', s))
class _HalJogWheelBase(_HalWidgetBase):
def _hal_init(self):
self.hal_pin = self.hal.newpin(self.hal_name, hal.HAL_S32, hal.HAL_OUT)
try:
self.get_scaled_value()
self.hal_pin_scaled = self.hal.newpin(self.hal_name+'-scaled', hal.HAL_FLOAT, hal.HAL_OUT)
except:
pass
try:
self.get_delta_scaled_value()
self.hal_pin_delta_scaled = self.hal.newpin(self.hal_name+'-delta-scaled', hal.HAL_FLOAT, hal.HAL_OUT)
except:
pass
def hal_update(self, *a):
data = self.get_value()
self.hal_pin.set(int(data))
try:
data = self.get_scaled_value()
self.hal_pin_scaled.set(float(data))
except:
pass
try:
data = self.get_delta_scaled_value()
self.hal_pin_delta_scaled.set(float(data))
except:
pass
class _HalSpeedControlBase(_HalWidgetBase):
def _hal_init(self):
self.hal_pin = self.hal.newpin(self.hal_name + '.value', hal.HAL_FLOAT, hal.HAL_OUT)
self.connect("value-changed", self.hal_update)
def hal_update(self, *a):
self.hal_pin.set(self.get_value())
""" Real widgets """
class HAL_HBox(gtk.HBox, _HalSensitiveBase):
__gtype_name__ = "HAL_HBox"
__gsignals__ = dict([hal_pin_changed_signal])
class HAL_Table(gtk.Table, _HalSensitiveBase):
__gtype_name__ = "HAL_Table"
__gsignals__ = dict([hal_pin_changed_signal])
class HAL_HideTable(gtk.Table, _HalWidgetBase):
__gtype_name__ = "HAL_HideTable"
__gsignals__ = dict([hal_pin_changed_signal])
def _hal_init(self):
self.hal_pin = self.hal.newpin(self.hal_name, hal.HAL_BIT, hal.HAL_IN)
self.hal_pin.connect('value-changed',self.update)
def update(self,*a):
value = self.hal_pin.get()
if value:
self.hide()
else:
self.show()
class HAL_ComboBox(gtk.ComboBox, _HalWidgetBase):
__gtype_name__ = "HAL_ComboBox"
__gproperties__ = {
'column' : ( gobject.TYPE_INT, 'Column', '-1:return value of index, other: column index of value in ListStore',
-1, 100, -1, gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT),
}
def do_get_property(self, property):
name = property.name.replace('-', '_')
if name in ['column']:
return getattr(self, name)
else:
raise AttributeError('unknown property %s' % property.name)
def do_set_property(self, property, value):
name = property.name.replace('-', '_')
if name in ['column']:
return setattr(self, name, value)
else:
raise AttributeError('unknown property %s' % property.name)
def _hal_init(self):
self.hal_pin_f = self.hal.newpin(self.hal_name+"-f", hal.HAL_FLOAT, hal.HAL_OUT)
self.hal_pin_s = self.hal.newpin(self.hal_name+"-s", hal.HAL_S32, hal.HAL_OUT)
self.connect("changed", self.hal_update)
def hal_update(self, *a):
index = self.get_active()
if self.column == -1: # just use index
v = index
else:
model = self.get_model()
v = model[index][self.column]
self.hal_pin_s.set(int(v))
self.hal_pin_f.set(float(v))
class HAL_Button(gtk.Button, _HalWidgetBase):
__gtype_name__ = "HAL_Button"
def _hal_init(self):
self.hal_pin = self.hal.newpin(self.hal_name, hal.HAL_BIT, hal.HAL_OUT)
def _f(w, data):
self.hal_pin.set(data)
self.connect("pressed", _f, True)
self.connect("released", _f, False)
self.emit("released")
class HALIO_Button(gtk.ToggleButton, _HalWidgetBase):
__gtype_name__ = "HALIO_Button"
def _hal_init(self):
self.set_active(False)
self.hal_pin = self.hal.newpin(self.hal_name, hal.HAL_BIT, hal.HAL_IO)
def _f(w, data):
self.set_active(True)
self.hal_pin.set(data)
self.connect("pressed", _f, True)
self.hal_pin.connect('value-changed', self.hal_update)
def hal_update(self, *a):
active = bool(self.hal_pin.get())
self.set_active(active)
class HAL_CheckButton(gtk.CheckButton, _HalToggleBase):
__gtype_name__ = "HAL_CheckButton"
class HAL_SpinButton(gtk.SpinButton, _HalWidgetBase):
__gtype_name__ = "HAL_SpinButton"
def hal_update(self, *a):
data = self.get_value()
self.hal_pin_f.set(float(data))
self.hal_pin_s.set(int(data))
def _hal_init(self):
self.hal_pin_f = self.hal.newpin(self.hal_name+"-f", hal.HAL_FLOAT, hal.HAL_OUT)
self.hal_pin_s = self.hal.newpin(self.hal_name+"-s", hal.HAL_S32, hal.HAL_OUT)
self.connect("value-changed", self.hal_update)
self.emit("value-changed")
class HAL_RadioButton(gtk.RadioButton, _HalToggleBase):
__gtype_name__ = "HAL_RadioButton"
class HAL_ToggleButton(gtk.ToggleButton, _HalToggleBase):
__gtype_name__ = "HAL_ToggleButton"
class HAL_HScale(gtk.HScale, _HalScaleBase):
__gtype_name__ = "HAL_HScale"
class HALIO_HScale(gtk.HScale, _HalIOScaleBase):
__gtype_name__ = "HALIO_HScale"
__gsignals__ = dict([hal_pin_changed_signal])
class HAL_VScale(gtk.VScale, _HalScaleBase):
__gtype_name__ = "HAL_VScale"
class HAL_ProgressBar(gtk.ProgressBar, _HalWidgetBase):
__gtype_name__ = "HAL_ProgressBar"
__gproperties__ = {
'scale' : ( gobject.TYPE_FLOAT, 'Value Scale',
'Set maximum absolute value of input', -2**24, 2**24, 0,
gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'green_limit' : ( gobject.TYPE_FLOAT, 'green zone limit',
'lower limit of green zone', 0, 1, 0,
gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'yellow_limit' : ( gobject.TYPE_FLOAT, 'yellow zone limit',
'lower limit of yellow zone', 0, 1, 0,
gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'red_limit' : ( gobject.TYPE_FLOAT, 'red zone limit',
'lower limit of red zone', 0, 1, 0,
gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'text_template' : ( gobject.TYPE_STRING, 'text template',
'Text template to display. Python formatting may be used for dict {"value":value}',
"", gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
}
__gproperties = __gproperties__
def do_get_property(self, property):
name = property.name.replace('-', '_')
if name in self.__gproperties.keys():
return getattr(self, name)
else:
raise AttributeError('unknown property %s' % property.name)
def do_set_property(self, property, value):
name = property.name.replace('-', '_')
if name in self.__gproperties.keys():
return setattr(self, name, value)
else:
raise AttributeError('unknown property %s' % property.name)
def _hal_init(self):
self.hal_pin = self.hal.newpin(self.hal_name, hal.HAL_FLOAT, hal.HAL_IN)
self.hal_pin_scale = self.hal.newpin(self.hal_name+".scale", hal.HAL_FLOAT, hal.HAL_IN)
if self.yellow_limit or self.red_limit:
            self.set_fraction(0)
            self.modify_bg(gtk.STATE_PRELIGHT, gtk.gdk.Color('#0f0'))
if self.text_template:
self.set_text(self.text_template % {'value':0})
def hal_update(self):
scale = self.hal_pin_scale.get() or self.scale
setting = self.hal_pin.get()
if scale <= 0 : scale = 1
if setting < 0 : setting = 0
if (setting/scale) >1:
setting = 1
scale = 1
old = self.get_fraction()
new = setting/scale
self.set_fraction(setting/scale)
if old == new:
return
if self.text_template:
self.set_text(self.text_template % {'value':setting})
colors = []
if self.yellow_limit:
colors.append((self.yellow_limit, 'yellow'))
if self.red_limit:
colors.append((self.red_limit, 'red'))
if colors:
colors.insert(0, (0, 'green'))
color = None
for (l,c), (h, _) in zip(colors, colors[1:] + [(1, None)]):
if new < l or new >= h:
pass
elif old < l or old >= h:
color = c
break
if color:
self.modify_bg(gtk.STATE_PRELIGHT, gtk.gdk.color_parse(color))
class HAL_Label(gtk.Label, _HalWidgetBase):
__gtype_name__ = "HAL_Label"
__gsignals__ = dict([hal_pin_changed_signal])
__gproperties__ = {
'label_pin_type' : ( gobject.TYPE_INT, 'HAL pin type', '0:S32 1:Float 2:U32',
0, 2, 0, gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT),
'text_template' : ( gobject.TYPE_STRING, 'text template',
'Text template to display. Python formatting may be used for one variable',
"%s", gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT),
}
def do_get_property(self, property):
name = property.name.replace('-', '_')
if name in ['label_pin_type', 'text_template']:
return getattr(self, name)
else:
raise AttributeError('unknown property %s' % property.name)
def do_set_property(self, property, value):
name = property.name.replace('-', '_')
if name in ['label_pin_type', 'text_template']:
return setattr(self, name, value)
else:
raise AttributeError('unknown property %s' % property.name)
def _hal_init(self):
types = {0:hal.HAL_S32
,1:hal.HAL_FLOAT
,2:hal.HAL_U32
}
pin_type = types.get(self.label_pin_type, None)
if pin_type is None:
raise TypeError("%s: Invalid pin type: %s" % (self.hal_name, self.label_pin_type))
self.hal_pin = self.hal.newpin(self.hal_name, pin_type, hal.HAL_IN)
self.hal_pin.connect('value-changed',
lambda p: self.set_text(self.text_template % p.value))
self.hal_pin.connect('value-changed', lambda s: self.emit('hal-pin-changed', s))
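
# --- Editor's note (illustrative, not part of the original module) ---
# Every widget above becomes live only after hal_init(comp, name) is called:
# comp must expose a newpin(name, type, direction) method whose returned pin
# objects support GObject signals (several classes above connect to their
# 'value-changed' signal), and name is the pin base name.  GladeVCP performs
# this call itself for every HAL widget it loads from a .ui file, so the
# outline below only sketches that contract:
#
#     # comp = <HAL component wrapper supplied by the GladeVCP loader>
#     # widget = HAL_CheckButton()
#     # widget.hal_init(comp, "enable")   # creates pins "enable" and "enable-not"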
| lgpl-2.1 | -8,120,444,566,619,983,000 | 35.365922 | 120 | 0.590214 | false |
itoed/anaconda | pyanaconda/simpleconfig.py | 8 | 6761 | #
# simpleconfig.py - representation of a simple configuration file (sh-like)
#
# Copyright (C) 1999-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Author(s): Matt Wilson <[email protected]>
# Jeremy Katz <[email protected]>
# Will Woods <[email protected]>
# Brian C. Lane <[email protected]>
#
import os
import shutil
import shlex
from pipes import _safechars
import tempfile
from pyanaconda.iutil import upperASCII, eintr_retry_call
def unquote(s):
return ' '.join(shlex.split(s))
def quote(s, always=False):
""" If always is set it returns a quoted value
"""
if not always:
for c in s:
if c not in _safechars:
break
else:
return s
return '"'+s.replace('"', '\\"')+'"'
def find_comment(s):
""" Look for a # comment outside of a quoted string.
If there are no quotes, find the last # in the string.
:param str s: string to check for comment and quotes
:returns: index of comment or None
:rtype: int or None
Handles comments inside quotes and quotes inside quotes.
"""
q = None
for i in range(len(s)):
if not q and s[i] == '#':
return i
# Ignore quotes inside other quotes
if s[i] in "'\"":
if s[i] == q:
q = None
elif q is None:
q = s[i]
return None
class SimpleConfigFile(object):
""" Edit values in a configuration file without changing comments.
Supports KEY=VALUE lines and ignores everything else.
Supports adding new keys.
Supports deleting keys.
Preserves comment, blank lines and comments on KEY lines
Does not support duplicate key entries.
"""
def __init__(self, filename=None, read_unquote=True, write_quote=True,
always_quote=False):
self.filename = filename
self.read_unquote = read_unquote
self.write_quote = write_quote
self.always_quote = always_quote
self.reset()
def reset(self):
self._lines = []
self.info = {}
def read(self, filename=None):
""" passing filename will override the filename passed to init.
save the lines into self._lines and the key/value pairs into
self.info
"""
filename = filename or self.filename
with open(filename) as f:
for line in f:
self._lines.append(line)
key, value, _comment = self._parseline(line)
if key:
self.info[key] = value
def write(self, filename=None, use_tmp=True):
""" passing filename will override the filename passed to init.
"""
filename = filename or self.filename
if not filename:
return None
if use_tmp:
tmpf = tempfile.NamedTemporaryFile(mode="w", delete=False)
tmpf.write(str(self))
tmpf.close()
# Move the temporary file (with 0600 permissions) over the top of the
# original and preserve the original's permissions
filename = os.path.realpath(filename)
if os.path.exists(filename):
m = os.stat(filename).st_mode
else:
m = int('0100644', 8)
shutil.move(tmpf.name, filename)
eintr_retry_call(os.chmod, filename, m)
else:
# write directly to the file
with open(filename, "w") as fobj:
fobj.write(str(self))
def set(self, *args):
for key, value in args:
self.info[upperASCII(key)] = value
def unset(self, *keys):
for key in (upperASCII(k) for k in keys):
if key in self.info:
del self.info[key]
def get(self, key):
return self.info.get(upperASCII(key), "")
def _parseline(self, line):
""" parse a line into a key, value and comment
:param str line: Line to be parsed
:returns: Tuple of key, value, comment
:rtype: tuple
Handle comments and optionally unquote quoted strings
Returns (key, value, comment) or (None, None, comment)
        key is always UPPERCASE and comment may be "" if none was found.
"""
s = line.strip()
# Look for a # outside any quotes
comment = ""
comment_index = find_comment(s)
if comment_index is not None:
comment = s[comment_index:]
s = s[:comment_index] # remove from comment to EOL
key, eq, val = s.partition('=')
key = key.strip()
val = val.strip()
if self.read_unquote:
val = unquote(val)
if key != '' and eq == '=':
return (upperASCII(key), val, comment)
else:
return (None, None, comment)
def _kvpair(self, key, comment=""):
value = self.info[key]
if self.write_quote or self.always_quote:
value = quote(value, self.always_quote)
if comment:
comment = " " + comment
return key + '=' + value + comment + "\n"
def __str__(self):
""" Return the file that was read, replacing existing keys with new values
removing keys that have been deleted and adding new keys.
"""
oldkeys = []
s = ""
for line in self._lines:
key, _value, comment = self._parseline(line)
if key is None:
s += line
else:
if key not in self.info:
continue
oldkeys.append(key)
s += self._kvpair(key, comment)
# Add new keys
for key in self.info:
if key not in oldkeys:
s += self._kvpair(key)
return s
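# Minimal illustrative demo of SimpleConfigFile (hypothetical keys; only runs
# when this module is executed directly):
if __name__ == "__main__":
    demo = SimpleConfigFile()
    demo.set(("lang", "en_US.UTF-8"), ("keyboard", "us"))
    demo.unset("keyboard")
    print(demo.get("LANG"))   # keys are upper-cased internally
    print(str(demo))          # rendered KEY=VALUE lines, quoted as needed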
| gpl-2.0 | 6,305,620,812,301,647,000 | 32.805 | 82 | 0.576838 | false |
vrv/tensorflow | tensorflow/contrib/distributions/python/ops/poisson.py | 25 | 5414 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Poisson distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = [
"Poisson",
]
_poisson_sample_note = """
Note that the input value must be a non-negative floating point tensor with
dtype `dtype` and whose shape can be broadcast with `self.rate`. `x` is only
legal if it is non-negative and its components are equal to integer values.
"""
class Poisson(distribution.Distribution):
"""Poisson distribution.
The Poisson distribution is parameterized by an event `rate` parameter.
#### Mathematical Details
The probability mass function (pmf) is,
```none
pmf(k; lambda, k >= 0) = (lambda^k / k!) / Z
Z = exp(lambda).
```
where `rate = lambda` and `Z` is the normalizing constant.
"""
def __init__(self,
rate,
validate_args=False,
allow_nan_stats=True,
name="Poisson"):
"""Initialize a batch of Poisson distributions.
Args:
rate: Floating point tensor, the rate parameter of the
distribution(s). `rate` must be positive.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[rate]):
with ops.control_dependencies([check_ops.assert_positive(rate)] if
validate_args else []):
self._rate = array_ops.identity(rate, name="rate")
super(Poisson, self).__init__(
dtype=self._rate.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._rate],
name=name)
@property
def rate(self):
"""Rate parameter."""
return self._rate
def _batch_shape_tensor(self):
return array_ops.shape(self.rate)
def _batch_shape(self):
return self.rate.get_shape()
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_poisson_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_cdf(self, x):
return math_ops.log(self.cdf(x))
@distribution_util.AppendDocstring(_poisson_sample_note)
def _cdf(self, x):
if self.validate_args:
# We set `check_integer=False` since the CDF is defined on whole real
# line.
x = distribution_util.embed_check_nonnegative_discrete(
x, check_integer=False)
return math_ops.igammac(math_ops.floor(x + 1), self.rate)
def _log_normalization(self):
return self.rate
def _log_unnormalized_prob(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_discrete(
x, check_integer=True)
return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1)
def _mean(self):
return array_ops.identity(self.rate)
def _variance(self):
return array_ops.identity(self.rate)
@distribution_util.AppendDocstring(
"""Note: when `rate` is an integer, there are actually two modes: `rate`
and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
def _mode(self):
return math_ops.floor(self.rate)
def _sample_n(self, n, seed=None):
return random_ops.random_poisson(
self.rate, [n], dtype=self.dtype, seed=seed)
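# Illustrative usage (assumes a TF 1.x graph/session environment; the numbers
# are examples only):
#   dist = Poisson(rate=3.)
#   probs = dist.prob([0., 1., 2.])   # pmf evaluated at k = 0, 1, 2
#   mean = dist.mean()                # equals the rate, 3.
#   samples = dist.sample(5)          # five draws from Poisson(3)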
| apache-2.0 | 7,096,861,138,893,395,000 | 33.484076 | 80 | 0.679165 | false |
brownshang/Flask-User | example_apps/multi_email_app.py | 10 | 4507 | import os
from flask import Flask, render_template_string
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_user import login_required, UserManager, UserMixin, SQLAlchemyAdapter
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///multi_email_app.sqlite')
CSRF_ENABLED = True
# Flask-Mail settings
MAIL_USERNAME = os.getenv('MAIL_USERNAME', '[email protected]')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'password')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <[email protected]>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = int(os.getenv('MAIL_USE_SSL', True))
# Flask-User settings
USER_APP_NAME = "AppName" # Used by email templates
USER_ENABLE_MULTIPLE_EMAILS = True
def create_app():
""" Flask application factory """
# Setup Flask app and app.config
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Initialize Flask extensions
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
mail = Mail(app) # Initialize Flask-Mail
    # Define the User data model. Make sure to add the flask_user UserMixin !!!
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
# User authentication information
username = db.Column(db.String(50), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
# User information
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
# Relationship
user_emails = db.relationship('UserEmail')
# Define UserEmail DataModel.
class UserEmail(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# User email information
email = db.Column(db.String(255), nullable=False, unique=True)
confirmed_at = db.Column(db.DateTime())
is_primary = db.Column(db.Boolean(), nullable=False, default=False)
# Relationship
user = db.relationship('User', uselist=False)
# Create all database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User, UserEmailClass=UserEmail) # Register the User model
user_manager = UserManager(db_adapter, app) # Initialize Flask-User
# The Home page is accessible to anyone
@app.route('/')
def home_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Home page</h2>
<p>This page can be accessed by anyone.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
{% endblock %}
""")
# The Members page is only accessible to authenticated users
@app.route('/members')
@login_required # Use of @login_required decorator
def members_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Members page</h2>
<p>This page can only be accessed by authenticated users.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
{% endblock %}
""")
return app
# Start development web server
if __name__=='__main__':
app = create_app()
app.run(host='0.0.0.0', port=5000, debug=True)
| bsd-2-clause | -4,549,811,117,221,769,700 | 38.884956 | 103 | 0.592856 | false |
noroutine/ansible | lib/ansible/modules/windows/win_ping.py | 34 | 1959 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_ping
version_added: "1.7"
short_description: A windows version of the classic ping module
description:
- Checks management connectivity of a windows host.
- This is NOT ICMP ping, this is just a trivial test module.
- For non-Windows targets, use the M(ping) module instead.
options:
data:
description:
- Alternate data to return instead of 'pong'.
- If this parameter is set to C(crash), the module will cause an exception.
default: pong
notes:
- For non-Windows targets, use the M(ping) module instead.
author:
- Chris Church (@cchurch)
'''
EXAMPLES = r'''
# Test connectivity to a windows host
# ansible winserver -m win_ping
# Example from an Ansible Playbook
- win_ping:
# Induce an exception to see what happens
- win_ping:
data: crash
'''
RETURN = '''
ping:
description: value provided with the data parameter
returned: success
type: string
sample: pong
'''
| gpl-3.0 | 5,661,858,080,797,863,000 | 28.238806 | 81 | 0.701889 | false |
osm-fr/osmose-backend | analysers/analyser_osmosis_highway_without_ref.py | 4 | 2575 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2017 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from .Analyser_Osmosis import Analyser_Osmosis
sql10 = """
SELECT DISTINCT
ways.id,
ST_AsText(way_locate(linestring))
FROM
highways AS ways
LEFT JOIN relation_members ON
relation_members.member_type = 'W' AND
relation_members.member_id = ways.id
LEFT JOIN relations ON
relations.id = relation_members.relation_id AND
relations.tags ?| ARRAY['noref', 'ref', 'nat_ref', 'int_ref']
WHERE
ways.highway = 'motorway' AND
NOT ways.tags ?| ARRAY['noref', 'ref', 'nat_ref', 'int_ref'] AND
relations.id IS NULL
"""
class Analyser_Osmosis_Highway_Without_Ref(Analyser_Osmosis):
requires_tables_common = ['highways']
def __init__(self, config, logger = None):
Analyser_Osmosis.__init__(self, config, logger)
self.classs[20804] = self.def_class(item = 2080, level = 2, tags = ['highway', 'ref', 'fix:chair'],
title = T_('Motorway without ref, nat_ref, int_ref or noref tag'))
self.callback10 = lambda res: {"class":20804, "data":[self.way_full, self.positionAsText]}
def analyser_osmosis_common(self):
self.run(sql10, self.callback10)
| gpl-3.0 | 6,272,077,371,413,751,000 | 44.140351 | 107 | 0.512243 | false |
kevin-intel/scikit-learn | examples/release_highlights/plot_release_highlights_0_22_0.py | 6 | 10115 | """
========================================
Release Highlights for scikit-learn 0.22
========================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 0.22, which comes
with many bug fixes and new features! We detail below a few of the major
features of this release. For an exhaustive list of all the changes, please
refer to the :ref:`release notes <changes_0_22>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# New plotting API
# ----------------
#
# A new plotting API is available for creating visualizations. This new API
# allows for quickly adjusting the visuals of a plot without involving any
# recomputation. It is also possible to add different plots to the same
# figure. The following example illustrates :class:`~metrics.plot_roc_curve`,
# but other plots utilities are supported like
# :class:`~inspection.plot_partial_dependence`,
# :class:`~metrics.plot_precision_recall_curve`, and
# :class:`~metrics.plot_confusion_matrix`. Read more about this new API in the
# :ref:`User Guide <visualizations>`.
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import plot_roc_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
import matplotlib.pyplot as plt
X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
svc = SVC(random_state=42)
svc.fit(X_train, y_train)
rfc = RandomForestClassifier(random_state=42)
rfc.fit(X_train, y_train)
svc_disp = plot_roc_curve(svc, X_test, y_test)
rfc_disp = plot_roc_curve(rfc, X_test, y_test, ax=svc_disp.ax_)
rfc_disp.figure_.suptitle("ROC curve comparison")
plt.show()
# %%
# Stacking Classifier and Regressor
# ---------------------------------
# :class:`~ensemble.StackingClassifier` and
# :class:`~ensemble.StackingRegressor`
# allow you to have a stack of estimators with a final classifier or
# a regressor.
# Stacked generalization consists in stacking the output of individual
# estimators and use a classifier to compute the final prediction. Stacking
# makes it possible to use the strength of each individual estimator by using
# their output as input of a final estimator.
# Base estimators are fitted on the full ``X`` while
# the final estimator is trained using cross-validated predictions of the
# base estimators using ``cross_val_predict``.
#
# Read more in the :ref:`User Guide <stacking>`.
from sklearn.datasets import load_iris
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.model_selection import train_test_split
X, y = load_iris(return_X_y=True)
estimators = [
('rf', RandomForestClassifier(n_estimators=10, random_state=42)),
('svr', make_pipeline(StandardScaler(),
LinearSVC(random_state=42)))
]
clf = StackingClassifier(
estimators=estimators, final_estimator=LogisticRegression()
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=42
)
clf.fit(X_train, y_train).score(X_test, y_test)
# %%
# Permutation-based feature importance
# ------------------------------------
#
# The :func:`inspection.permutation_importance` can be used to get an
# estimate of the importance of each feature, for any fitted estimator:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
X, y = make_classification(random_state=0, n_features=5, n_informative=3)
feature_names = np.array([f'x_{i}' for i in range(X.shape[1])])
rf = RandomForestClassifier(random_state=0).fit(X, y)
result = permutation_importance(rf, X, y, n_repeats=10, random_state=0,
n_jobs=-1)
fig, ax = plt.subplots()
sorted_idx = result.importances_mean.argsort()
ax.boxplot(result.importances[sorted_idx].T,
vert=False, labels=feature_names[sorted_idx])
ax.set_title("Permutation Importance of each feature")
ax.set_ylabel("Features")
fig.tight_layout()
plt.show()
# %%
# Native support for missing values for gradient boosting
# -------------------------------------------------------
#
# The :class:`ensemble.HistGradientBoostingClassifier`
# and :class:`ensemble.HistGradientBoostingRegressor` now have native
# support for missing values (NaNs). This means that there is no need for
# imputing data when training or predicting.
from sklearn.ensemble import HistGradientBoostingClassifier
X = np.array([0, 1, 2, np.nan]).reshape(-1, 1)
y = [0, 0, 1, 1]
gbdt = HistGradientBoostingClassifier(min_samples_leaf=1).fit(X, y)
print(gbdt.predict(X))
# %%
# Precomputed sparse nearest neighbors graph
# ------------------------------------------
# Most estimators based on nearest neighbors graphs now accept precomputed
# sparse graphs as input, to reuse the same graph for multiple estimator fits.
# To use this feature in a pipeline, one can use the `memory` parameter, along
# with one of the two new transformers,
# :class:`neighbors.KNeighborsTransformer` and
# :class:`neighbors.RadiusNeighborsTransformer`. The precomputation
# can also be performed by custom estimators to use alternative
# implementations, such as approximate nearest neighbors methods.
# See more details in the :ref:`User Guide <neighbors_transformer>`.
from tempfile import TemporaryDirectory
from sklearn.neighbors import KNeighborsTransformer
from sklearn.manifold import Isomap
from sklearn.pipeline import make_pipeline
X, y = make_classification(random_state=0)
with TemporaryDirectory(prefix="sklearn_cache_") as tmpdir:
estimator = make_pipeline(
KNeighborsTransformer(n_neighbors=10, mode='distance'),
Isomap(n_neighbors=10, metric='precomputed'),
memory=tmpdir)
estimator.fit(X)
# We can decrease the number of neighbors and the graph will not be
# recomputed.
estimator.set_params(isomap__n_neighbors=5)
estimator.fit(X)
# %%
# KNN Based Imputation
# ------------------------------------
# We now support imputation for completing missing values using k-Nearest
# Neighbors.
#
# Each sample's missing values are imputed using the mean value from
# ``n_neighbors`` nearest neighbors found in the training set. Two samples are
# close if the features that neither is missing are close.
# By default, a euclidean distance metric
# that supports missing values,
# :func:`~metrics.nan_euclidean_distances`, is used to find the nearest
# neighbors.
#
# Read more in the :ref:`User Guide <knnimpute>`.
from sklearn.impute import KNNImputer
X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
imputer = KNNImputer(n_neighbors=2)
print(imputer.fit_transform(X))
# %%
# Tree pruning
# ------------
#
# It is now possible to prune most tree-based estimators once the trees are
# built. The pruning is based on minimal cost-complexity. Read more in the
# :ref:`User Guide <minimal_cost_complexity_pruning>` for details.
X, y = make_classification(random_state=0)
rf = RandomForestClassifier(random_state=0, ccp_alpha=0).fit(X, y)
print("Average number of nodes without pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])))
rf = RandomForestClassifier(random_state=0, ccp_alpha=0.05).fit(X, y)
print("Average number of nodes with pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])))
# %%
# Retrieve dataframes from OpenML
# -------------------------------
# :func:`datasets.fetch_openml` can now return pandas dataframe and thus
# properly handle datasets with heterogeneous data:
from sklearn.datasets import fetch_openml
titanic = fetch_openml('titanic', version=1, as_frame=True)
print(titanic.data.head()[['pclass', 'embarked']])
# %%
# Checking scikit-learn compatibility of an estimator
# ---------------------------------------------------
# Developers can check the compatibility of their scikit-learn compatible
# estimators using :func:`~utils.estimator_checks.check_estimator`. For
# instance, the ``check_estimator(LinearSVC())`` passes.
#
# We now provide a ``pytest`` specific decorator which allows ``pytest``
# to run all checks independently and report the checks that are failing.
#
# .. note::
# This entry was slightly updated in version 0.24, where passing classes
# isn't supported anymore: pass instances instead.
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.utils.estimator_checks import parametrize_with_checks
@parametrize_with_checks([LogisticRegression(), DecisionTreeRegressor()])
def test_sklearn_compatible_estimator(estimator, check):
check(estimator)
# %%
# ROC AUC now supports multiclass classification
# ----------------------------------------------
# The :func:`roc_auc_score` function can also be used in multi-class
# classification. Two averaging strategies are currently supported: the
# one-vs-one algorithm computes the average of the pairwise ROC AUC scores, and
# the one-vs-rest algorithm computes the average of the ROC AUC scores for each
# class against all other classes. In both cases, the multiclass ROC AUC scores
# are computed from the probability estimates that a sample belongs to a
# particular class according to the model. The OvO and OvR algorithms support
# weighting uniformly (``average='macro'``) and weighting by the prevalence
# (``average='weighted'``).
#
# Read more in the :ref:`User Guide <roc_metrics>`.
from sklearn.datasets import make_classification
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
X, y = make_classification(n_classes=4, n_informative=16)
clf = SVC(decision_function_shape='ovo', probability=True).fit(X, y)
print(roc_auc_score(y, clf.predict_proba(X), multi_class='ovo'))
| bsd-3-clause | 421,730,285,637,798,300 | 36.462963 | 79 | 0.719525 | false |
Scarygami/mirror-api-examples | colours-of-the-world/tasks.py | 1 | 8959 | #!/usr/bin/python
# Copyright (C) 2013 Gerwin Sturm, FoldedSoft e.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create tasks and evaluate submissions, implemented as taskqueues to work in the background"""
from auth import get_auth_service
from models import Submission
from utils import base_url
import cStringIO
import Image
import numpy
import logging
import random
import webapp2
from apiclient.errors import HttpError
from google.appengine.api import files
from google.appengine.api import taskqueue
from google.appengine.api.images import get_serving_url
from google.appengine.ext import blobstore
from google.appengine.ext import ndb
from oauth2client.client import AccessTokenRefreshError
COLORS = {
"red": {"name": "Red", "hue": 0, "min": -15, "max": 15},
"orange": {"name": "Orange", "hue": 30, "min": 15, "max": 45},
"yellow": {"name": "Yellow", "hue": 60, "min": 40, "max": 90},
"green": {"name": "Green", "hue": 120, "min": 90, "max": 180},
"blue": {"name": "Blue", "hue": 240, "min": 180, "max": 260},
"indigo": {"name": "Indigo", "hue": 280, "min": 250, "max": 310},
"violet": {"name": "Violet", "hue": 320, "min": 300, "max": 345}
}
SOURCE_ITEM_ID = "colours_of_the_world_task"
class CreateTaskWorker(webapp2.RequestHandler):
"""
Creates a new task for a user
"""
def post(self):
gplus_id = self.request.get("user")
test = self.request.get("test")
if test == "" or test == "None":
test = None
service = get_auth_service(gplus_id, test)
if service is None:
logging.error("User not authenticated")
return
if test is not None:
user = ndb.Key("TestUser", gplus_id).get()
else:
user = ndb.Key("User", gplus_id).get()
if user is None:
logging.error("User not found")
return
col = random.choice(COLORS.keys())
user.currentTask = col
user.put()
card = {
"html": ("<article class=\"photo\">"
" <img src=\"" + base_url + "/images/" + col + ".png\" width=\"100%\" height=\"100%\">"
" <div class=\"photo-overlay\"></div>"
" <section>"
" <p class=\"text-auto-size\">Current Task: " + COLORS[col]["name"] + "</p>"
" </section>"
"</article>"),
"menuItems": [
{
"action": "CUSTOM",
"id": "giveup",
"values": [{
"displayName": "Give Up",
"iconUrl": "https://mirror-api.appspot.com/glass/images/error.png"
}]
},
{
"action": "TOGGLE_PINNED"
}
],
"sourceItemId": SOURCE_ITEM_ID
}
result = service.timeline().list(sourceItemId=SOURCE_ITEM_ID).execute()
if "items" in result and len(result["items"]) > 0:
request = service.timeline().update(id=result["items"][0]["id"], body=card)
else:
request = service.timeline().insert(body=card)
try:
request.execute()
except AccessTokenRefreshError:
logging.error("Failed to refresh access token.")
return
except HttpError as e:
logging.error("Failed to execute request. %s" % e)
return
class EvaluateWorker(webapp2.RequestHandler):
"""
    Evaluates a photo submission against the user's current colour task
"""
def post(self):
gplus_id = self.request.get("user")
test = self.request.get("test")
if test == "" or test == "None":
test = None
item_id = self.request.get("item")
service = get_auth_service(gplus_id, test)
if service is None:
logging.error("No valid credentials")
return
if test is not None:
user = ndb.Key("TestUser", gplus_id).get()
else:
user = ndb.Key("User", gplus_id).get()
if user.currentTask is None:
logging.info("User has no current task")
return
item = service.timeline().get(id=item_id).execute()
imageId = None
if "attachments" in item:
for att in item["attachments"]:
if att["contentType"].startswith("image/"):
imageId = att["id"]
break
if imageId is None:
logging.info("No suitable attachment")
return
attachment_metadata = service.timeline().attachments().get(
itemId=item["id"], attachmentId=imageId).execute()
content_url = attachment_metadata.get("contentUrl")
content_type = attachment_metadata.get("contentType")
resp, content = service._http.request(content_url)
if resp.status != 200:
logging.info("Couldn't fetch attachment")
tempimg = cStringIO.StringIO(content)
i = Image.open(tempimg).convert("RGB").resize((200, 200), Image.ANTIALIAS)
a = numpy.asarray(i, int)
R, G, B = a.T
m = numpy.min(a, 2).T
M = numpy.max(a, 2).T
        # Chroma = max(R, G, B) - min(R, G, B)
C = M - m
Cmsk = C != 0
# Hue
H = numpy.zeros(R.shape, int)
mask = (M == R) & Cmsk
H[mask] = numpy.mod(60*(G-B)/C, 360)[mask]
mask = (M == G) & Cmsk
H[mask] = (60*(B-R)/C + 120)[mask]
mask = (M == B) & Cmsk
H[mask] = (60*(R-G)/C + 240)[mask]
# Value
V = M
# Saturation
S = numpy.zeros(R.shape, int)
S[Cmsk] = ((255*C)/V)[Cmsk]
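        # Only pixels that are bright and saturated enough (V and S above 100
        # out of 255) are counted towards a colour below.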
mask = (V > 100) & (S > 100)
count = {}
for col in COLORS:
count[col] = 0
v1 = COLORS[col]["min"]
v2 = COLORS[col]["max"]
if (v1 < 0):
col_mask = ((H < v2) | (H > 360 + v1)) & mask
else:
col_mask = ((H > v1) & (H < v2)) & mask
Col = numpy.zeros(R.shape, int)
Col[col_mask] = numpy.ones(R.shape, int)[col_mask]
count[col] = numpy.count_nonzero(Col)
sum = 0
for col in count:
if count[col] < 1000:
count[col] = 0
else:
sum = sum + count[col]
if sum == 0:
item["text"] = "No colours recognized."
service.timeline().update(id=item_id, body=item).execute()
return
recognized = []
correct = False
task = user.currentTask
for col in count:
count[col] = count[col] * 100 / sum
if count[col] > 40:
if col == task:
correct = True
break
recognized.append(col)
if correct:
item["text"] = "Congratulations!"
service.timeline().update(id=item_id, body=item).execute()
user.currentTask = None
user.put()
# Insert submission
file_name = files.blobstore.create(mime_type=content_type)
with files.open(file_name, 'a') as f:
f.write(content)
files.finalize(file_name)
blob_key = files.blobstore.get_blob_key(file_name)
url = get_serving_url(blob_key, secure_url=True, size=640)
submission = Submission(colour=task,
hue=COLORS[task]["hue"],
blobkey=blob_key,
url=url,
parent=user.key)
submission.put()
# TODO: Update scores/achievements
# Create next task
taskqueue.add(url="/tasks/createtask",
params={"user": gplus_id, "test": test},
method="POST")
else:
if len(recognized) == 0:
item["text"] = "No colours recognized."
else:
item["text"] = "Recognized " + ", ".join(recognized) + " but your current task is " + user.currentTask
service.timeline().update(id=item_id, body=item).execute()
TASK_ROUTES = [
("/tasks/createtask", CreateTaskWorker),
("/tasks/evaluate", EvaluateWorker)
]
| apache-2.0 | 522,482,538,929,888,960 | 31.34296 | 118 | 0.511776 | false |
gordon-/yolo | learning/models.py | 1 | 10361 | from datetime import timedelta, date
from django.db import models
from django.contrib.auth.models import User, Group
from django.utils import timezone
from django.utils.text import slugify
from polymorphic import PolymorphicModel
from .validators import colorValidator
from .eventview import EventView
class Member(models.Model):
user = models.OneToOneField(User, verbose_name='utilisateur',
related_name='member')
avatar = models.ImageField(upload_to='avatar', null=True, blank=True)
type = models.CharField(max_length=10, choices=(('student', 'élève'),
('teacher', 'formateur'),
('admin', 'administrateur')
),
verbose_name='type')
tags = models.ManyToManyField('Tag', related_name='members',
verbose_name='tags', blank=True)
def __str__(self):
return self.user.__str__()
class Meta:
verbose_name = 'Membre'
class Tag(models.Model):
name = models.CharField(max_length=50, verbose_name='nom', unique=True)
color = models.CharField(max_length=7, verbose_name='couleur',
validators=[colorValidator])
def __str__(self):
return self.name
class Meta:
verbose_name = 'tag'
def to_css(self):
"""
Returns the slugged name, for CSS class integration
"""
return slugify(self.name)
# Resource models
class Formation(models.Model):
name = models.CharField(max_length=50, verbose_name='nom', unique=True)
description = models.TextField(verbose_name='description', blank=True)
days_count = models.PositiveIntegerField(verbose_name='nombre de jours')
objectives = models.ManyToManyField('Objective', verbose_name='objectifs',
related_name='formations')
tags = models.ManyToManyField('Tag', verbose_name='tags',
related_name='formations')
def __str__(self):
return self.name
class Meta:
verbose_name = 'formation'
class Objective(models.Model):
name = models.CharField(max_length=50, verbose_name='nom', unique=True)
points = models.PositiveIntegerField(verbose_name='nombre de points')
description = models.TextField(verbose_name='description', blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'objectif'
class Course(PolymorphicModel):
name = models.CharField(max_length=50, verbose_name='nom', unique=True)
description = models.TextField(verbose_name='description', blank=True)
standard_duration = models.DurationField(verbose_name='durée standard',
default=timedelta(days=1))
dependencies = models.ManyToManyField('Objective',
verbose_name='dépendances',
related_name='dependant_courses',
blank=True)
objectives = models.ManyToManyField('Objective',
verbose_name='objectifs apportés',
related_name='courses',
blank=True)
tag = models.ForeignKey('Tag', verbose_name='tag', related_name='courses')
def __str__(self):
return self.name
def is_owned(self, user, perm):
if perm == 'learning.view_course':
try:
attribution = self.course_attributions.get(
promotion__group__in=user.groups.all())
return attribution.begin < timezone.now()
except CourseAttribution.DoesNotExist:
return False
return True
def course_type(self):
return 'Course'
class Meta:
verbose_name = 'cours'
verbose_name_plural = 'cours'
permissions = (('view_course', 'Can view a course'), )
class LectureCourse(Course):
content = models.TextField(verbose_name='contenu')
def course_type(self):
return 'Lecture'
class Meta:
verbose_name = 'cours théorique'
class LectureFile(models.Model):
course = models.ForeignKey('LectureCourse', verbose_name='cours',
related_name='files')
file = models.FileField(upload_to='lectures')
type = models.CharField(max_length=50, verbose_name='type MIME de fichier')
def save(self, *args, **kwargs):
"""
fetching filetype
"""
return super().save(*args, **kwargs)
class Meta:
verbose_name = 'support de cours'
verbose_name_plural = 'supports de cours'
class QuestionCourse(Course):
def course_type(self):
return 'Question'
class Meta:
verbose_name = 'questionnaire'
class Question(models.Model):
course = models.ForeignKey('QuestionCourse', verbose_name='questionnaire',
related_name='questions')
label = models.CharField(max_length=50, verbose_name='intitulé')
order = models.PositiveIntegerField(verbose_name='ordre')
question_type = models.CharField(max_length=10,
choices=(('mono', 'une seule réponse'),
('multi', 'plusieurs réponses'),
),
verbose_name='type de question')
def __str__(self):
return '{course} : {question}'.format(course=self.course.name,
question=self.label)
class Meta:
verbose_name = 'question'
unique_together = (('course', 'order'), )
class Answer(models.Model):
question = models.ForeignKey('Question')
label = models.CharField(max_length=50, verbose_name='réponse')
order = models.PositiveIntegerField(verbose_name='ordre')
valid = models.BooleanField(verbose_name='réponse vraie', default=False)
def __str__(self):
return '{question} — {answer}'.format(question=self.question.__str__(),
answer=self.label)
class Meta:
verbose_name = 'réponse'
unique_together = (('question', 'order'), )
class PracticeCourse(Course):
instructions = models.TextField(verbose_name='instructions')
repository = models.URLField(verbose_name='dépôt')
def course_type(self):
return 'Practice'
class Meta:
verbose_name = 'cours pratique'
# Calendar attribution models
class Promotion(models.Model):
name = models.CharField(max_length=50, verbose_name='promotion',
unique=True)
group = models.OneToOneField(Group, verbose_name='groupe',
related_name='promotion')
image = models.ImageField(upload_to='promotion', verbose_name='avatar de '
'la promotion')
formation_type = models.ForeignKey('Formation', verbose_name='type de '
'formation', related_name='promotions')
def __str__(self):
return self.name
def calendars(self):
""" returns a list of Calendar objects (list of weeks, represented by
DateTime/lists of events tuples) with all events for the current
Promotion.
"""
ev = EventView(self.courses.all(), lambda c: c.begin.date(),
lambda c: c.end.date())
attributes = {day.day: day.attributes() for day in self.days.all()}
ev.add_days_attributes(attributes)
return ev.calendar()
class Meta:
verbose_name = 'promotion'
class DayAttribution(models.Model):
promotion = models.ForeignKey('Promotion', verbose_name='promotion',
related_name='days')
assigned = models.ForeignKey('Member', verbose_name='formateur',
related_name='days')
day = models.DateField(verbose_name='jour')
tag = models.ForeignKey('Tag', verbose_name='tag', related_name='days')
def attributes(self):
"""
Returns a dict of attributes for the current day.
"""
attrs = {}
attrs['active'] = True
attrs['today'] = self.day == date.today()
attrs['assigned'] = self.assigned
attrs['tag'] = self.tag
return attrs
def __str__(self):
return '{promotion} : {day}'.format(promotion=self.promotion.name,
day=self.day)
class Meta:
verbose_name = 'attribution de jour'
verbose_name_plural = 'attributions de jours'
class ObjectiveAttribution(models.Model):
promotion = models.ForeignKey('Promotion', verbose_name='promotion',
related_name='objectives')
objective = models.ForeignKey('Objective', verbose_name='objectif',
related_name='attributed_courses')
day = models.DateField(verbose_name='jour')
def __str__(self):
return '{promotion} — {objective}'.\
format(promotion=self.promotion.name,
objective=self.objective.name)
class Meta:
verbose_name = 'attribution d’objectif'
verbose_name_plural = 'attributions d’objectifs'
unique_together = (('promotion', 'objective'), )
class CourseAttribution(models.Model):
promotion = models.ForeignKey('Promotion', verbose_name='promotion',
related_name='courses')
course = models.ForeignKey('Course', verbose_name='cours',
related_name='course_attributions')
begin = models.DateTimeField(verbose_name='date et heure de début')
end = models.DateTimeField(verbose_name='date et heure de fin')
def __str__(self):
return '{promotion} — {course}'.format(promotion=self.promotion.name,
course=self.course.name)
class Meta:
verbose_name = 'attribution de cours'
verbose_name_plural = 'attributions de cours'
unique_together = (('promotion', 'course'), )
| agpl-3.0 | 1,879,368,670,769,459,500 | 34.376712 | 79 | 0.57425 | false |
unixhot/opencmdb | django_mongoengine/mongo_admin/templatetags/documenttags.py | 2 | 2214 | from django.template import Library
from django.contrib.admin.templatetags.admin_list import (result_hidden_fields, ResultList, items_for_result,
result_headers)
from django.db.models.fields import FieldDoesNotExist
from django_mongoengine.forms.utils import patch_document
register = Library()
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field_by_name(field_name)[0]
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.name)
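# Note: serializable_value is not called directly here; results() below
# patches it onto each mongoengine document (via patch_document) so the admin
# templates can call obj.serializable_value(field_name) as on a Django model.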
def results(cl):
"""
Just like the one from Django. Only we add a serializable_value method to
the document, because Django expects it and mongoengine doesn't have it.
"""
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
patch_document(serializable_value, res)
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
patch_document(serializable_value, res)
yield ResultList(None, items_for_result(cl, res, None))
def document_result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
try:
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
except KeyError:
pass
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
result_list = register.inclusion_tag("admin/change_list_results.html")(document_result_list)
| apache-2.0 | -1,069,084,826,514,123,900 | 35.9 | 109 | 0.648148 | false |
twchoi/tmp-brunet-deetoo | tests/loganalysis/chronilog.py | 3 | 2226 | #!/usr/bin/env python
#
# This takes a Brunet connection log and parses it into a graph
# The current version takes the whole log.
#
import sys, time, copy, stats
from datetime import timedelta, datetime
infilename = sys.argv[1]
mapfilename = infilename + '.address_map'
ifile = open( infilename, 'r') # r for reading
mapfile = open( mapfilename, 'w') # w for writing
time_to_data_list = {}
ah_addresses = []
tmp_local_address = 0
rand_address_to_sequential_int = {}
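# Log format, as inferred from the parsing below: a "local_address <int>" line
# names the node writing the log; every other line with enough fields starts
# with "M/D/Y H:M:S:ms" followed by two event fields and another node's
# Brunet address.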
for line in ifile:
parsed_line = line.split()
if parsed_line[0] == 'local_address' :
tmp_local_address = int(parsed_line[1])
rand_address_to_sequential_int[tmp_local_address]= 1
else :
if len( parsed_line) > 4:
tmp_date = parsed_line[0]
tmp_time = parsed_line[1]
p_d = tmp_date.split('/')
p_t = tmp_time.split(':')
year = int(p_d[2])
month = int(p_d[0])
day = int(p_d[1])
hour = int(p_t[0])
minute = int(p_t[1])
second = int(p_t[2])
microsecond = 1000*int(p_t[3])
tmp_time = datetime(year,month,day,hour,minute,second,microsecond)
tmp_data = []
tmp_data.append(parsed_line[2])
tmp_data.append(parsed_line[3])
tmp_data.append(tmp_local_address)
tmp_data.append( int(parsed_line[4]) )
if tmp_time in time_to_data_list:
tmp_existing_list = time_to_data_list[tmp_time]
tmp_existing_list.append(tmp_data)
else:
tmp_new_list = []
tmp_new_list.append(tmp_data)
time_to_data_list[tmp_time] = tmp_new_list
brunet_addresses = rand_address_to_sequential_int.keys()
brunet_addresses.sort()
tmp_int = 0
for b_add in brunet_addresses:
rand_address_to_sequential_int[b_add]= tmp_int
tmp_int = tmp_int + 1
hash_invert = {}
for b_add in brunet_addresses:
hash_invert[rand_address_to_sequential_int[b_add] ] = b_add
inverted = hash_invert.keys()
for sm_int in inverted:
mapfile.write("%i ---> %i\n" % (sm_int,hash_invert[sm_int]) )
timestamps = time_to_data_list.keys()
timestamps.sort()
for time_it in timestamps:
for tr in time_to_data_list[time_it]:
print time_it,tr[0],tr[1],rand_address_to_sequential_int[tr[2]],rand_address_to_sequential_int[tr[3]]
| gpl-2.0 | 1,403,455,101,548,990,200 | 29.081081 | 105 | 0.636568 | false |