repo_name | path | copies | size | content | license
---|---|---|---|---|---
chinmaygarde/mojo | sky/engine/bindings/scripts/dart_compiler.py | 14 | 4699 | #!/usr/bin/python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Compile an .idl file to Blink C++ bindings (.h and .cpp files) for Dart:HTML.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import abc
import os.path
import cPickle as pickle
from idl_reader import IdlReader
from utilities import write_file
def idl_filename_to_interface_name(idl_filename):
basename = os.path.basename(idl_filename)
interface_name, _ = os.path.splitext(basename)
return interface_name
class IdlCompiler(object):
"""Abstract Base Class for IDL compilers.
In concrete classes:
* self.code_generator must be set, implementing generate_code()
(returning a list of output code), and
* compile_file() must be implemented (handling output filenames).
"""
__metaclass__ = abc.ABCMeta
def __init__(self, output_directory, code_generator=None,
interfaces_info=None, interfaces_info_filename='',
only_if_changed=False):
"""
Args:
interfaces_info:
interfaces_info dict
(avoids auxiliary file in run-bindings-tests)
interfaces_info_filename: filename of pickled interfaces_info
"""
self.code_generator = code_generator
if interfaces_info_filename:
with open(interfaces_info_filename) as interfaces_info_file:
interfaces_info = pickle.load(interfaces_info_file)
self.interfaces_info = interfaces_info
self.only_if_changed = only_if_changed
self.output_directory = output_directory
self.reader = IdlReader(interfaces_info, output_directory)
def compile_and_write(self, idl_filename, output_filenames):
interface_name = idl_filename_to_interface_name(idl_filename)
idl_pickle_filename = os.path.join(self.output_directory,
'%s_globals.pickle' % interface_name)
definitions = self.reader.read_idl_definitions(idl_filename)
output_code_list = self.code_generator.generate_code(definitions,
interface_name,
idl_filename,
idl_pickle_filename,
self.only_if_changed)
for output_code, output_filename in zip(output_code_list, output_filenames):
write_file(output_code, output_filename, self.only_if_changed)
def generate_global_and_write(self, global_entries, output_filenames):
output_code_list = self.code_generator.generate_globals(global_entries)
for output_code, output_filename in zip(output_code_list, output_filenames):
write_file(output_code, output_filename, self.only_if_changed)
def generate_dart_blink_and_write(self, global_entries, output_filename):
output_code = self.code_generator.generate_dart_blink(global_entries)
write_file(output_code, output_filename, self.only_if_changed)
@abc.abstractmethod
def compile_file(self, idl_filename):
pass
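# A hypothetical minimal concrete subclass, sketched here to illustrate the
# contract described in the IdlCompiler docstring: the code generator is
# supplied via __init__, and compile_file() only has to derive the output
# filenames before delegating to compile_and_write(). The class name and the
# '.h'/'.cpp' naming are assumptions for illustration, not part of this module.
class ExampleIdlCompiler(IdlCompiler):
    def compile_file(self, idl_filename):
        interface_name = idl_filename_to_interface_name(idl_filename)
        output_filenames = [
            os.path.join(self.output_directory, interface_name + ext)
            for ext in ('.h', '.cpp')]
        self.compile_and_write(idl_filename, output_filenames)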
| bsd-3-clause |
solus-project/package-management | tests/archivetests.py | 3 | 1918 | import pisi
import unittest
from pisi import util
from pisi import uri
from pisi import archive
from pisi import sourcearchive
from pisi import fetcher
from pisi.specfile import SpecFile
from os.path import join, exists
class ArchiveTestCase(unittest.TestCase):
def testTarUnpack(self):
spec = SpecFile('repos/pardus-2007/system/base/curl/pspec.xml')
targetDir = '/tmp/tests'
archives = sourcearchive.SourceArchives(spec)
archives.unpack(targetDir)
for archive in spec.source.archive:
assert archive.type == 'targz'
def testUnpackTarCond(self):
spec = SpecFile('repos/pardus-2007/system/base/curl/pspec.xml')
targetDir = '/tmp'
archives = sourcearchive.SourceArchives(spec)
for archive in spec.source.archive:
url = uri.URI(archive.uri)
filePath = join(pisi.context.config.archives_dir(), url.filename())
if util.sha1_file(filePath) != archive.sha1sum:
fetch = fetcher.Fetcher(archive.uri, targetDir)
fetch.fetch()
assert archive.type == 'targz'
def testZipUnpack(self):
spec = SpecFile('repos/pardus-2007/system/base/openssl/pspec.xml')
targetDir = '/tmp/tests'
archives = sourcearchive.SourceArchives(spec)
archives.fetch()
archives.unpack(targetDir)
assert not exists(targetDir + '/openssl')
def testMakeZip(self):
spec = SpecFile('repos/pardus-2007/system/base/openssl/pspec.xml')
targetDir = '/tmp/tests'
archives = sourcearchive.SourceArchives(spec)
archives.fetch(interactive = False)
archives.unpack(targetDir, clean_dir=True)
del archives
newDir = targetDir + '/newZip'
zip = archive.ArchiveZip(newDir, 'zip', 'w')
sourceDir = '/tmp/pisi-root'
zip.add_to_archive(sourceDir)
zip.close()
| gpl-2.0 |
lpirl/ansible | lib/ansible/utils/module_docs_fragments/validate.py | 366 | 1146 | # Copyright (c) 2015 Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = '''
options:
validate:
required: false
description:
- The validation command to run before copying into place. The path to the file to
validate is passed in via '%s' which must be present as in the example below.
The command is passed securely so shell features like expansion and pipes won't work.
default: None
'''
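# A hedged usage sketch (not part of the fragment itself): in a module that
# includes this fragment, the option is typically set to a command containing
# '%s', which is replaced with the path of the file to validate, e.g.:
#
#   validate: /usr/sbin/visudo -cf %s
#
# The command and path above are illustrative assumptions, not values defined
# by this file.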
| gpl-3.0 |
elainexmas/boto | boto/machinelearning/exceptions.py | 127 | 1596 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
class InternalServerException(BotoServerError):
pass
class LimitExceededException(BotoServerError):
pass
class IdempotentParameterMismatchException(BotoServerError):
pass
class ResourceInUseException(BotoServerError):
pass
class ResourceNotFoundException(BotoServerError):
pass
class PredictorNotMountedException(BotoServerError):
pass
class InvalidInputException(BotoServerError):
pass
| mit |
2ndQuadrant/ansible | lib/ansible/modules/monitoring/sensu_handler.py | 52 | 9195 | #!/usr/bin/python
# (c) 2017, Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sensu_handler
author: "David Moreau Simard (@dmsimard)"
short_description: Manages Sensu handler configuration
version_added: 2.4
description:
- Manages Sensu handler configuration
- 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)'
options:
state:
description:
- Whether the handler should be present or not
choices: [ 'present', 'absent' ]
default: present
name:
description:
- A unique name for the handler. The name cannot contain special characters or spaces.
required: True
type:
description:
- The handler type
choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ]
required: True
filter:
description:
- The Sensu event filter (name) to use when filtering events for the handler.
filters:
description:
- An array of Sensu event filters (names) to use when filtering events for the handler.
- Each array item must be a string.
severities:
description:
- An array of check result severities the handler will handle.
- 'NOTE: event resolution bypasses this filtering.'
choices: [ 'warning', 'critical', 'unknown' ]
mutator:
description:
- The Sensu event mutator (name) to use to mutate event data for the handler.
timeout:
description:
- The handler execution duration timeout in seconds (hard stop).
- Only used by pipe and tcp handler types.
default: 10
handle_silenced:
description:
- If events matching one or more silence entries should be handled.
type: bool
default: 'no'
handle_flapping:
description:
- If events in the flapping state should be handled.
type: bool
default: 'no'
command:
description:
- The handler command to be executed.
- The event data is passed to the process via STDIN.
- 'NOTE: the command attribute is only required for Pipe handlers (i.e. handlers configured with "type": "pipe").'
socket:
description:
- The socket definition scope, used to configure the TCP/UDP handler socket.
- 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").'
pipe:
description:
- The pipe definition scope, used to configure the Sensu transport pipe.
- 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").'
handlers:
description:
- An array of Sensu event handlers (names) to use for events using the handler set.
- Each array item must be a string.
- 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").'
notes:
- Check mode is supported
'''
EXAMPLES = '''
# Configure a handler that sends event data as STDIN (pipe)
- name: Configure IRC Sensu handler
sensu_handler:
name: "irc_handler"
type: "pipe"
command: "/usr/local/bin/notify-irc.sh"
severities:
- "ok"
- "critical"
- "warning"
- "unknown"
timeout: 15
notify:
- Restart sensu-client
- Restart sensu-server
# Delete a handler
- name: Delete IRC Sensu handler
sensu_handler:
name: "irc_handler"
state: "absent"
# Example of a TCP handler
- name: Configure TCP Sensu handler
sensu_handler:
name: "tcp_handler"
type: "tcp"
timeout: 30
socket:
host: "10.0.1.99"
port: 4444
register: handler
notify:
- Restart sensu-client
- Restart sensu-server
- name: Secure Sensu handler configuration file
file:
path: "{{ handler['file'] }}"
owner: "sensu"
group: "sensu"
mode: "0600"
'''
RETURN = '''
config:
description: Effective handler configuration, when state is present
returned: success
type: dict
sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'}
file:
description: Path to the handler configuration file
returned: success
type: str
sample: "/etc/sensu/conf.d/handlers/irc.json"
name:
description: Name of the handler
returned: success
type: str
sample: "irc"
'''
import json
import os
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
supports_check_mode=True,
argument_spec=dict(
state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
name=dict(type='str', required=True),
type=dict(type='str', required=False, choices=['pipe', 'tcp', 'udp', 'transport', 'set']),
filter=dict(type='str', required=False),
filters=dict(type='list', required=False),
severities=dict(type='list', required=False),
mutator=dict(type='str', required=False),
timeout=dict(type='int', required=False, default=10),
handle_silenced=dict(type='bool', required=False, default=False),
handle_flapping=dict(type='bool', required=False, default=False),
command=dict(type='str', required=False),
socket=dict(type='dict', required=False),
pipe=dict(type='dict', required=False),
handlers=dict(type='list', required=False),
),
required_if=[
['state', 'present', ['type']],
['type', 'pipe', ['command']],
['type', 'tcp', ['socket']],
['type', 'udp', ['socket']],
['type', 'transport', ['pipe']],
['type', 'set', ['handlers']]
]
)
state = module.params['state']
name = module.params['name']
path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name)
if state == 'absent':
if os.path.exists(path):
if module.check_mode:
msg = '{path} would have been deleted'.format(path=path)
module.exit_json(msg=msg, changed=True)
else:
try:
os.remove(path)
msg = '{path} deleted successfully'.format(path=path)
module.exit_json(msg=msg, changed=True)
except OSError as e:
msg = 'Exception when trying to delete {path}: {exception}'
module.fail_json(
msg=msg.format(path=path, exception=str(e)))
else:
# Idempotency: it's okay if the file doesn't exist
msg = '{path} already does not exist'.format(path=path)
module.exit_json(msg=msg)
# Build handler configuration from module arguments
config = {'handlers': {name: {}}}
args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout',
'handle_silenced', 'handle_flapping', 'command', 'socket',
'pipe', 'handlers']
for arg in args:
if arg in module.params and module.params[arg] is not None:
config['handlers'][name][arg] = module.params[arg]
# Load the current config, if there is one, so we can compare
current_config = None
try:
current_config = json.load(open(path, 'r'))
except (IOError, ValueError):
# File either doesn't exist or it's invalid JSON
pass
if current_config is not None and current_config == config:
# Config is the same, let's not change anything
module.exit_json(msg='Handler configuration is already up to date',
config=config['handlers'][name],
file=path,
name=name)
# Validate that directory exists before trying to write to it
if not module.check_mode and not os.path.exists(os.path.dirname(path)):
try:
os.makedirs(os.path.dirname(path))
except OSError as e:
module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
str(e)))
if module.check_mode:
module.exit_json(msg='Handler configuration would have been updated',
changed=True,
config=config['handlers'][name],
file=path,
name=name)
try:
with open(path, 'w') as handler:
handler.write(json.dumps(config, indent=4))
module.exit_json(msg='Handler configuration updated',
changed=True,
config=config['handlers'][name],
file=path,
name=name)
except (OSError, IOError) as e:
module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
str(e)))
if __name__ == '__main__':
main()
| gpl-3.0 |
arahuja/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 14 | 6123 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
tquilian/exelearningTest | twisted/trial/test/detests.py | 98 | 4765 | from __future__ import generators
from twisted.trial import unittest
from twisted.internet import defer, threads, reactor
class DeferredSetUpOK(unittest.TestCase):
def setUp(self):
d = defer.succeed('value')
d.addCallback(self._cb_setUpCalled)
return d
def _cb_setUpCalled(self, ignored):
self._setUpCalled = True
def test_ok(self):
self.failUnless(self._setUpCalled)
class DeferredSetUpFail(unittest.TestCase):
testCalled = False
def setUp(self):
return defer.fail(unittest.FailTest('i fail'))
def test_ok(self):
DeferredSetUpFail.testCalled = True
self.fail("I should not get called")
class DeferredSetUpCallbackFail(unittest.TestCase):
testCalled = False
def setUp(self):
d = defer.succeed('value')
d.addCallback(self._cb_setUpCalled)
return d
def _cb_setUpCalled(self, ignored):
self.fail('deliberate failure')
def test_ok(self):
DeferredSetUpCallbackFail.testCalled = True
class DeferredSetUpError(unittest.TestCase):
testCalled = False
def setUp(self):
return defer.fail(RuntimeError('deliberate error'))
def test_ok(self):
DeferredSetUpError.testCalled = True
class DeferredSetUpNeverFire(unittest.TestCase):
testCalled = False
def setUp(self):
return defer.Deferred()
def test_ok(self):
DeferredSetUpNeverFire.testCalled = True
class DeferredSetUpSkip(unittest.TestCase):
testCalled = False
def setUp(self):
d = defer.succeed('value')
d.addCallback(self._cb1)
return d
def _cb1(self, ignored):
raise unittest.SkipTest("skip me")
def test_ok(self):
DeferredSetUpSkip.testCalled = True
class DeferredTests(unittest.TestCase):
touched = False
def _cb_fail(self, reason):
self.fail(reason)
def _cb_error(self, reason):
raise RuntimeError(reason)
def _cb_skip(self, reason):
raise unittest.SkipTest(reason)
def _touchClass(self, ignored):
self.__class__.touched = True
def setUp(self):
self.__class__.touched = False
def test_pass(self):
return defer.succeed('success')
def test_passGenerated(self):
self._touchClass(None)
yield None
test_passGenerated = defer.deferredGenerator(test_passGenerated)
def test_fail(self):
return defer.fail(self.failureException('I fail'))
def test_failureInCallback(self):
d = defer.succeed('fail')
d.addCallback(self._cb_fail)
return d
def test_errorInCallback(self):
d = defer.succeed('error')
d.addCallback(self._cb_error)
return d
def test_skip(self):
d = defer.succeed('skip')
d.addCallback(self._cb_skip)
d.addCallback(self._touchClass)
return d
def test_thread(self):
return threads.deferToThread(lambda : None)
def test_expectedFailure(self):
d = defer.succeed('todo')
d.addCallback(self._cb_error)
return d
test_expectedFailure.todo = "Expected failure"
class TimeoutTests(unittest.TestCase):
timedOut = None
def test_pass(self):
d = defer.Deferred()
reactor.callLater(0, d.callback, 'hoorj!')
return d
test_pass.timeout = 2
def test_passDefault(self):
# test default timeout
d = defer.Deferred()
reactor.callLater(0, d.callback, 'hoorj!')
return d
def test_timeout(self):
return defer.Deferred()
test_timeout.timeout = 0.1
def test_timeoutZero(self):
return defer.Deferred()
test_timeoutZero.timeout = 0
def test_expectedFailure(self):
return defer.Deferred()
test_expectedFailure.timeout = 0.1
test_expectedFailure.todo = "i will get it right, eventually"
def test_skip(self):
return defer.Deferred()
test_skip.timeout = 0.1
test_skip.skip = "i will get it right, eventually"
def test_errorPropagation(self):
def timedOut(err):
self.__class__.timedOut = err
return err
d = defer.Deferred()
d.addErrback(timedOut)
return d
test_errorPropagation.timeout = 0.1
def test_calledButNeverCallback(self):
d = defer.Deferred()
def neverFire(r):
return defer.Deferred()
d.addCallback(neverFire)
d.callback(1)
return d
test_calledButNeverCallback.timeout = 0.1
class TestClassTimeoutAttribute(unittest.TestCase):
timeout = 0.2
def setUp(self):
self.d = defer.Deferred()
def testMethod(self):
self.methodCalled = True
return self.d
| gpl-2.0 |
tombh/Topotatolog | settings.py | 1 | 2973 | import os
import sys
sys.path.insert(
0,
os.path.join(
os.path.dirname(__file__),
"blog/lib"
)
)
# Initialize App Engine and import the default settings (DB backend, etc.).
# If you want to use a different backend you have to remove all occurrences
# of "djangoappengine" from this file.
from djangoappengine.settings_base import *
# Activate django-dbindexer for the default database
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
AUTOLOAD_SITECONF = 'indexes'
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
SITE_ID = '76'
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.comments',
'django.contrib.sites',
'debug_toolbar',
'djangotoolbox',
'autoload',
'dbindexer',
'socialregistration',
'socialregistration.contrib.facebook',
'socialregistration.contrib.twitter',
'blog',
# djangoappengine should come last, so it can override a few manage.py commands
'djangoappengine',
)
MIDDLEWARE_CLASSES = (
# This loads the index definitions, so it has to come first
'autoload.middleware.AutoloadMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.request',
)
GAE_SETTINGS_MODULES = (
'gae_db_field_settings',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'socialregistration.contrib.facebook.auth.FacebookAuth',
'socialregistration.contrib.twitter.auth.TwitterAuth',
)
FACEBOOK_APP_ID = '283539771683468'
FACEBOOK_SECRET_KEY = '9b1c4bdfafda6c90db458411fc8d4412'
FACEBOOK_REQUEST_PERMISSIONS = ''
TWITTER_CONSUMER_KEY = 'btfDHMB2gPfTZ8HxeSBg'
TWITTER_CONSUMER_SECRET_KEY = 'RTmqX3erXIR0Z29cXiQ2QDJOn1Vn2Rvn0QbAxDCswuA'
SOCIALREGISTRATION_GENERATE_USERNAME = True
SOCIALREGISTRATION_GENERATE_USERNAME_FUNCTION = 'utils.socregUserCreate'
AUTH_PROFILE_MODULE = "blog.UserProfile"
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
ADMIN_MEDIA_PREFIX = '/media/admin/'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'blog/templates'),)
MEDIA_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'blog/assets')
MEDIA_URL = '/assets/'
ROOT_URLCONF = 'blog.urls'
| bsd-3-clause |
durchflieger/DFAtmo | service.py | 1 | 1119 | # ---
# Copyright (C) 2011,2012 Andreas Auras <[email protected]>
#
# This file is part of DFAtmo, the driver for 'Atmolight' controllers for XBMC and xinelib-based video players.
#
# DFAtmo is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# DFAtmo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
#
# This is the DFAtmo XBMC service addon.
#
# ---
if ( __name__ == "__main__" ):
import xbmc, xbmcaddon
addon = xbmcaddon.Addon(id='script.dfatmo')
if addon.getSetting('enabled') == 'true':
xbmc.executebuiltin('RunAddon("script.dfatmo")')
| gpl-2.0 |
mozilla/olympia | src/olympia/zadmin/management/commands/generate_error.py | 4 | 1435 | from django.core.management.base import BaseCommand
import olympia.core.logger
from olympia.zadmin.tasks import celery_error
log = olympia.core.logger.getLogger('z')
class Command(BaseCommand):
help = 'Generates an exception for testing; pass --celery to raise it from a celery task'
def add_arguments(self, parser):
"""Handle command arguments."""
parser.add_argument(
'--celery',
default=False,
action='store_true',
help='Raise the error in a celery task',
)
parser.add_argument(
'--log',
default=False,
action='store_true',
help='capture the error inside a log.exception instead',
)
def handle(self, *args, **options):
if options.get('celery'):
celery_error.delay(capture_and_log=options.get('log', False))
print(
'A RuntimeError exception was raised from a celery task. '
'Check the logs!'
)
else:
print('About to raise an exception in management command')
try:
raise RuntimeError('This is an exception from a management command')
except Exception as exception:
if options.get('log', False):
log.exception('Capturing exception as a log', exc_info=exception)
else:
raise exception
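# A hedged usage sketch (the command name is inferred from this file's path,
# not stated in the module; flags match add_arguments() above):
#
#   ./manage.py generate_error            # raise in the management command
#   ./manage.py generate_error --celery   # raise the error from a celery task
#   ./manage.py generate_error --log      # capture the error via log.exception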
| bsd-3-clause |
JGrippo/YACS | scheduler/south_migrations/0011_auto__chg_field_savedselection_internal_blocked_times.py | 2 | 9979 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'SavedSelection', fields ['internal_section_ids']
db.create_index(u'scheduler_savedselection', ['internal_section_ids'])
# Changing field 'SavedSelection.internal_blocked_times'
db.alter_column(u'scheduler_savedselection', 'internal_blocked_times', self.gf('django.db.models.fields.TextField')())
# Adding index on 'SavedSelection', fields ['internal_blocked_times']
db.create_index(u'scheduler_savedselection', ['internal_blocked_times'])
def backwards(self, orm):
# Removing index on 'SavedSelection', fields ['internal_blocked_times']
db.delete_index(u'scheduler_savedselection', ['internal_blocked_times'])
# Removing index on 'SavedSelection', fields ['internal_section_ids']
db.delete_index(u'scheduler_savedselection', ['internal_section_ids'])
# Changing field 'SavedSelection.internal_blocked_times'
db.alter_column(u'scheduler_savedselection', 'internal_blocked_times', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=1024))
models = {
u'courses.course': {
'Meta': {'ordering': "['department__code', 'number']", 'object_name': 'Course'},
'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'courses'", 'to': u"orm['courses.Department']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'grade_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '150', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_comm_intense': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_credits': ('django.db.models.fields.IntegerField', [], {}),
'min_credits': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'number': ('django.db.models.fields.IntegerField', [], {}),
'prereqs': ('django.db.models.fields.TextField', [], {'default': "''"}),
'semesters': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'courses'", 'symmetrical': 'False', 'through': u"orm['courses.OfferedFor']", 'to': u"orm['courses.Semester']"})
},
u'courses.department': {
'Meta': {'ordering': "['code']", 'object_name': 'Department'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'semesters': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'departments'", 'symmetrical': 'False', 'through': u"orm['courses.SemesterDepartment']", 'to': u"orm['courses.Semester']"})
},
u'courses.offeredfor': {
'Meta': {'unique_together': "(('course', 'semester'),)", 'object_name': 'OfferedFor'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'offered_for'", 'to': u"orm['courses.Course']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ref': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'semester': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'offers'", 'to': u"orm['courses.Semester']"})
},
u'courses.period': {
'Meta': {'unique_together': "(('start', 'end', 'days_of_week_flag'),)", 'object_name': 'Period'},
'days_of_week_flag': ('django.db.models.fields.IntegerField', [], {}),
'end': ('django.db.models.fields.TimeField', [], {'default': 'None', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.TimeField', [], {'default': 'None', 'null': 'True'})
},
u'courses.section': {
'Meta': {'ordering': "['number']", 'object_name': 'Section'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': u"orm['courses.Course']"}),
'crn': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'crosslisted': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sections'", 'null': 'True', 'to': u"orm['courses.SectionCrosslisting']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'periods': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'sections'", 'symmetrical': 'False', 'through': u"orm['courses.SectionPeriod']", 'to': u"orm['courses.Period']"}),
'seats_taken': ('django.db.models.fields.IntegerField', [], {}),
'seats_total': ('django.db.models.fields.IntegerField', [], {}),
'semester': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': u"orm['courses.Semester']"})
},
u'courses.sectioncrosslisting': {
'Meta': {'object_name': 'SectionCrosslisting'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ref': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'semester': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_crosslistings'", 'to': u"orm['courses.Semester']"})
},
u'courses.sectionperiod': {
'Meta': {'unique_together': "(('period', 'section', 'semester'),)", 'object_name': 'SectionPeriod'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructor': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_times'", 'to': u"orm['courses.Period']"}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_times'", 'to': u"orm['courses.Section']"}),
'semester': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_times'", 'to': u"orm['courses.Semester']"})
},
u'courses.semester': {
'Meta': {'ordering': "['-year', '-month']", 'unique_together': "(('year', 'month'),)", 'object_name': 'Semester'},
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'month': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ref': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'courses.semesterdepartment': {
'Meta': {'unique_together': "(('department', 'semester'),)", 'object_name': 'SemesterDepartment'},
'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['courses.Department']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'semester': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['courses.Semester']"})
},
u'scheduler.savedselection': {
'Meta': {'object_name': 'SavedSelection'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_blocked_times': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'internal_section_ids': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '1024', 'db_index': 'True'})
},
u'scheduler.sectionconflict': {
'Meta': {'unique_together': "(('section1', 'section2', 'semester'),)", 'object_name': 'SectionConflict'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'section1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['courses.Section']"}),
'section2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['courses.Section']"}),
'semester': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_conflicts'", 'to': u"orm['courses.Semester']"})
},
u'scheduler.selection': {
'Meta': {'object_name': 'Selection'},
'api_cache': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_section_ids': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '255'})
}
}
complete_apps = ['scheduler'] | mit |
mitar/django | django/contrib/gis/geos/tests/test_geos.py | 5 | 42100 | import ctypes
import random
import unittest
from django.contrib.gis.geos import *
from django.contrib.gis.geos.base import gdal, numpy, GEOSBase
from django.contrib.gis.geos.libgeos import GEOS_PREPARE
from django.contrib.gis.geometry.test_data import TestDataMixin
class GEOSTest(unittest.TestCase, TestDataMixin):
@property
def null_srid(self):
"""
Returns the proper null SRID depending on the GEOS version.
See the comments in `test15_srid` for more details.
"""
info = geos_version_info()
if info['version'] == '3.0.0' and info['release_candidate']:
return -1
else:
return None
def test00_base(self):
"Tests out the GEOSBase class."
# Testing out GEOSBase class, which provides a `ptr` property
# that abstracts out access to underlying C pointers.
class FakeGeom1(GEOSBase):
pass
# This one only accepts pointers to floats
c_float_p = ctypes.POINTER(ctypes.c_float)
class FakeGeom2(GEOSBase):
ptr_type = c_float_p
# Default ptr_type is `c_void_p`.
fg1 = FakeGeom1()
# Default ptr_type is C float pointer
fg2 = FakeGeom2()
# These assignments are OK -- None is allowed because
# it's equivalent to the NULL pointer.
fg1.ptr = ctypes.c_void_p()
fg1.ptr = None
fg2.ptr = c_float_p(ctypes.c_float(5.23))
fg2.ptr = None
# Because pointers have been set to NULL, an exception should be
# raised when we try to access it. Raising an exception is
# preferable to a segmentation fault that commonly occurs when
# a C method is given a NULL memory reference.
for fg in (fg1, fg2):
# Equivalent to `fg.ptr`
self.assertRaises(GEOSException, fg._get_ptr)
# Anything that is neither None nor the acceptable pointer type will
# result in a TypeError when trying to assign it to the `ptr` property.
# Thus, memory addresses (integers) and pointers of the incorrect type
# (in `bad_ptrs`) will not be allowed.
bad_ptrs = (5, ctypes.c_char_p('foobar'))
for bad_ptr in bad_ptrs:
# Equivalent to `fg.ptr = bad_ptr`
self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
self.assertEqual(g.ewkt, geom.wkt)
def test01b_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex)
def test01b_hexewkb(self):
"Testing (HEX)EWKB output."
from binascii import a2b_hex
# For testing HEX(EWKB).
ogc_hex = '01010000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = '0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = '01010000A0E61000000000000000000000000000000000F03F0000000000000040'
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID nor Z value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex, pnt_3d.hex)
# HEXEWKB should be appropriate for its dimension -- have to use
# a WKBWriter w/dimension set accordingly, else GEOS will insert
# garbage into the 3D coordinate if there is none. Also, GEOS has
# a bug in versions prior to 3.1 that puts the X coordinate in
# place of Z; an exception should be raised on those versions.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
if GEOS_PREPARE:
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
else:
try:
hexewkb = pnt_3d.hexewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException.')
# Same for EWKB.
self.assertEqual(buffer(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
if GEOS_PREPARE:
self.assertEqual(buffer(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
else:
try:
ewkb = pnt_3d.ewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException')
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test01c_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml: self.assertEqual(kml, geom.kml)
def test01d_errors(self):
"Testing the Error handlers."
# string-based
print("\nBEGIN - expecting GEOS_ERROR; safe to ignore.\n")
for err in self.geometries.errors:
try:
g = fromstr(err.wkt)
except (GEOSException, ValueError):
pass
# Bad WKB
self.assertRaises(GEOSException, GEOSGeometry, buffer('0'))
print("\nEND - expecting GEOS_ERROR; safe to ignore.\n")
class NotAGeometry(object):
pass
# Some other object
self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
# None
self.assertRaises(TypeError, GEOSGeometry, None)
def test01e_wkb(self):
"Testing WKB output."
from binascii import b2a_hex
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
def test01f_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01g_create_wkb(self):
"Testing creation from WKB."
from binascii import a2b_hex
for g in self.geometries.hex_wkt:
wkb = buffer(a2b_hex(g.hex))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01h_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
def test01i_json(self):
"Testing GeoJSON input/output (via GDAL)."
if not gdal or not gdal.GEOJSON: return
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
self.assertEqual(g.json, geom.json)
self.assertEqual(g.json, geom.geojson)
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test01k_fromfile(self):
"Testing the fromfile() factory."
from io import BytesIO
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = BytesIO()
wkt_f.write(ref_pnt.wkt)
wkb_f = BytesIO()
wkb_f.write(str(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test01k_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# An error shouldn't be raised on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo' : 'bar'})
self.assertNotEqual(g, False)
def test02a_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertAlmostEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test02b_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
def test03a_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertEqual(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(True, ls == fromstr(l.wkt))
self.assertEqual(False, ls == prev)
self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt) # Point individual arguments
if numpy: self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
def test03b_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(True, ml == fromstr(l.wkt))
self.assertEqual(False, ml == prev)
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test04_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy: self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test05a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox( bbox )
self.assertEqual(bbox, p.extent)
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.empty, False)
self.assertEqual(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(True, poly == fromstr(p.wkt))
self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
self.assertEqual(True, poly != prev)
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
self.assertRaises(TypeError, Polygon.__init__, 0, [1, 2, 3])
self.assertRaises(TypeError, Polygon.__init__, 'foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test05b_multipolygons(self):
"Testing MultiPolygon objects."
print("\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n")
prev = fromstr('POINT (0 0)')
for mp in self.geometries.multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertEqual(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
print("\nEND - expecting GEOS_NOTICE; safe to ignore.\n")
def test06a_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
#### Memory issues with rings and polygons
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(self.geometries.polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
s1, s2 = str(ring1), str(ring2)
def test08_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in xrange(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2: tset = (5, 23)
else: tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test09_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test10_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test11_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test12_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test13_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test14_buffer(self):
"Testing buffer()."
for bg in self.geometries.buffer_geoms:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
quadsegs = bg.quadsegs
width = bg.width
# Can't use a floating-point for the number of quadsegs.
self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
# Constructing our buffer
buf = g.buffer(width, quadsegs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
            # Now checking that each point in the buffer is almost equal to its expected value
for j in xrange(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in xrange(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test15_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly: self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)): self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
# In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
# the SRID information is lost and set to -1 -- this is not a
# problem on the 3.0.0 version (another reason to upgrade).
exp_srid = self.null_srid
p2 = fromstr(p1.hex)
self.assertEqual(exp_srid, p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
def test16_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
s = str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(1, 100), random.randint(1, 100))
# Testing the assignment
mp[i] = new
s = str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in xrange(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
                # Offsetting each ring in the polygon by 500.
for j in xrange(len(poly)):
r = poly[j]
for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
s = str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
#mpoly[0][0][0] = (3.14, 2.71)
#self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
#self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
#del mpoly
def test17_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2.,3.,8.), pnt.coords)
self.assertRaises(TypeError, pnt.set_coords, (1.,2.))
pnt.coords = (1.,2.,3.)
self.assertEqual((1.,2.,3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2.,3.,8.), (50.,250.,-117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1.,2.))
ls[0] = (1.,2.,3.)
self.assertEqual((1.,2.,3.), ls[0])
def test18_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test19_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
        # Should be the circumference of the Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test20a_emptyCollections(self):
"Testing empty geometries and collections."
gc1 = GeometryCollection([])
gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
pnt = fromstr('POINT EMPTY')
ls = fromstr('LINESTRING EMPTY')
poly = fromstr('POLYGON EMPTY')
mls = fromstr('MULTILINESTRING EMPTY')
mpoly1 = fromstr('MULTIPOLYGON EMPTY')
mpoly2 = MultiPolygon(())
for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
self.assertEqual(True, g.empty)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
self.assertRaises(GEOSIndexError, g.get_x)
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertEqual(True, lr.empty)
self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
else:
self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test20b_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend([mls.wkt for mls in self.geometries.multilinestrings])
coll.extend([p.wkt for p in self.geometries.polygons])
coll.extend([mp.wkt for mp in self.geometries.multipoints])
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
def test21_test_gdal(self):
"Testing `ogr` and `srs` properties."
if not gdal.HAS_GDAL: return
g1 = fromstr('POINT(5 23)')
self.assertEqual(True, isinstance(g1.ogr, gdal.OGRGeometry))
self.assertEqual(g1.srs, None)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertEqual(True, isinstance(g2.ogr, gdal.OGRGeometry))
self.assertEqual(True, isinstance(g2.srs, gdal.SpatialReference))
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test22_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
def test23_transform(self):
"Testing `transform` method."
if not gdal.HAS_GDAL: return
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(gdal.SpatialReference('EPSG:2774'))
ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test23_transform_noop(self):
""" Testing `transform` method (SRID match) """
# transform() should no-op if source & dest SRIDs match,
# regardless of whether GDAL is available.
if gdal.HAS_GDAL:
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertTrue(g1 is not g, "Clone didn't happen")
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertTrue(g1 is not g, "Clone didn't happen")
finally:
gdal.HAS_GDAL = old_has_gdal
def test23_transform_nosrid(self):
""" Testing `transform` method (no SRID or negative SRID) """
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
def test23_transform_nogdal(self):
""" Testing `transform` method (GDAL not available) """
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
finally:
gdal.HAS_GDAL = old_has_gdal
def test24_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test25_pickle(self):
"Testing pickling and unpickling support."
# Using both pickle and cPickle -- just 'cause.
import pickle, cPickle
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(self.geometries.points)
tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
tgeoms.extend(get_geoms(self.geometries.multipolygons, 900913))
# The SRID won't be exported in GEOS 3.0 release candidates.
no_srid = self.null_srid == -1
for geom in tgeoms:
s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
g1, g2 = cPickle.loads(s1), pickle.loads(s2)
for tmpg in (g1, g2):
self.assertEqual(geom, tmpg)
if not no_srid: self.assertEqual(geom.srid, tmpg.srid)
def test26_prepared(self):
"Testing PreparedGeometry support."
if not GEOS_PREPARE: return
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
covers = [True, True, False] # No `covers` op for regular GEOS geoms.
for pnt, c in zip(pnts, covers):
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(c, prep.covers(pnt))
def test26_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
def test27_valid_reason(self):
"Testing IsValidReason support"
# Skipping tests if GEOS < v3.1.
if not GEOS_PREPARE: return
g = GEOSGeometry("POINT(0 0)")
self.assertTrue(g.valid)
self.assertTrue(isinstance(g.valid_reason, basestring))
self.assertEqual(g.valid_reason, "Valid Geometry")
print("\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n")
g = GEOSGeometry("LINESTRING(0 0, 0 0)")
self.assertTrue(not g.valid)
self.assertTrue(isinstance(g.valid_reason, basestring))
self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
print("\nEND - expecting GEOS_NOTICE; safe to ignore.\n")
def test28_geos_version(self):
"Testing the GEOS version regular expression."
from django.contrib.gis.geos.libgeos import version_regex
versions = [ ('3.0.0rc4-CAPI-1.3.3', '3.0.0'),
('3.0.0-CAPI-1.4.1', '3.0.0'),
('3.4.0dev-CAPI-1.8.0', '3.4.0') ]
for v, expected in versions:
m = version_regex.match(v)
self.assertTrue(m)
self.assertEqual(m.group('version'), expected)
def suite():
    s = unittest.TestSuite()
    s.addTest(unittest.makeSuite(GEOSTest))
    return s
def run(verbosity=2):
    unittest.TextTestRunner(verbosity=verbosity).run(suite())
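# Illustrative addition (not part of the original test module): allow running
# this file directly; `run()` above already wires the suite into a TextTestRunner.
if __name__ == '__main__':
    run()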
| bsd-3-clause |
pquentin/django | tests/i18n/test_compilation.py | 13 | 8847 | # -*- coding: utf-8 -*-
import gettext as gettext_module
import os
import shutil
import stat
import unittest
from django.core.management import (
CommandError, call_command, execute_from_command_line,
)
from django.core.management.utils import find_command
from django.test import SimpleTestCase, override_settings
from django.test.utils import captured_stderr, captured_stdout
from django.utils import translation
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.six import StringIO
from django.utils.translation import ugettext
has_msgfmt = find_command('msgfmt')
@unittest.skipUnless(has_msgfmt, 'msgfmt is mandatory for compilation tests')
class MessageCompilationTests(SimpleTestCase):
test_dir = os.path.abspath(os.path.join(os.path.dirname(upath(__file__)), 'commands'))
def setUp(self):
self._cwd = os.getcwd()
self.addCleanup(os.chdir, self._cwd)
os.chdir(self.test_dir)
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def rmfile(self, filepath):
if os.path.exists(filepath):
os.remove(filepath)
class PoFileTests(MessageCompilationTests):
LOCALE = 'es_AR'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
def test_bom_rejection(self):
with self.assertRaises(CommandError) as cm:
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
self.assertIn("file has a BOM (Byte Order Mark)", cm.exception.args[0])
self.assertFalse(os.path.exists(self.MO_FILE))
def test_no_write_access(self):
mo_file_en = 'locale/en/LC_MESSAGES/django.mo'
err_buffer = StringIO()
# put file in read-only mode
old_mode = os.stat(mo_file_en).st_mode
os.chmod(mo_file_en, stat.S_IREAD)
try:
call_command('compilemessages', locale=['en'], stderr=err_buffer, verbosity=0)
err = err_buffer.getvalue()
self.assertIn("not writable location", err)
finally:
os.chmod(mo_file_en, old_mode)
class PoFileContentsTests(MessageCompilationTests):
# Ticket #11240
LOCALE = 'fr'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
def setUp(self):
super(PoFileContentsTests, self).setUp()
self.addCleanup(os.unlink, os.path.join(self.test_dir, self.MO_FILE))
def test_percent_symbol_in_po_file(self):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE))
class PercentRenderingTests(MessageCompilationTests):
    # Ticket #11240 -- testing rendering doesn't really belong here, but we are
    # trying to keep the tests for the whole stack together.
LOCALE = 'it'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
def setUp(self):
super(PercentRenderingTests, self).setUp()
self.addCleanup(os.unlink, os.path.join(self.test_dir, self.MO_FILE))
def test_percent_symbol_escaping(self):
with override_settings(LOCALE_PATHS=(os.path.join(self.test_dir, 'locale'),)):
from django.template import Template, Context
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
with translation.override(self.LOCALE):
t = Template('{% load i18n %}{% trans "Looks like a str fmt spec %% o but shouldn\'t be interpreted as such" %}')
rendered = t.render(Context({}))
self.assertEqual(rendered, 'IT translation contains %% for the above string')
t = Template('{% load i18n %}{% trans "Completed 50%% of all the tasks" %}')
rendered = t.render(Context({}))
self.assertEqual(rendered, 'IT translation of Completed 50%% of all the tasks')
class MultipleLocaleCompilationTests(MessageCompilationTests):
MO_FILE_HR = None
MO_FILE_FR = None
def setUp(self):
super(MultipleLocaleCompilationTests, self).setUp()
localedir = os.path.join(self.test_dir, 'locale')
self.MO_FILE_HR = os.path.join(localedir, 'hr/LC_MESSAGES/django.mo')
self.MO_FILE_FR = os.path.join(localedir, 'fr/LC_MESSAGES/django.mo')
self.addCleanup(self.rmfile, os.path.join(localedir, self.MO_FILE_HR))
self.addCleanup(self.rmfile, os.path.join(localedir, self.MO_FILE_FR))
def test_one_locale(self):
with override_settings(LOCALE_PATHS=(os.path.join(self.test_dir, 'locale'),)):
call_command('compilemessages', locale=['hr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE_HR))
def test_multiple_locales(self):
with override_settings(LOCALE_PATHS=(os.path.join(self.test_dir, 'locale'),)):
call_command('compilemessages', locale=['hr', 'fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE_HR))
self.assertTrue(os.path.exists(self.MO_FILE_FR))
class ExcludedLocaleCompilationTests(MessageCompilationTests):
test_dir = os.path.abspath(os.path.join(os.path.dirname(upath(__file__)), 'exclude'))
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo'
def setUp(self):
super(ExcludedLocaleCompilationTests, self).setUp()
shutil.copytree('canned_locale', 'locale')
self.addCleanup(self._rmrf, os.path.join(self.test_dir, 'locale'))
def test_command_help(self):
with captured_stdout(), captured_stderr():
# `call_command` bypasses the parser; by calling
# `execute_from_command_line` with the help subcommand we
# ensure that there are no issues with the parser itself.
execute_from_command_line(['django-admin', 'help', 'compilemessages'])
def test_one_locale_excluded(self):
call_command('compilemessages', exclude=['it'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertTrue(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_multiple_locales_excluded(self):
call_command('compilemessages', exclude=['it', 'fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_one_locale_excluded_with_locale(self):
call_command('compilemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_multiple_locales_excluded_with_locale(self):
call_command('compilemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
class CompilationErrorHandling(MessageCompilationTests):
LOCALE = 'ja'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
def setUp(self):
super(CompilationErrorHandling, self).setUp()
self.addCleanup(self.rmfile, os.path.join(self.test_dir, self.MO_FILE))
def test_error_reported_by_msgfmt(self):
with self.assertRaises(CommandError):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
class FuzzyTranslationTest(MessageCompilationTests):
LOCALE = 'ru'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
def setUp(self):
super(FuzzyTranslationTest, self).setUp()
self.addCleanup(self.rmfile, os.path.join(self.test_dir, self.MO_FILE))
gettext_module._translations = {} # flush cache or test will be useless
def test_nofuzzy_compiling(self):
with override_settings(LOCALE_PATHS=(os.path.join(self.test_dir, 'locale'),)):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
with translation.override(self.LOCALE):
self.assertEqual(ugettext('Lenin'), force_text('Ленин'))
self.assertEqual(ugettext('Vodka'), force_text('Vodka'))
def test_fuzzy_compiling(self):
with override_settings(LOCALE_PATHS=(os.path.join(self.test_dir, 'locale'),)):
call_command('compilemessages', locale=[self.LOCALE], fuzzy=True, stdout=StringIO())
with translation.override(self.LOCALE):
self.assertEqual(ugettext('Lenin'), force_text('Ленин'))
self.assertEqual(ugettext('Vodka'), force_text('Водка'))
| bsd-3-clause |
shushen/ansible | v2/test/vars/test_variable_manager.py | 23 | 4269 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.vars import VariableManager
from test.mock.loader import DictDataLoader
class TestVariableManager(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_basic_manager(self):
v = VariableManager()
self.assertEqual(v.get_vars(), dict())
self.assertEqual(
v._merge_dicts(
dict(a=1),
dict(b=2)
), dict(a=1, b=2)
)
self.assertEqual(
v._merge_dicts(
dict(a=1, c=dict(foo='bar')),
dict(b=2, c=dict(baz='bam'))
), dict(a=1, b=2, c=dict(foo='bar', baz='bam'))
)
def test_manager_extra_vars(self):
extra_vars = dict(a=1, b=2, c=3)
v = VariableManager()
v.set_extra_vars(extra_vars)
self.assertEqual(v.get_vars(), extra_vars)
self.assertIsNot(v.extra_vars, extra_vars)
def test_manager_host_vars_file(self):
fake_loader = DictDataLoader({
"host_vars/hostname1.yml": """
foo: bar
"""
})
v = VariableManager(loader=fake_loader)
v.add_host_vars_file("host_vars/hostname1.yml")
self.assertIn("hostname1", v._host_vars_files)
self.assertEqual(v._host_vars_files["hostname1"], dict(foo="bar"))
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ()
self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar"))
def test_manager_group_vars_file(self):
fake_loader = DictDataLoader({
"group_vars/somegroup.yml": """
foo: bar
"""
})
v = VariableManager(loader=fake_loader)
v.add_group_vars_file("group_vars/somegroup.yml")
self.assertIn("somegroup", v._group_vars_files)
self.assertEqual(v._group_vars_files["somegroup"], dict(foo="bar"))
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ["somegroup"]
self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar"))
def test_manager_play_vars(self):
mock_play = MagicMock()
mock_play.get_vars.return_value = dict(foo="bar")
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = []
v = VariableManager()
self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar"))
def test_manager_play_vars_files(self):
fake_loader = DictDataLoader({
"/path/to/somefile.yml": """
foo: bar
"""
})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict()
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = ['/path/to/somefile.yml']
v = VariableManager(loader=fake_loader)
self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar"))
def test_manager_task_vars(self):
mock_task = MagicMock()
mock_task.get_vars.return_value = dict(foo="bar")
v = VariableManager()
self.assertEqual(v.get_vars(task=mock_task), dict(foo="bar"))
| gpl-3.0 |
scenarios/tensorflow | tensorflow/contrib/learn/python/learn/estimators/prediction_key.py | 28 | 1026 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enum for model prediction keys."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class PredictionKey(object):
CLASSES = "classes"
PROBABILITIES = "probabilities"
LOGITS = "logits"
LOGISTIC = "logistic"
SCORES = "scores"
TOP_K = "top_k"
GENERIC = "output"
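# Illustrative note (not part of the original module): these constants are the
# keys an estimator's model function uses to index its prediction dict. For a
# hypothetical `predictions` dict returned by such a model function, lookups
# would read as predictions[PredictionKey.CLASSES] or
# predictions[PredictionKey.PROBABILITIES].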
| apache-2.0 |
mylene-campana/hpp-rbprm-corba | script/scenarios/manipulation_romeo_interp.py | 1 | 7693 | from hpp.corbaserver.rbprm.problem_solver import ProblemSolver as ProblemSolverRbprm
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.corbaserver import Client
from hpp.gepetto import ViewerFactory
from hpp.gepetto import Viewer
import sys
from hpp.corbaserver.manipulation.romeo import Robot
from hpp.corbaserver.manipulation import ProblemSolver, Rule
from hpp.gepetto.manipulation import Viewer, ViewerFactory
from hpp.gepetto import PathPlayer, PathPlayerGui
from math import sqrt
from hpp.gepetto import PathPlayer, PathPlayerGui
from math import sqrt
# Load robot and object. {{{3
# Define classes for the objects {{{4
class Kitchen (object):
rootJointType = "anchor"
packageName = 'iai_maps'
meshPackageName = 'iai_maps'
urdfName = 'kitchen_area'
urdfSuffix = ""
srdfSuffix = ""
joint = "kitchen_area/fridge_block_fridge_joint"
handle = "kitchen_area/fridge_handle_fridge_handle"
class Cup (object):
rootJointType = "freeflyer"
packageName = 'hpp_tutorial'
meshPackageName = 'hpp_tutorial'
urdfName = 'cup'
urdfSuffix = ""
srdfSuffix = ""
joint = "cup/base_joint"
handle = "cup/handle"
Robot.srdfSuffix = "_moveit"
# 4}}}
robot = Robot ('romeo-kitchen', 'romeo')
ps0 = ProblemSolver (robot)
#~ r = Viewer (ps)
vf = ViewerFactory (ps0)
robot.setJointBounds ("romeo/base_joint_xyz" , [-60,20,-50,100, 0, 2])
from os import environ
ins_dir = environ['DEVEL_DIR']
db_dir = ins_dir+"/install/share/hyq-rbprm/database/hyq_"
#~ ps = ProblemSolver( fullBody )
#~ vf = ViewerFactory (ps)
#~ r = Viewer (ps)
robot.setJointBounds ("romeo/base_joint_xyz" , [-60,20,-50,100, 0, 2])
vf.loadObjectModel (Kitchen, "kitchen_area")
vf.loadObjectModel (Cup, "cup")
robot.setJointBounds ('cup/base_joint_xyz', [-60,20,-50,100, 0, 2])
# 3}}}
# Define configurations. {{{3
robot.setCurrentConfig (robot.getInitialConfig ())
q_init = robot.getHandConfig ("both", "open")
rank = robot.rankInConfiguration ['romeo/base_joint_xyz']
# q_init [rank:rank+7] = [-3.5,-3.7, 0.877, 1, 0, 0, 0]
q_init [rank:rank+7] = [-4.264,-4.69, 0.877, 0, 0, 0, 1]
rank = robot.rankInConfiguration ['cup/base_joint_xyz']
q_init [rank:rank+7] = [-4.8, -4.64, 0.91,0,sqrt(2)/2,sqrt(2)/2,0]
q_goal1 = q_init [::]
q_goal2 = q_init [::]
q_goal1 [rank:rank+7] = [-4.73, -3.35, 0.91, 0,sqrt(2)/2,sqrt(2)/2,0]
q_goal2 [rank:rank+7] = [-4.8, -4.70, 0.91, 0,sqrt(2)/2,sqrt(2)/2,0]
# 3}}}
# Create a new manipulation problem
cl = Client()
cl.problem.selectProblem("rbprm")
cl.problem.selectProblem("default")
cl.problem.moveRobotToProblem("rbprm")
cl.problem.selectProblem("rbprm")
fullBody = FullBody ()
fullBody.loadFullBodyModelFromActiveRobot('romeo', {'cup': 'freeflyer', 'kitchen_area': 'anchor', 'romeo': 'freeflyer'}, "romeokitchen", 'romeo_description', '', '')
#~ fullBody.setJointBounds ("base_joint_xyz", [-4,4,-4,4,-4,4])
ps = ProblemSolverRbprm (robot)
r = Viewer (ps)
pp = PathPlayer (fullBody.client.basic, r)
from hpp.corbaserver.affordance.affordance import AffordanceTool
afftool = AffordanceTool ()
afftool.loadObstacleModel ("hpp_environments", "hrp2/floor_as_mesh", "floor", r)
#~ afftool.visualiseAffordances('Support', r, [0.25, 0.5, 0.5])
fullBody.client.rbprm.rbprm.setAffordanceFilter('0rLeg', ['Support',])
fullBody.client.rbprm.rbprm.setAffordanceFilter('1lLeg', ['Support'])
import pickle
with open("romeo_kitchen_path_discretized.pickle", 'r') as f:
qs = pickle.load(f)
fullBody.client.rbprm.rbprm.configToPath(qs)
#~ a = [q for i,q in enumerate(qs) if i % 50 == 0 ]
r.client.gui.addURDF("kitchen_area", "/home_local/dev/hpp/install/share/"+Kitchen.packageName+"/urdf/"+Kitchen.urdfName+".urdf", "")
r.client.gui.addToGroup("kitchen_area", r.sceneName)
r.client.gui.addURDF("cup", "/home_local/dev/hpp/install/share/"+Cup.packageName+"/urdf/"+Cup.urdfName+".urdf", "")
r.client.gui.addToGroup("cup", r.sceneName)
#~ r.loadObstacleModel (Kitchen.packageName, Kitchen.urdfName, "kitchen_area2")
#~ for j in fullBody.client.basic.obstacle.getObstacleNames(True, False):
#~ if( j != 'floor/base_link_0'):
#~ fullBody.client.basic.obstacle.removeObstacleFromJoint('floor/base_link_0', j, True, False)
#~ pp(0)
#~ print "addlef"
rLegId = '0rLeg'
rfoot = 'romeo/RAnkleRoll'
rLeg = 'romeo/RHipYaw'
rLegOffset = [0,0,-0.06839999246139947]
rLegNormal = [0,0,1]
rLegx = 0.1; rLegy = 0.05
fullBody.addLimb(rLegId,rLeg,rfoot,rLegOffset,rLegNormal, rLegx, rLegy, 10000, "static", 0.05, "_6_DOF", True)
#~
#~ print "addlef"
lLegId = '1lLeg'
lLeg = 'romeo/LHipYaw'
lfoot = 'romeo/LAnkleRoll'
lLegOffset = [0,0,-0.06839999246139947]
lLegNormal = [0,0,1]
lLegx = 0.1; lLegy = 0.05
fullBody.addLimb(lLegId,lLeg,lfoot,lLegOffset,rLegNormal, lLegx, lLegy, 10000, "static", 0.1, "_6_DOF", True)
#~ fullBody.runLimbSampleAnalysis(rLegId, "jointLimitsDistance", True)
#~ fullBody.runLimbSampleAnalysis(lLegId, "jointLimitsDistance", True)
fullBody.setStartState(qs[0],[rLegId,lLegId])
fullBody.setEndState(qs[20],[rLegId,lLegId])
#~ configs = fullBody.interpolate(0.15, 0, 10, True)
#~
for j in fullBody.getAllJointNames():
#~ if j.startswith("kitchen") or j.startswith("cup"):
fullBody.client.basic.obstacle.removeObstacleFromJoint('floor/base_link_0', j, True, False)
#~ ps.pathLength(0) / 100.
configs = fullBody.interpolate(ps.pathLength(0) / 50., 0, 10, True)
limbsCOMConstraints = { rLegId : {'file': "hrp2/RL_com.ineq", 'effector' : rfoot},
lLegId : {'file': "hrp2/LL_com.ineq", 'effector' : lLeg}, }
from hpp.corbaserver.rbprm.tools.cwc_trajectory_helper import step, clean,stats, saveAllData, play_traj
from hpp.gepetto import PathPlayer
pp = PathPlayer (fullBody.client.basic, r)
def act(i, numOptim = 0, use_window = 0, friction = 0.5, optim_effectors = True, verbose = False, draw = False, trackedEffectors = []):
return step(fullBody, configs, i, numOptim, pp, limbsCOMConstraints, 0.4, optim_effectors = optim_effectors, time_scale = 20., useCOMConstraints = True, use_window = use_window,
verbose = verbose, draw = draw, trackedEffectors = trackedEffectors)
def play(frame_rate = 1./24.):
play_traj(fullBody,pp,frame_rate)
def saveAll(name):
saveAllData(fullBody, r, name)
def draw_com():
global fullBody
c = fullBody.getCenterOfMass()
scene = "com_" + str(c)
r.client.gui.createScene(scene)
r.client.gui.addBox(scene+"/b"+str(0),0.1,0.1,0.1, [1,0,0,1])
r.client.gui.applyConfiguration(scene+"/b"+str(0),c+[1,0,0,0])
r.client.gui.refresh()
r.client.gui.addSceneToWindow(scene,0)
#~ def playPaths(rs = None):
#~ import time
#~ ps.client.problem.selectProblem("rbprm")
#~ ls = [ ps.pathLength(i) for i in range(ps.numberPaths()) ]
#~ if rs is None:
#~ rs = [ vf.createViewer() ]
#~ ps.client.problem.selectProblem("manipulationProblem")
#~ rs.append( manipulation.vf.createViewer() )
#~ for i in range(1000):
#~ ps.client.problem.selectProblem("rbprm")
#~ rs[0] (ps.configAtParam(1,i * ls[1] / 1000.))
#~ ps.client.problem.selectProblem("manipulationProblem")
#~ rs[1] (manipulation.ps.configAtParam(0, i * ls[0] / 1000.))
#~ time.sleep(0.5)
#~ return rs
#~ for i in range(1,5):
#~ act(i,60, use_window = 0, optim_effectors = True, draw = False, verbose = True)
#~ trackedEffectors = [0, 0, 0.15, ['LARM_JOINT5']]
#~ for i in range(0,1):
#~ trackedEffectors = [0, i * 0.15, (i+1) * 0.15, ['LARM_JOINT5']];
#~ act(i,60, use_window = 0, optim_effectors = True, draw = False, verbose = False, trackedEffectors = trackedEffectors)
#~ act(5,0, use_window = 0, friction = 1, optim_effectors = False, draw = False, verbose = True)
| lgpl-3.0 |
jfantom/incubator-airflow | airflow/contrib/hooks/gcp_dataflow_hook.py | 3 | 6444 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import select
import subprocess
import time
import uuid
from apiclient.discovery import build
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
class _DataflowJob(LoggingMixin):
def __init__(self, dataflow, project_number, name, poll_sleep=10):
self._dataflow = dataflow
self._project_number = project_number
self._job_name = name
self._job_id = None
self._job = self._get_job()
self._poll_sleep = poll_sleep
def _get_job_id_from_name(self):
jobs = self._dataflow.projects().jobs().list(
projectId=self._project_number
).execute()
for job in jobs['jobs']:
if job['name'] == self._job_name:
self._job_id = job['id']
return job
return None
def _get_job(self):
if self._job_id is None:
job = self._get_job_id_from_name()
else:
job = self._dataflow.projects().jobs().get(projectId=self._project_number,
jobId=self._job_id).execute()
if 'currentState' in job:
self.log.info(
'Google Cloud DataFlow job %s is %s',
job['name'], job['currentState']
)
else:
self.log.info(
'Google Cloud DataFlow with job_id %s has name %s',
self._job_id, job['name']
)
return job
def wait_for_done(self):
while True:
if 'currentState' in self._job:
if 'JOB_STATE_DONE' == self._job['currentState']:
return True
elif 'JOB_STATE_FAILED' == self._job['currentState']:
raise Exception("Google Cloud Dataflow job {} has failed.".format(
self._job['name']))
elif 'JOB_STATE_CANCELLED' == self._job['currentState']:
raise Exception("Google Cloud Dataflow job {} was cancelled.".format(
self._job['name']))
elif 'JOB_STATE_RUNNING' == self._job['currentState']:
time.sleep(self._poll_sleep)
elif 'JOB_STATE_PENDING' == self._job['currentState']:
time.sleep(15)
else:
self.log.debug(str(self._job))
raise Exception(
"Google Cloud Dataflow job {} was unknown state: {}".format(
self._job['name'], self._job['currentState']))
else:
time.sleep(15)
self._job = self._get_job()
def get(self):
return self._job
class _Dataflow(LoggingMixin):
def __init__(self, cmd):
self.log.info("Running command: %s", ' '.join(cmd))
self._proc = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
def _line(self, fd):
if fd == self._proc.stderr.fileno():
lines = self._proc.stderr.readlines()
for line in lines:
self.log.warning(line[:-1])
line = lines[-1][:-1]
return line
if fd == self._proc.stdout.fileno():
line = self._proc.stdout.readline()
return line
@staticmethod
def _extract_job(line):
if line is not None:
if line.startswith("Submitted job: "):
return line[15:-1]
def wait_for_done(self):
reads = [self._proc.stderr.fileno(), self._proc.stdout.fileno()]
self.log.info("Start waiting for DataFlow process to complete.")
while self._proc.poll() is None:
ret = select.select(reads, [], [], 5)
if ret is not None:
for fd in ret[0]:
line = self._line(fd)
self.log.debug(line[:-1])
else:
self.log.info("Waiting for DataFlow process to complete.")
        if self._proc.returncode != 0:
raise Exception("DataFlow failed with return code {}".format(
self._proc.returncode))
class DataFlowHook(GoogleCloudBaseHook):
def __init__(self,
gcp_conn_id='google_cloud_default',
delegate_to=None,
poll_sleep=10):
self.poll_sleep = poll_sleep
super(DataFlowHook, self).__init__(gcp_conn_id, delegate_to)
def get_conn(self):
"""
Returns a Google Cloud Storage service object.
"""
http_authorized = self._authorize()
return build('dataflow', 'v1b3', http=http_authorized)
def _start_dataflow(self, task_id, variables, dataflow, name, command_prefix):
cmd = command_prefix + self._build_cmd(task_id, variables, dataflow)
_Dataflow(cmd).wait_for_done()
_DataflowJob(
self.get_conn(), variables['project'], name, self.poll_sleep).wait_for_done()
def start_java_dataflow(self, task_id, variables, dataflow):
name = task_id + "-" + str(uuid.uuid1())[:8]
variables['jobName'] = name
self._start_dataflow(
task_id, variables, dataflow, name, ["java", "-jar"])
def start_python_dataflow(self, task_id, variables, dataflow, py_options):
name = task_id + "-" + str(uuid.uuid1())[:8]
variables["job_name"] = name
self._start_dataflow(
task_id, variables, dataflow, name, ["python"] + py_options)
def _build_cmd(self, task_id, variables, dataflow):
command = [dataflow, "--runner=DataflowRunner"]
if variables is not None:
for attr, value in variables.iteritems():
command.append("--" + attr + "=" + value)
return command
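# Illustrative usage sketch (not part of the original module). The project id,
# bucket and pipeline path are made-up values, and the calls are left commented
# out because they require real GCP credentials on the Airflow connection.
#
#   hook = DataFlowHook(gcp_conn_id='google_cloud_default')
#   hook.start_python_dataflow(
#       task_id='example-task',
#       variables={'project': 'example-project',
#                  'staging_location': 'gs://example-bucket/staging'},
#       dataflow='/path/to/pipeline.py',
#       py_options=[])
#
# Internally this builds roughly:
#   python /path/to/pipeline.py --runner=DataflowRunner --project=example-project
#       --staging_location=gs://example-bucket/staging --job_name=example-task-<uuid>
# and then polls the Dataflow API via _DataflowJob until JOB_STATE_DONE, raising
# on JOB_STATE_FAILED or JOB_STATE_CANCELLED.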
| apache-2.0 |
PappaPeppar/micropython | tests/extmod/framebuf16.py | 26 | 1059 | try:
import framebuf
except ImportError:
print("SKIP")
raise SystemExit
def printbuf():
print("--8<--")
for y in range(h):
print(buf[y * w * 2:(y + 1) * w * 2])
print("-->8--")
w = 4
h = 5
buf = bytearray(w * h * 2)
fbuf = framebuf.FrameBuffer(buf, w, h, framebuf.RGB565)
# fill
fbuf.fill(0xffff)
printbuf()
fbuf.fill(0x0000)
printbuf()
# put pixel
fbuf.pixel(0, 0, 0xeeee)
fbuf.pixel(3, 0, 0xee00)
fbuf.pixel(0, 4, 0x00ee)
fbuf.pixel(3, 4, 0x0ee0)
printbuf()
# get pixel
print(fbuf.pixel(0, 4), fbuf.pixel(1, 1))
# scroll
fbuf.fill(0x0000)
fbuf.pixel(2, 2, 0xffff)
printbuf()
fbuf.scroll(0, 1)
printbuf()
fbuf.scroll(1, 0)
printbuf()
fbuf.scroll(-1, -2)
printbuf()
w2 = 2
h2 = 3
buf2 = bytearray(w2 * h2 * 2)
fbuf2 = framebuf.FrameBuffer(buf2, w2, h2, framebuf.RGB565)
fbuf2.fill(0x0000)
fbuf2.pixel(0, 0, 0x0ee0)
fbuf2.pixel(0, 2, 0xee00)
fbuf2.pixel(1, 0, 0x00ee)
fbuf2.pixel(1, 2, 0xe00e)
fbuf.fill(0xffff)
fbuf.blit(fbuf2, 3, 3, 0x0000)
fbuf.blit(fbuf2, -1, -1, 0x0000)
fbuf.blit(fbuf2, 16, 16, 0x0000)
printbuf()
| mit |
Beauhurst/django | tests/test_runner/test_discover_runner.py | 32 | 7804 | import os
from argparse import ArgumentParser
from contextlib import contextmanager
from unittest import TestSuite, TextTestRunner, defaultTestLoader
from django.test import TestCase
from django.test.runner import DiscoverRunner
@contextmanager
def change_cwd(directory):
current_dir = os.path.abspath(os.path.dirname(__file__))
new_dir = os.path.join(current_dir, directory)
old_cwd = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(old_cwd)
class DiscoverRunnerTest(TestCase):
def test_init_debug_mode(self):
runner = DiscoverRunner()
self.assertFalse(runner.debug_mode)
def test_add_arguments_debug_mode(self):
parser = ArgumentParser()
DiscoverRunner.add_arguments(parser)
ns = parser.parse_args([])
self.assertFalse(ns.debug_mode)
ns = parser.parse_args(["--debug-mode"])
self.assertTrue(ns.debug_mode)
def test_dotted_test_module(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample"],
).countTestCases()
self.assertEqual(count, 6)
def test_dotted_test_class_vanilla_unittest(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestVanillaUnittest"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_class_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_pattern(self):
count = DiscoverRunner(
pattern="*_tests.py",
).build_suite(["test_discovery_sample"]).countTestCases()
self.assertEqual(count, 1)
def test_file_path(self):
with change_cwd(".."):
count = DiscoverRunner().build_suite(
["test_discovery_sample/"],
).countTestCases()
self.assertEqual(count, 7)
def test_empty_label(self):
"""
If the test label is empty, discovery should happen on the current
working directory.
"""
with change_cwd("."):
suite = DiscoverRunner().build_suite([])
self.assertEqual(
suite._tests[0].id().split(".")[0],
os.path.basename(os.getcwd()),
)
def test_empty_test_case(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.EmptyTestCase"],
).countTestCases()
self.assertEqual(count, 0)
def test_discovery_on_package(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests"],
).countTestCases()
self.assertEqual(count, 1)
def test_ignore_adjacent(self):
"""
When given a dotted path to a module, unittest discovery searches
not just the module, but also the directory containing the module.
This results in tests from adjacent modules being run when they
should not. The discover runner avoids this behavior.
"""
count = DiscoverRunner().build_suite(
["test_discovery_sample.empty"],
).countTestCases()
self.assertEqual(count, 0)
def test_testcase_ordering(self):
with change_cwd(".."):
suite = DiscoverRunner().build_suite(["test_discovery_sample/"])
self.assertEqual(
suite._tests[0].__class__.__name__,
'TestDjangoTestCase',
msg="TestDjangoTestCase should be the first test case")
self.assertEqual(
suite._tests[1].__class__.__name__,
'TestZimpleTestCase',
msg="TestZimpleTestCase should be the second test case")
# All others can follow in unspecified order, including doctests
self.assertIn('DocTestCase', [t.__class__.__name__ for t in suite._tests[2:]])
def test_duplicates_ignored(self):
"""
Tests shouldn't be discovered twice when discovering on overlapping paths.
"""
base_app = 'forms_tests'
sub_app = 'forms_tests.field_tests'
with self.modify_settings(INSTALLED_APPS={'append': sub_app}):
single = DiscoverRunner().build_suite([base_app]).countTestCases()
dups = DiscoverRunner().build_suite([base_app, sub_app]).countTestCases()
self.assertEqual(single, dups)
def test_reverse(self):
"""
Reverse should reorder tests while maintaining the grouping specified
by ``DiscoverRunner.reorder_by``.
"""
runner = DiscoverRunner(reverse=True)
suite = runner.build_suite(
test_labels=('test_discovery_sample', 'test_discovery_sample2'))
self.assertIn('test_discovery_sample2', next(iter(suite)).id(),
msg="Test labels should be reversed.")
suite = runner.build_suite(test_labels=('test_discovery_sample2',))
suite = tuple(suite)
self.assertIn('DjangoCase', suite[0].id(),
msg="Test groups should not be reversed.")
self.assertIn('SimpleCase', suite[4].id(),
msg="Test groups order should be preserved.")
self.assertIn('DjangoCase2', suite[0].id(),
msg="Django test cases should be reversed.")
self.assertIn('SimpleCase2', suite[4].id(),
msg="Simple test cases should be reversed.")
self.assertIn('UnittestCase2', suite[8].id(),
msg="Unittest test cases should be reversed.")
self.assertIn('test_2', suite[0].id(),
msg="Methods of Django cases should be reversed.")
self.assertIn('test_2', suite[4].id(),
msg="Methods of simple cases should be reversed.")
self.assertIn('test_2', suite[8].id(),
msg="Methods of unittest cases should be reversed.")
def test_overridable_get_test_runner_kwargs(self):
self.assertIsInstance(DiscoverRunner().get_test_runner_kwargs(), dict)
def test_overridable_test_suite(self):
self.assertEqual(DiscoverRunner().test_suite, TestSuite)
def test_overridable_test_runner(self):
self.assertEqual(DiscoverRunner().test_runner, TextTestRunner)
def test_overridable_test_loader(self):
self.assertEqual(DiscoverRunner().test_loader, defaultTestLoader)
def test_tags(self):
runner = DiscoverRunner(tags=['core'])
self.assertEqual(runner.build_suite(['test_discovery_sample.tests_sample']).countTestCases(), 1)
runner = DiscoverRunner(tags=['fast'])
self.assertEqual(runner.build_suite(['test_discovery_sample.tests_sample']).countTestCases(), 2)
runner = DiscoverRunner(tags=['slow'])
self.assertEqual(runner.build_suite(['test_discovery_sample.tests_sample']).countTestCases(), 2)
def test_exclude_tags(self):
runner = DiscoverRunner(tags=['fast'], exclude_tags=['core'])
self.assertEqual(runner.build_suite(['test_discovery_sample.tests_sample']).countTestCases(), 1)
runner = DiscoverRunner(tags=['fast'], exclude_tags=['slow'])
self.assertEqual(runner.build_suite(['test_discovery_sample.tests_sample']).countTestCases(), 0)
runner = DiscoverRunner(exclude_tags=['slow'])
self.assertEqual(runner.build_suite(['test_discovery_sample.tests_sample']).countTestCases(), 4)
| bsd-3-clause |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/python/Lib/OpenGL/raw/GL/APPLE/flush_buffer_range.py | 3 | 1427 | '''OpenGL extension APPLE.flush_buffer_range
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_APPLE_flush_buffer_range'
_DEPRECATED = False
GL_BUFFER_SERIALIZED_MODIFY_APPLE = constant.Constant( 'GL_BUFFER_SERIALIZED_MODIFY_APPLE', 0x8A12 )
GL_BUFFER_FLUSHING_UNMAP_APPLE = constant.Constant( 'GL_BUFFER_FLUSHING_UNMAP_APPLE', 0x8A13 )
glBufferParameteriAPPLE = platform.createExtensionFunction(
'glBufferParameteriAPPLE',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLenum,constants.GLint,),
doc='glBufferParameteriAPPLE(GLenum(target), GLenum(pname), GLint(param)) -> None',
argNames=('target','pname','param',),
deprecated=_DEPRECATED,
)
glFlushMappedBufferRangeAPPLE = platform.createExtensionFunction(
'glFlushMappedBufferRangeAPPLE',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLintptr,constants.GLsizeiptr,),
doc='glFlushMappedBufferRangeAPPLE(GLenum(target), GLintptr(offset), GLsizeiptr(size)) -> None',
argNames=('target','offset','size',),
deprecated=_DEPRECATED,
)
def glInitFlushBufferRangeAPPLE():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
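# Illustrative usage sketch (not part of the generated module). GL_ARRAY_BUFFER
# comes from the core OpenGL bindings and the offset/size values are made up;
# a live GL context is required, so the calls are shown as comments only.
#
#   from OpenGL.GL import GL_ARRAY_BUFFER
#   if glInitFlushBufferRangeAPPLE():
#       glBufferParameteriAPPLE(GL_ARRAY_BUFFER,
#                               GL_BUFFER_SERIALIZED_MODIFY_APPLE, 0)
#       # ... map the buffer and modify the first 1024 bytes ...
#       glFlushMappedBufferRangeAPPLE(GL_ARRAY_BUFFER, 0, 1024)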
| agpl-3.0 |
johan--/Quiz-Program | vendor/bundle/ruby/2.2.0/gems/libv8-3.16.14.7/vendor/v8/tools/testrunner/server/daemon.py | 123 | 3753 | #!/usr/bin/env python
# This code has been written by Sander Marechal and published at:
# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
# where the author has placed it in the public domain (see comment #6 at
# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/#c6
# ).
# Some minor modifications have been made by the V8 authors. The work remains
# in the public domain.
import atexit
import os
from signal import SIGTERM
from signal import SIGINT
import sys
import time
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null',
stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
# TODO: (debug) re-enable this!
#os.dup2(si.fileno(), sys.stdin.fileno())
#os.dup2(so.fileno(), sys.stdout.fileno())
#os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
# Give the process a one-second chance to exit gracefully.
os.kill(pid, SIGINT)
time.sleep(1)
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be
called after the process has been daemonized by start() or restart().
"""
| cc0-1.0 |
allenlavoie/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py | 41 | 31331 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimator for Dynamic RNNs (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.training import momentum as momentum_opt
from tensorflow.python.util import nest
# TODO(jtbates): Remove PredictionType when all non-experimental targets which
# depend on it point to rnn_common.PredictionType.
class PredictionType(object):
SINGLE_VALUE = 1
MULTIPLE_VALUE = 2
def _get_state_name(i):
"""Constructs the name string for state component `i`."""
return '{}_{}'.format(rnn_common.RNNKeys.STATE_PREFIX, i)
def state_tuple_to_dict(state):
"""Returns a dict containing flattened `state`.
Args:
state: A `Tensor` or a nested tuple of `Tensors`. All of the `Tensor`s must
have the same rank and agree on all dimensions except the last.
Returns:
A dict containing the `Tensor`s that make up `state`. The keys of the dict
are of the form "STATE_PREFIX_i" where `i` is the place of this `Tensor`
in a depth-first traversal of `state`.
"""
with ops.name_scope('state_tuple_to_dict'):
flat_state = nest.flatten(state)
state_dict = {}
for i, state_component in enumerate(flat_state):
state_name = _get_state_name(i)
state_value = (None if state_component is None
else array_ops.identity(state_component, name=state_name))
state_dict[state_name] = state_value
return state_dict
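# Illustrative round-trip sketch (not part of the original module): for an LSTM
# state tuple (c, h), state_tuple_to_dict() yields keys '<STATE_PREFIX>_0' -> c
# and '<STATE_PREFIX>_1' -> h, where STATE_PREFIX is
# rnn_common.RNNKeys.STATE_PREFIX; dict_to_state_tuple() below performs the
# inverse packing, checking that each component is rank 2 and matches the
# cell's declared state size.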
def dict_to_state_tuple(input_dict, cell):
"""Reconstructs nested `state` from a dict containing state `Tensor`s.
Args:
input_dict: A dict of `Tensor`s.
cell: An instance of `RNNCell`.
Returns:
If `input_dict` does not contain keys 'STATE_PREFIX_i' for `0 <= i < n`
where `n` is the number of nested entries in `cell.state_size`, this
function returns `None`. Otherwise, returns a `Tensor` if `cell.state_size`
is an `int` or a nested tuple of `Tensor`s if `cell.state_size` is a nested
tuple.
Raises:
ValueError: State is partially specified. The `input_dict` must contain
values for all state components or none at all.
"""
flat_state_sizes = nest.flatten(cell.state_size)
state_tensors = []
with ops.name_scope('dict_to_state_tuple'):
for i, state_size in enumerate(flat_state_sizes):
state_name = _get_state_name(i)
state_tensor = input_dict.get(state_name)
if state_tensor is not None:
rank_check = check_ops.assert_rank(
state_tensor, 2, name='check_state_{}_rank'.format(i))
shape_check = check_ops.assert_equal(
array_ops.shape(state_tensor)[1],
state_size,
name='check_state_{}_shape'.format(i))
with ops.control_dependencies([rank_check, shape_check]):
state_tensor = array_ops.identity(state_tensor, name=state_name)
state_tensors.append(state_tensor)
if not state_tensors:
return None
elif len(state_tensors) == len(flat_state_sizes):
dummy_state = cell.zero_state(batch_size=1, dtype=dtypes.bool)
return nest.pack_sequence_as(dummy_state, state_tensors)
else:
raise ValueError(
        'RNN state was partially specified. '
'Expected zero or {} state Tensors; got {}'.
format(len(flat_state_sizes), len(state_tensors)))
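# Round-trip sketch (editor addition, not part of the original module). The
# cell size and type are arbitrary example values; `features` is assumed to be
# the dict passed to the model_fn, possibly carrying STATE_PREFIX_i keys.
def _example_dict_to_state_tuple(features):
  cell = rnn_common.construct_rnn_cell([8], 'lstm')
  # Returns None when no state keys are present, in which case the RNN falls
  # back to the cell's zero state (see construct_rnn below).
  return dict_to_state_tuple(features, cell)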
def _concatenate_context_input(sequence_input, context_input):
"""Replicates `context_input` across all timesteps of `sequence_input`.
Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
returned.
Args:
sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
ValueError: If `sequence_input` does not have rank 3 or `context_input` does
not have rank 2.
"""
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
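# Shape sketch (editor addition, not part of the original module): a
# [batch, time, d0] sequence combined with a [batch, d1] context vector
# yields a [batch, time, d0 + d1] tensor.
def _example_concatenate_context_input():
  seq = array_ops.zeros([2, 5, 4], dtype=dtypes.float32)
  ctx = array_ops.ones([2, 3], dtype=dtypes.float32)
  return _concatenate_context_input(seq, ctx)  # shape [2, 5, 7]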
def build_sequence_input(features,
sequence_feature_columns,
context_feature_columns,
weight_collections=None,
scope=None):
"""Combine sequence and context features into input for an RNN.
Args:
features: A `dict` containing the input and (optionally) sequence length
information and initial state.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
weight_collections: List of graph collections to which weights are added.
scope: Optional scope, passed through to parsing ops.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length, ?]`.
This will be used as input to an RNN.
"""
features = features.copy()
features.update(layers.transform_features(
features,
list(sequence_feature_columns) + list(context_feature_columns or [])))
sequence_input = layers.sequence_input_from_feature_columns(
columns_to_tensors=features,
feature_columns=sequence_feature_columns,
weight_collections=weight_collections,
scope=scope)
if context_feature_columns is not None:
context_input = layers.input_from_feature_columns(
columns_to_tensors=features,
feature_columns=context_feature_columns,
weight_collections=weight_collections,
scope=scope)
sequence_input = _concatenate_context_input(sequence_input, context_input)
return sequence_input
def construct_rnn(initial_state,
sequence_input,
cell,
num_label_columns,
dtype=dtypes.float32,
parallel_iterations=32,
swap_memory=True):
"""Build an RNN and apply a fully connected layer to get the desired output.
Args:
    initial_state: The initial state to pass to the RNN. If `None`, the
default starting state for `self._cell` is used.
sequence_input: A `Tensor` with shape `[batch_size, padded_length, d]`
that will be passed as input to the RNN.
cell: An initialized `RNNCell`.
num_label_columns: The desired output dimension.
dtype: dtype of `cell`.
parallel_iterations: Number of iterations to run in parallel. Values >> 1
use more memory but take less time, while smaller values use less memory
but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
Returns:
activations: The output of the RNN, projected to `num_label_columns`
dimensions.
final_state: A `Tensor` or nested tuple of `Tensor`s representing the final
state output by the RNN.
"""
with ops.name_scope('RNN'):
rnn_outputs, final_state = rnn.dynamic_rnn(
cell=cell,
inputs=sequence_input,
initial_state=initial_state,
dtype=dtype,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
time_major=False)
activations = layers.fully_connected(
inputs=rnn_outputs,
num_outputs=num_label_columns,
activation_fn=None,
trainable=True)
return activations, final_state
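# Minimal construction sketch (editor addition, not part of the original
# module). The cell size, input shape and label dimension are arbitrary.
def _example_construct_rnn():
  sequence_input = array_ops.zeros([2, 5, 4], dtype=dtypes.float32)
  cell = rnn_common.construct_rnn_cell([16], 'gru')
  activations, final_state = construct_rnn(
      initial_state=None,
      sequence_input=sequence_input,
      cell=cell,
      num_label_columns=3)
  # activations has shape [2, 5, 3]; final_state matches cell.state_size.
  return activations, final_state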
def _single_value_predictions(activations,
sequence_length,
target_column,
problem_type,
predict_probabilities):
"""Maps `activations` from the RNN to predictions for single value models.
If `predict_probabilities` is `False`, this function returns a `dict`
  containing a single entry with key `PREDICTIONS_KEY`. If `predict_probabilities`
is `True`, it will contain a second entry with key `PROBABILITIES_KEY`. The
value of this entry is a `Tensor` of probabilities with shape
`[batch_size, num_classes]`.
Args:
activations: Output from an RNN. Should have dtype `float32` and shape
`[batch_size, padded_length, ?]`.
sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
    target_column: An initialized `TargetColumn`, used to calculate predictions.
problem_type: Either `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
predict_probabilities: A Python boolean, indicating whether probabilities
should be returned. Should only be set to `True` for
classification/logistic regression problems.
Returns:
A `dict` mapping strings to `Tensors`.
"""
with ops.name_scope('SingleValuePrediction'):
last_activations = rnn_common.select_last_activations(
activations, sequence_length)
predictions_name = (prediction_key.PredictionKey.CLASSES
if problem_type == constants.ProblemType.CLASSIFICATION
else prediction_key.PredictionKey.SCORES)
if predict_probabilities:
probabilities = target_column.logits_to_predictions(
last_activations, proba=True)
prediction_dict = {
prediction_key.PredictionKey.PROBABILITIES: probabilities,
predictions_name: math_ops.argmax(probabilities, 1)}
else:
predictions = target_column.logits_to_predictions(
last_activations, proba=False)
prediction_dict = {predictions_name: predictions}
return prediction_dict
def _multi_value_loss(
activations, labels, sequence_length, target_column, features):
"""Maps `activations` from the RNN to loss for multi value models.
Args:
activations: Output from an RNN. Should have dtype `float32` and shape
`[batch_size, padded_length, ?]`.
    labels: A `Tensor` with shape `[batch_size, padded_length]`.
sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
    target_column: An initialized `TargetColumn`, used to calculate predictions.
features: A `dict` containing the input and (optionally) sequence length
information and initial state.
Returns:
A scalar `Tensor` containing the loss.
"""
with ops.name_scope('MultiValueLoss'):
activations_masked, labels_masked = rnn_common.mask_activations_and_labels(
activations, labels, sequence_length)
return target_column.loss(activations_masked, labels_masked, features)
def _single_value_loss(
activations, labels, sequence_length, target_column, features):
"""Maps `activations` from the RNN to loss for multi value models.
Args:
activations: Output from an RNN. Should have dtype `float32` and shape
`[batch_size, padded_length, ?]`.
    labels: A `Tensor` with shape `[batch_size]`.
sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
    target_column: An initialized `TargetColumn`, used to calculate predictions.
features: A `dict` containing the input and (optionally) sequence length
information and initial state.
Returns:
A scalar `Tensor` containing the loss.
"""
with ops.name_scope('SingleValueLoss'):
last_activations = rnn_common.select_last_activations(
activations, sequence_length)
return target_column.loss(last_activations, labels, features)
def _get_output_alternatives(prediction_type,
problem_type,
prediction_dict):
"""Constructs output alternatives dict for `ModelFnOps`.
Args:
prediction_type: either `MULTIPLE_VALUE` or `SINGLE_VALUE`.
problem_type: either `CLASSIFICATION` or `LINEAR_REGRESSION`.
prediction_dict: a dictionary mapping strings to `Tensor`s containing
predictions.
Returns:
`None` or a dictionary mapping a string to an output alternative.
Raises:
ValueError: `prediction_type` is not one of `SINGLE_VALUE` or
`MULTIPLE_VALUE`.
"""
if prediction_type == rnn_common.PredictionType.MULTIPLE_VALUE:
return None
if prediction_type == rnn_common.PredictionType.SINGLE_VALUE:
prediction_dict_no_state = {
k: v
for k, v in prediction_dict.items()
if rnn_common.RNNKeys.STATE_PREFIX not in k
}
return {'dynamic_rnn_output': (problem_type, prediction_dict_no_state)}
raise ValueError('Unrecognized prediction_type: {}'.format(prediction_type))
def _get_dynamic_rnn_model_fn(
cell_type,
num_units,
target_column,
problem_type,
prediction_type,
optimizer,
sequence_feature_columns,
context_feature_columns=None,
predict_probabilities=False,
learning_rate=None,
gradient_clipping_norm=None,
dropout_keep_probabilities=None,
sequence_length_key=rnn_common.RNNKeys.SEQUENCE_LENGTH_KEY,
dtype=dtypes.float32,
parallel_iterations=None,
swap_memory=True,
name='DynamicRNNModel'):
"""Creates an RNN model function for an `Estimator`.
The model function returns an instance of `ModelFnOps`. When
`problem_type == ProblemType.CLASSIFICATION` and
`predict_probabilities == True`, the returned `ModelFnOps` includes an output
alternative containing the classes and their associated probabilities. When
`predict_probabilities == False`, only the classes are included. When
`problem_type == ProblemType.LINEAR_REGRESSION`, the output alternative
contains only the predicted values.
Args:
cell_type: A string, a subclass of `RNNCell` or an instance of an `RNNCell`.
num_units: A single `int` or a list of `int`s. The size of the `RNNCell`s.
target_column: An initialized `TargetColumn`, used to calculate prediction
and loss.
problem_type: `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
prediction_type: `PredictionType.SINGLE_VALUE` or
`PredictionType.MULTIPLE_VALUE`.
optimizer: A subclass of `Optimizer`, an instance of an `Optimizer` or a
string.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
predict_probabilities: A boolean indicating whether to predict probabilities
for all classes. Must only be used with
`ProblemType.CLASSIFICATION`.
learning_rate: Learning rate used for optimization. This argument has no
effect if `optimizer` is an instance of an `Optimizer`.
gradient_clipping_norm: A float. Gradients will be clipped to this value.
dropout_keep_probabilities: a list of dropout keep probabilities or `None`.
If a list is given, it must have length `len(num_units) + 1`.
sequence_length_key: The key that will be used to look up sequence length in
the `features` dict.
dtype: The dtype of the state and output of the given `cell`.
parallel_iterations: Number of iterations to run in parallel. Values >> 1
use more memory but take less time, while smaller values use less memory
but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
name: A string that will be used to create a scope for the RNN.
Returns:
A model function to be passed to an `Estimator`.
Raises:
ValueError: `problem_type` is not one of
`ProblemType.LINEAR_REGRESSION` or `ProblemType.CLASSIFICATION`.
ValueError: `prediction_type` is not one of `PredictionType.SINGLE_VALUE`
or `PredictionType.MULTIPLE_VALUE`.
ValueError: `predict_probabilities` is `True` for `problem_type` other
than `ProblemType.CLASSIFICATION`.
ValueError: `len(dropout_keep_probabilities)` is not `len(num_units) + 1`.
"""
if problem_type not in (constants.ProblemType.CLASSIFICATION,
constants.ProblemType.LINEAR_REGRESSION):
raise ValueError(
'problem_type must be ProblemType.LINEAR_REGRESSION or '
'ProblemType.CLASSIFICATION; got {}'.
format(problem_type))
if prediction_type not in (rnn_common.PredictionType.SINGLE_VALUE,
rnn_common.PredictionType.MULTIPLE_VALUE):
raise ValueError(
        'prediction_type must be PredictionType.MULTIPLE_VALUE or '
'PredictionType.SINGLE_VALUE; got {}'.
format(prediction_type))
if (problem_type != constants.ProblemType.CLASSIFICATION
and predict_probabilities):
raise ValueError(
'predict_probabilities can only be set to True for problem_type'
' ProblemType.CLASSIFICATION; got {}.'.format(problem_type))
def _dynamic_rnn_model_fn(features, labels, mode):
"""The model to be passed to an `Estimator`."""
with ops.name_scope(name):
sequence_length = features.get(sequence_length_key)
sequence_input = build_sequence_input(features,
sequence_feature_columns,
context_feature_columns)
dropout = (dropout_keep_probabilities
if mode == model_fn.ModeKeys.TRAIN
else None)
# This class promises to use the cell type selected by that function.
cell = rnn_common.construct_rnn_cell(num_units, cell_type, dropout)
initial_state = dict_to_state_tuple(features, cell)
rnn_activations, final_state = construct_rnn(
initial_state,
sequence_input,
cell,
target_column.num_label_columns,
dtype=dtype,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
loss = None # Created below for modes TRAIN and EVAL.
if prediction_type == rnn_common.PredictionType.MULTIPLE_VALUE:
prediction_dict = rnn_common.multi_value_predictions(
rnn_activations, target_column, problem_type, predict_probabilities)
if mode != model_fn.ModeKeys.INFER:
loss = _multi_value_loss(
rnn_activations, labels, sequence_length, target_column, features)
elif prediction_type == rnn_common.PredictionType.SINGLE_VALUE:
prediction_dict = _single_value_predictions(
rnn_activations, sequence_length, target_column,
problem_type, predict_probabilities)
if mode != model_fn.ModeKeys.INFER:
loss = _single_value_loss(
rnn_activations, labels, sequence_length, target_column, features)
state_dict = state_tuple_to_dict(final_state)
prediction_dict.update(state_dict)
eval_metric_ops = None
if mode != model_fn.ModeKeys.INFER:
eval_metric_ops = rnn_common.get_eval_metric_ops(
problem_type, prediction_type, sequence_length, prediction_dict,
labels)
train_op = None
if mode == model_fn.ModeKeys.TRAIN:
train_op = optimizers.optimize_loss(
loss=loss,
global_step=None, # Get it internally.
learning_rate=learning_rate,
optimizer=optimizer,
clip_gradients=gradient_clipping_norm,
summaries=optimizers.OPTIMIZER_SUMMARIES)
output_alternatives = _get_output_alternatives(prediction_type,
problem_type,
prediction_dict)
return model_fn.ModelFnOps(mode=mode,
predictions=prediction_dict,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
output_alternatives=output_alternatives)
return _dynamic_rnn_model_fn
class DynamicRnnEstimator(estimator.Estimator):
"""Dynamically unrolled RNN (deprecated).
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
def __init__(self,
problem_type,
prediction_type,
sequence_feature_columns,
context_feature_columns=None,
num_classes=None,
num_units=None,
cell_type='basic_rnn',
optimizer='SGD',
learning_rate=0.1,
predict_probabilities=False,
momentum=None,
gradient_clipping_norm=5.0,
dropout_keep_probabilities=None,
model_dir=None,
feature_engineering_fn=None,
config=None):
"""Initializes a `DynamicRnnEstimator`.
The input function passed to this `Estimator` optionally contains keys
`RNNKeys.SEQUENCE_LENGTH_KEY`. The value corresponding to
    `RNNKeys.SEQUENCE_LENGTH_KEY` must be a vector of size `batch_size` where
entry `n` corresponds to the length of the `n`th sequence in the batch. The
sequence length feature is required for batches of varying sizes. It will be
used to calculate loss and evaluation metrics. If
`RNNKeys.SEQUENCE_LENGTH_KEY` is not included, all sequences are assumed to
have length equal to the size of dimension 1 of the input to the RNN.
In order to specify an initial state, the input function must include keys
`STATE_PREFIX_i` for all `0 <= i < n` where `n` is the number of nested
elements in `cell.state_size`. The input function must contain values for
all state components or none of them. If none are included, then the default
(zero) state is used as an initial state. See the documentation for
`dict_to_state_tuple` and `state_tuple_to_dict` for further details.
The input function can call rnn_common.construct_rnn_cell() to obtain the
same cell type that this class will select from arguments to __init__.
The `predict()` method of the `Estimator` returns a dictionary with keys
`STATE_PREFIX_i` for `0 <= i < n` where `n` is the number of nested elements
in `cell.state_size`, along with `PredictionKey.CLASSES` for problem type
`CLASSIFICATION` or `PredictionKey.SCORES` for problem type
`LINEAR_REGRESSION`. The value keyed by
`PredictionKey.CLASSES` or `PredictionKey.SCORES` has shape
`[batch_size, padded_length]` in the multi-value case and shape
`[batch_size]` in the single-value case. Here, `padded_length` is the
largest value in the `RNNKeys.SEQUENCE_LENGTH` `Tensor` passed as input.
Entry `[i, j]` is the prediction associated with sequence `i` and time step
`j`. If the problem type is `CLASSIFICATION` and `predict_probabilities` is
    `True`, it will also include key `PredictionKey.PROBABILITIES`.
Args:
problem_type: whether the `Estimator` is intended for a regression or
classification problem. Value must be one of
`ProblemType.CLASSIFICATION` or `ProblemType.LINEAR_REGRESSION`.
prediction_type: whether the `Estimator` should return a value for each
step in the sequence, or just a single value for the final time step.
Must be one of `PredictionType.SINGLE_VALUE` or
`PredictionType.MULTIPLE_VALUE`.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the iterable should be
instances of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
num_classes: the number of classes for a classification problem. Only
used when `problem_type=ProblemType.CLASSIFICATION`.
num_units: A list of integers indicating the number of units in the
`RNNCell`s in each layer.
      cell_type: A subclass of `RNNCell` or one of 'basic_rnn', 'lstm' or 'gru'.
optimizer: The type of optimizer to use. Either a subclass of
`Optimizer`, an instance of an `Optimizer`, a callback that returns an
optimizer, or a string. Strings must be one of 'Adagrad', 'Adam',
        'Ftrl', 'Momentum', 'RMSProp' or 'SGD'. See `layers.optimize_loss` for
more details.
learning_rate: Learning rate. This argument has no effect if `optimizer`
is an instance of an `Optimizer`.
predict_probabilities: A boolean indicating whether to predict
probabilities for all classes. Used only if `problem_type` is
        `ProblemType.CLASSIFICATION`.
momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
then no clipping is performed.
dropout_keep_probabilities: a list of dropout probabilities or `None`.
If a list is given, it must have length `len(num_units) + 1`. If
`None`, then no dropout is applied.
model_dir: The directory in which to save and restore the model graph,
parameters, etc.
feature_engineering_fn: Takes features and labels which are the output of
`input_fn` and returns features and labels which will be fed into
`model_fn`. Please check `model_fn` for a definition of features and
labels.
config: A `RunConfig` instance.
Raises:
ValueError: `problem_type` is not one of
`ProblemType.LINEAR_REGRESSION` or `ProblemType.CLASSIFICATION`.
ValueError: `problem_type` is `ProblemType.CLASSIFICATION` but
`num_classes` is not specified.
ValueError: `prediction_type` is not one of
`PredictionType.MULTIPLE_VALUE` or `PredictionType.SINGLE_VALUE`.
"""
if prediction_type == rnn_common.PredictionType.MULTIPLE_VALUE:
name = 'MultiValueDynamicRNN'
elif prediction_type == rnn_common.PredictionType.SINGLE_VALUE:
name = 'SingleValueDynamicRNN'
else:
raise ValueError(
'prediction_type must be one of PredictionType.MULTIPLE_VALUE or '
'PredictionType.SINGLE_VALUE; got {}'.format(prediction_type))
if problem_type == constants.ProblemType.LINEAR_REGRESSION:
name += 'Regressor'
target_column = layers.regression_target()
elif problem_type == constants.ProblemType.CLASSIFICATION:
if not num_classes:
raise ValueError('For CLASSIFICATION problem_type, num_classes must be '
'specified.')
target_column = layers.multi_class_target(n_classes=num_classes)
name += 'Classifier'
else:
raise ValueError(
'problem_type must be either ProblemType.LINEAR_REGRESSION '
'or ProblemType.CLASSIFICATION; got {}'.format(
problem_type))
if optimizer == 'Momentum':
optimizer = momentum_opt.MomentumOptimizer(learning_rate, momentum)
dynamic_rnn_model_fn = _get_dynamic_rnn_model_fn(
cell_type=cell_type,
num_units=num_units,
target_column=target_column,
problem_type=problem_type,
prediction_type=prediction_type,
optimizer=optimizer,
sequence_feature_columns=sequence_feature_columns,
context_feature_columns=context_feature_columns,
predict_probabilities=predict_probabilities,
learning_rate=learning_rate,
gradient_clipping_norm=gradient_clipping_norm,
dropout_keep_probabilities=dropout_keep_probabilities,
name=name)
super(DynamicRnnEstimator, self).__init__(
model_fn=dynamic_rnn_model_fn,
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
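# Usage sketch (editor addition, not part of the original module). The feature
# column name and layer sizes are hypothetical, not taken from this file.
def _example_build_estimator():
  seq_columns = [layers.real_valued_column('measurements', dimension=1)]
  return DynamicRnnEstimator(
      problem_type=constants.ProblemType.CLASSIFICATION,
      prediction_type=rnn_common.PredictionType.SINGLE_VALUE,
      sequence_feature_columns=seq_columns,
      num_classes=2,
      num_units=[32],
      cell_type='lstm')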
| apache-2.0 |
mzizzi/ansible | lib/ansible/modules/network/avi/avi_api_session.py | 26 | 8381 | #!/usr/bin/python
"""
# Created on Aug 12, 2016
#
# @author: Gaurav Rastogi ([email protected]) GitHub ID: grastogi23
#
# module_check: not supported
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_api_session
author: Gaurav Rastogi ([email protected])
short_description: Avi API Module
description:
    - This module can be used for calling any resource defined in the Avi REST API. U(https://avinetworks.com/)
    - This module is useful for invoking HTTP Patch methods and accessing resources that do not have a REST object associated with them.
version_added: 2.3
requirements: [ avisdk ]
options:
http_method:
description:
- Allowed HTTP methods for RESTful services and are supported by Avi Controller.
choices: ["get", "put", "post", "patch", "delete"]
required: true
data:
description:
- HTTP body in YAML or JSON format.
params:
description:
- Query parameters passed to the HTTP API.
path:
description:
            - 'Path for Avi API resource. For example, C(path: virtualservice) will translate to C(api/virtualservice).'
timeout:
description:
- Timeout (in seconds) for Avi API calls.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Get Pool Information using avi_api_session
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: pool
params:
name: "{{ pool_name }}"
api_version: 16.4
register: pool_results
- name: Patch Pool with list of servers
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: patch
path: "{{ pool_path }}"
api_version: 16.4
data:
add:
servers:
- ip:
addr: 10.10.10.10
type: V4
- ip:
addr: 20.20.20.20
type: V4
register: updated_pool
- name: Fetch Pool metrics bandwidth and connections rate
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: analytics/metrics/pool
api_version: 16.4
params:
name: "{{ pool_name }}"
metric_id: l4_server.avg_bandwidth,l4_server.avg_complete_conns
step: 300
limit: 10
register: pool_metrics
'''
RETURN = '''
obj:
description: Avi REST resource
returned: success, changed
type: dict
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from copy import deepcopy
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, ansible_return, HAS_AVI)
from avi.sdk.avi_api import ApiSession
from avi.sdk.utils.ansible_utils import avi_obj_cmp, cleanup_absent_fields
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
http_method=dict(required=True,
choices=['get', 'put', 'post', 'patch',
'delete']),
path=dict(type='str', required=True),
params=dict(type='dict'),
data=dict(type='jsonarg'),
timeout=dict(type='int', default=60)
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(argument_spec=argument_specs)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
tenant_uuid = module.params.get('tenant_uuid', None)
api = ApiSession.get_session(
module.params['controller'], module.params['username'],
module.params['password'], tenant=module.params['tenant'],
tenant_uuid=tenant_uuid)
tenant = module.params.get('tenant', '')
timeout = int(module.params.get('timeout'))
# path is a required argument
path = module.params.get('path', '')
params = module.params.get('params', None)
data = module.params.get('data', None)
# Get the api_version from module.
api_version = module.params.get('api_version', '16.4')
if data is not None:
data = json.loads(data)
method = module.params['http_method']
existing_obj = None
changed = method != 'get'
gparams = deepcopy(params) if params else {}
gparams.update({'include_refs': '', 'include_name': ''})
if method == 'post':
# need to check if object already exists. In that case
# change the method to be put
gparams['name'] = data['name']
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
try:
existing_obj = rsp.json()['results'][0]
except IndexError:
# object is not found
pass
else:
# object is present
method = 'put'
path += '/' + existing_obj['uuid']
if method == 'put':
        # put can happen when the full path is specified or it is put + post
if existing_obj is None:
using_collection = False
if (len(path.split('/')) == 1) and ('name' in data):
gparams['name'] = data['name']
using_collection = True
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
rsp_data = rsp.json()
if using_collection:
if rsp_data['results']:
existing_obj = rsp_data['results'][0]
path += '/' + existing_obj['uuid']
else:
method = 'post'
else:
if rsp.status_code == 404:
method = 'post'
else:
existing_obj = rsp_data
if existing_obj:
changed = not avi_obj_cmp(data, existing_obj)
cleanup_absent_fields(data)
if method == 'patch':
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
existing_obj = rsp.json()
if (method == 'put' and changed) or (method != 'put'):
fn = getattr(api, method)
        rsp = fn(path, tenant=tenant, tenant_uuid=tenant_uuid, timeout=timeout,
params=params, data=data, api_version=api_version)
else:
rsp = None
if method == 'delete' and rsp.status_code == 404:
changed = False
rsp.status_code = 200
if method == 'patch' and existing_obj and rsp.status_code < 299:
# Ideally the comparison should happen with the return values
# from the patch API call. However, currently Avi API are
# returning different hostname when GET is used vs Patch.
# tracked as AV-12561
if path.startswith('pool'):
time.sleep(1)
gparams = deepcopy(params) if params else {}
gparams.update({'include_refs': '', 'include_name': ''})
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
new_obj = rsp.json()
changed = not avi_obj_cmp(new_obj, existing_obj)
if rsp is None:
return module.exit_json(changed=changed, obj=existing_obj)
return ansible_return(module, rsp, changed, req=data)
if __name__ == '__main__':
main()
| gpl-3.0 |
googlei18n/TachyFont | run_time/src/gae_server/third_party/old-fonttools-master/Lib/fontTools/misc/homeResFile.py | 11 | 2278 | """Mac-only module to find the home file of a resource."""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
import array
import calldll
import macfs, Res
def HomeResFile(res):
"""Return a path to the file in which resource 'res' lives."""
return GetFileLocation(res.HomeResFile())
def GetFileLocation(refNum):
"""Return a path to the open file identified with refNum."""
pb = ParamBlock(refNum)
return pb.getPath()
#
# Internal cruft, adapted from MoreFiles
#
_InterfaceLib = calldll.getlibrary("InterfaceLib")
GetVRefNum = calldll.newcall(_InterfaceLib.GetVRefNum, "None", "InShort", "OutShort")
_getInfo = calldll.newcall(_InterfaceLib.PBGetFCBInfoSync, "Short", "InLong")
_FCBPBFormat = """
qLink: l
qType: h
ioTrap: h
ioCmdAddr: l
ioCompletion: l
ioResult: h
ioNamePtr: l
ioVRefNum: h
ioRefNum: h
filler: h
ioFCBIndx: h
filler1: h
ioFCBFINm: l
ioFCBFlags: h
ioFCBStBlk: h
ioFCBEOF: l
ioFCBPLen: l
ioFCBCrPs: l
ioFCBVRefNum: h
ioFCBClpSiz: l
ioFCBParID: l
"""
class ParamBlock(object):
"""Wrapper for the very low level FCBPB record."""
def __init__(self, refNum):
self.__fileName = array.array("c", "\0" * 64)
sstruct.unpack(_FCBPBFormat,
"\0" * sstruct.calcsize(_FCBPBFormat), self)
self.ioNamePtr = self.__fileName.buffer_info()[0]
self.ioRefNum = refNum
self.ioVRefNum = GetVRefNum(refNum)
self.__haveInfo = 0
def getInfo(self):
if self.__haveInfo:
return
data = sstruct.pack(_FCBPBFormat, self)
buf = array.array("c", data)
ptr = buf.buffer_info()[0]
err = _getInfo(ptr)
if err:
raise Res.Error("can't get file info", err)
sstruct.unpack(_FCBPBFormat, buf.tostring(), self)
self.__haveInfo = 1
def getFileName(self):
self.getInfo()
data = self.__fileName.tostring()
return data[1:byteord(data[0])+1]
def getFSSpec(self):
self.getInfo()
vRefNum = self.ioVRefNum
parID = self.ioFCBParID
return macfs.FSSpec((vRefNum, parID, self.getFileName()))
def getPath(self):
return self.getFSSpec().as_pathname()
if __name__ == "__main__":
fond = Res.GetNamedResource("FOND", "Helvetica")
print(HomeResFile(fond))
| apache-2.0 |
daf/OWSLib | owslib/util.py | 2 | 13839 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2008 Tom Kralidis
#
# Authors : Tom Kralidis <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
import sys
from dateutil import parser
from datetime import datetime
import pytz
from owslib.etree import etree
import urlparse, urllib2
from urllib2 import urlopen, HTTPError, Request
from urllib2 import HTTPPasswordMgrWithDefaultRealm
from urllib2 import HTTPBasicAuthHandler
from StringIO import StringIO
import cgi
from urllib import urlencode
import re
"""
Utility functions and classes
"""
class RereadableURL(StringIO,object):
""" Class that acts like a combination of StringIO and url - has seek method and url headers etc """
def __init__(self, u):
#get url headers etc from url
self.headers = u.headers
#get file like seek, read methods from StringIO
content=u.read()
super(RereadableURL, self).__init__(content)
class ServiceException(Exception):
#TODO: this should go in ows common module when refactored.
pass
# http://stackoverflow.com/questions/6256183/combine-two-dictionaries-of-dictionaries-python
dict_union = lambda d1,d2: dict((x,(dict_union(d1.get(x,{}),d2[x]) if
isinstance(d2.get(x),dict) else d2.get(x,d1.get(x)))) for x in
set(d1.keys()+d2.keys()))
# Infinite DateTimes for Python. Used in SWE 2.0 and other OGC specs as "INF" and "-INF"
class InfiniteDateTime(object):
def __lt__(self, other):
return False
def __gt__(self, other):
return True
def timetuple(self):
return tuple()
class NegativeInfiniteDateTime(object):
def __lt__(self, other):
return True
def __gt__(self, other):
return False
def timetuple(self):
return tuple()
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def format_string(prop_string):
"""
Formats a property string to remove spaces and go from CamelCase to pep8
from: http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-camel-case
"""
if prop_string is None:
return ''
st_r = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', prop_string)
st_r = st_r.replace(' ','')
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', st_r).lower()
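# Illustrative examples (editor addition, not part of the original module):
#   format_string('TimePeriod')        -> 'time_period'
#   format_string('observed Property') -> 'observed_property'
#   format_string(None)                -> ''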
def xml_to_dict(root, prefix=None, depth=1, diction=None):
"""
    Recursively iterates through an xml element to convert each element in the tree to a (key, val) pair, where key is the element
    tag and val is the inner-text of the element. Note that this recursively goes through the tree until the specified depth is reached.
Parameters
===========
:root - root xml element, starting point of iteration
:prefix - a string to prepend to the resulting key (optional)
:depth - the number of depths to process in the tree (optional)
:diction - the dictionary to insert the (tag,text) pairs into (optional)
Return
=======
Dictionary of (key,value); where key is the element tag stripped of namespace and cleaned up to be pep8 and
value is the inner-text of the element. Note that duplicate elements will be replaced by the last element of the
same tag in the tree.
"""
ret = diction if diction is not None else dict()
for child in root:
val = testXMLValue(child)
# skip values that are empty or None
if val is None or val == '':
if depth > 1:
ret = xml_to_dict(child,prefix=prefix,depth=(depth-1),diction=ret)
continue
key = format_string(child.tag.split('}')[-1])
if prefix is not None:
key = prefix + key
ret[key] = val
if depth > 1:
ret = xml_to_dict(child,prefix=prefix,depth=(depth-1),diction=ret)
return ret
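# Illustrative sketch (editor addition, not part of the original module).
def _xml_to_dict_example():
    root = etree.fromstring('<Root><FirstName>Jane</FirstName><LastName>Doe</LastName></Root>')
    return xml_to_dict(root)  # {'first_name': 'Jane', 'last_name': 'Doe'}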
def openURL(url_base, data, method='Get', cookies=None, username=None, password=None):
    ''' function to open URLs - a wrapper around urllib2.urlopen, with additional checks for OGC service exceptions and URL formatting; also handles cookies and simple username/password authentication '''
url_base.strip()
lastchar = url_base[-1]
if lastchar not in ['?', '&']:
if url_base.find('?') == -1:
url_base = url_base + '?'
else:
url_base = url_base + '&'
if username and password:
# Provide login information in order to use the WMS server
# Create an OpenerDirector with support for Basic HTTP
# Authentication...
passman = HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, url_base, username, password)
auth_handler = HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(auth_handler)
openit = opener.open
else:
# NOTE: optionally set debuglevel>0 to debug HTTP connection
#opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=0))
#openit = opener.open
openit = urlopen
try:
if method == 'Post':
req = Request(url_base, data)
# set appropriate header if posting XML
try:
xml = etree.fromstring(data)
req.add_header('Content-Type', "text/xml")
except:
pass
else:
req=Request(url_base + data)
if cookies is not None:
req.add_header('Cookie', cookies)
u = openit(req)
except HTTPError, e: #Some servers may set the http header to 400 if returning an OGC service exception or 401 if unauthorised.
if e.code in [400, 401]:
raise ServiceException, e.read()
else:
raise e
# check for service exceptions without the http header set
if u.info()['Content-Type'] in ['text/xml', 'application/xml']:
#just in case 400 headers were not set, going to have to read the xml to see if it's an exception report.
        #wrap the url stream in an extended StringIO object so it's re-readable
u=RereadableURL(u)
se_xml= u.read()
se_tree = etree.fromstring(se_xml)
serviceException=se_tree.find('{http://www.opengis.net/ows}Exception')
if serviceException is None:
serviceException=se_tree.find('ServiceException')
if serviceException is not None:
raise ServiceException, \
str(serviceException.text).strip()
u.seek(0) #return cursor to start of u
return u
#default namespace for nspath is OWS common
OWS_NAMESPACE = 'http://www.opengis.net/ows/1.1'
def nspath(path, ns=OWS_NAMESPACE):
"""
Prefix the given path with the given namespace identifier.
Parameters
----------
- path: ElementTree API Compatible path expression
- ns: the XML namespace URI.
"""
if ns is None or path is None:
return -1
components = []
for component in path.split('/'):
if component != '*':
component = '{%s}%s' % (ns, component)
components.append(component)
return '/'.join(components)
def nspath_eval(xpath, namespaces):
''' Return an etree friendly xpath '''
out = []
for chunks in xpath.split('/'):
namespace, element = chunks.split(':')
out.append('{%s}%s' % (namespaces[namespace], element))
return '/'.join(out)
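# Illustrative examples (editor addition, not part of the original module):
#   nspath('ServiceIdentification/Title')
#     -> '{http://www.opengis.net/ows/1.1}ServiceIdentification/{http://www.opengis.net/ows/1.1}Title'
#   nspath_eval('ows:Title', {'ows': OWS_NAMESPACE})
#     -> '{http://www.opengis.net/ows/1.1}Title'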
def cleanup_namespaces(element):
""" Remove unused namespaces from an element """
if etree.__name__ == 'lxml.etree':
etree.cleanup_namespaces(element)
return element
else:
return etree.fromstring(etree.tostring(element))
def testXMLValue(val, attrib=False):
"""
Test that the XML value exists, return val.text, else return None
Parameters
----------
- val: the value to be tested
"""
if val is not None:
if attrib:
return val.strip()
elif val.text:
return val.text.strip()
else:
return None
else:
return None
def testXMLAttribute(element, attribute):
"""
Test that the XML element and attribute exist, return attribute's value, else return None
Parameters
----------
- element: the element containing the attribute
- attribute: the attribute name
"""
if element is not None:
attrib = element.get(attribute)
if attrib is not None:
return attrib.strip()
return None
def http_post(url=None, request=None, lang='en-US', timeout=10):
"""
Invoke an HTTP POST request
Parameters
----------
- url: the URL of the server
- request: the request message
- lang: the language
- timeout: timeout in seconds
"""
if url is not None:
u = urlparse.urlsplit(url)
r = urllib2.Request(url, request)
r.add_header('User-Agent', 'OWSLib (https://geopython.github.io/OWSLib)')
r.add_header('Content-type', 'text/xml')
r.add_header('Content-length', '%d' % len(request))
r.add_header('Accept', 'text/xml')
r.add_header('Accept-Language', lang)
r.add_header('Accept-Encoding', 'gzip,deflate')
r.add_header('Host', u.netloc)
try:
up = urllib2.urlopen(r,timeout=timeout);
except TypeError:
import socket
socket.setdefaulttimeout(timeout)
up = urllib2.urlopen(r)
ui = up.info() # headers
response = up.read()
up.close()
# check if response is gzip compressed
if ui.has_key('Content-Encoding'):
if ui['Content-Encoding'] == 'gzip': # uncompress response
import gzip
cds = StringIO(response)
gz = gzip.GzipFile(fileobj=cds)
response = gz.read()
return response
def xml2string(xml):
"""
Return a string of XML object
Parameters
----------
- xml: xml string
"""
return '<?xml version="1.0" encoding="ISO-8859-1" standalone="no"?>\n' + xml
def xmlvalid(xml, xsd):
"""
Test whether an XML document is valid
Parameters
----------
- xml: XML content
- xsd: pointer to XML Schema (local file path or URL)
"""
xsd1 = etree.parse(xsd)
xsd2 = etree.XMLSchema(xsd1)
doc = etree.parse(StringIO(xml))
return xsd2.validate(doc)
def xmltag_split(tag):
''' Return XML element bare tag name (without prefix) '''
try:
return tag.split('}')[1]
except:
return tag
def getNamespace(element):
''' Utility method to extract the namespace from an XML element tag encoded as {namespace}localname. '''
if element.tag[0]=='{':
return element.tag[1:].split("}")[0]
else:
return ""
def build_get_url(base_url, params):
''' Utility function to build a full HTTP GET URL from the service base URL and a dictionary of HTTP parameters. '''
qs = []
if base_url.find('?') != -1:
qs = cgi.parse_qsl(base_url.split('?')[1])
pars = [x[0] for x in qs]
for key,value in params.iteritems():
if key not in pars:
qs.append( (key,value) )
urlqs = urlencode(tuple(qs))
return base_url.split('?')[0] + '?' + urlqs
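# Illustrative example (editor addition, not part of the original module);
# note that the parameter order in the result may vary:
#   build_get_url('http://host/wms', {'service': 'WMS', 'request': 'GetCapabilities'})
#     -> 'http://host/wms?service=WMS&request=GetCapabilities'
# Parameters already present in the base URL's query string are preserved and
# not overwritten by `params`.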
def dump(obj, prefix=''):
'''Utility function to print to standard output a generic object with all its attributes.'''
print "%s %s : %s" % (prefix, obj.__class__, obj.__dict__)
def getTypedValue(type, value):
''' Utility function to cast a string value to the appropriate XSD type. '''
if type=='boolean':
return bool(value)
elif type=='integer':
return int(value)
elif type=='float':
return float(value)
elif type=='string':
return str(value)
else:
return value # no type casting
def extract_time(element):
''' return a datetime object based on a gml text string
ex:
<gml:beginPosition>2006-07-27T21:10:00Z</gml:beginPosition>
<gml:endPosition indeterminatePosition="now"/>
If there happens to be a strange element with both attributes and text,
use the text.
ex: <gml:beginPosition indeterminatePosition="now">2006-07-27T21:10:00Z</gml:beginPosition>
Would be 2006-07-27T21:10:00Z, not 'now'
'''
if element is None:
return None
try:
dt = parser.parse(element.text)
except Exception:
att = testXMLValue(element.attrib.get('indeterminatePosition'), True)
if att and att == 'now':
dt = datetime.utcnow()
dt.replace(tzinfo=pytz.utc)
else:
dt = None
return dt
def extract_xml_list(elements):
"""
    Some people don't have separate tags for their keywords and separate them with
a newline. This will extract out all of the keywords correctly.
"""
keywords = [re.split(r'[\n\r]+',f.text) for f in elements if f.text]
flattened = [item.strip() for sublist in keywords for item in sublist]
remove_blank = filter(None, flattened)
return remove_blank
def bind_url(url):
"""binds an HTTP GET query string endpiont"""
if url.find('?') == -1: # like http://host/wms
binder = '?'
# if like http://host/wms?foo=bar& or http://host/wms?foo=bar
if url.find('=') != -1:
if url.find('&', -1) != -1: # like http://host/wms?foo=bar&
binder = ''
else: # like http://host/wms?foo=bar
binder = '&'
# if like http://host/wms?foo
if url.find('?') != -1:
if url.find('?', -1) != -1: # like http://host/wms?
binder = ''
elif url.find('&', -1) == -1: # like http://host/wms?foo=bar
binder = '&'
return '%s%s' % (url, binder)
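# Illustrative examples (editor addition, not part of the original module):
#   bind_url('http://host/wms')          -> 'http://host/wms?'
#   bind_url('http://host/wms?foo=bar')  -> 'http://host/wms?foo=bar&'
#   bind_url('http://host/wms?foo=bar&') -> 'http://host/wms?foo=bar&'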
| bsd-3-clause |
ZimmermanGroup/molecularGSM | TEST/ethyleneRotation/mopac/de-gsm/compare.py | 9 | 1337 | # for reading csv format files
import csv
# function to convert strings to float and skip conversion if the value is not a float
def convertToFloat(inList):
for element in inList:
try:
yield float(element)
except ValueError:
yield element
# subtract two floats and skip if string
def subtractFloat(number1, number2):
try:
return (number1 - number2)
except TypeError:
pass
def main():
threshold = 0.001
try:
# read standard and output files
standard = csv.reader(open('stringfile.standard', 'r'), delimiter=' ')
currentOutput = csv.reader(open('stringfile.xyz0001', 'r'), delimiter=' ')
# error if file does not exist
except IOError:
print("Error: File cannot be found!")
exit(1)
# loop over elements of two files simultaneously
for rowStd, rowOut in zip(standard, currentOutput):
rowStd = filter(None, rowStd)
rowOut = filter(None, rowOut)
for valStd, valOut in zip(list(convertToFloat(rowStd)), list(convertToFloat(rowOut))):
# error if difference larger than threshold
if ((subtractFloat(valStd, valOut)) > threshold):
print ((subtractFloat(valStd, valOut)))
exit(2)
return 0
if __name__ == "__main__":
main()
| mit |
felgari/tips | python/string.py | 3 | 1804 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Felipe Gallego. All rights reserved.
#
# This file is part of code-snippets: https://github.com/felgari/snippets
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Strings.
-------------------------------------------------------------------------------
"""
# Remove empty characters at the start and the end of a string.
the_string.strip()
# Find the position of a string into another, returns -1 if not found.
pos = the_string.find(string_to_find)
# Find the last position (find begins at the right) of a string into another,
# returns -1 if not found.
pos = the_string.rfind(string_to_find)
# Check if a string starts with a given substring.
the_string.startswith(substring)
# Replace in a string a given substring with another.
the_string.replace(substring_1, substring_2)
# Get a string of file names from a list containing the file names.
list_of_files = str(list_with_strings).translate(None, "[]\'")
# Generate a list from a string whose elements are substrings delimited by separators.
the_list = the_string.split(separator)
# Putting leading zeros when converting integer to string
# (Add 0 in this case if necessary to get two digits).
str(the_int).zfill(2) | gpl-3.0 |
Resmin/Resmin | resmin/apps/story/serializers.py | 1 | 1874 | from django.contrib.auth.models import User
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
bio = serializers.CharField(
source='profile.bio', read_only=True)
follower_count = serializers.IntegerField(
source='profile.follower_count', read_only=True)
following_count = serializers.IntegerField(
source='profile.following_count', read_only=True)
story_count = serializers.IntegerField(
source='profile.story_count', read_only=True)
avatar = serializers.CharField(
source='profile.avatar', read_only=True)
class Meta:
model = User
fields = ('username',
'bio',
'following_count',
'follower_count',
'story_count',
'avatar')
class JSONSerializerField(serializers.Field):
""" Serializer for JSONField -- required to make field writable. """
def to_internal_value(self, data):
return data
def to_representation(self, value):
return value
class StorySerializer(serializers.ModelSerializer):
question_meta_text = serializers.CharField(
source='question_meta.text', read_only=True)
owner = serializers.CharField(
source='owner.username', read_only=True)
cover_img = JSONSerializerField(
read_only=True)
class Meta:
model = 'apps.story.Story'
fields = ('id',
'title',
'description',
'is_nsfw',
'is_featured',
'like_count',
'slot_count',
'comment_count',
'cover_img',
'status',
'owner',
'question',
'question_meta',
'question_meta_text')
| gpl-3.0 |
liverbirdkte/iris-panel | iris/packagedb/apiviews.py | 7 | 2551 | # -*- coding: utf-8 -*-
# This file is part of IRIS: Infrastructure and Release Information System
#
# Copyright (C) 2013 Intel Corporation
#
# IRIS is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2.0 as published by the Free Software Foundation.
"""
This is the API view file for the iris-packagedb application.
Views shown by REST Framework under API URLs are defined here.
"""
# pylint: disable=E1101,W0232,C0111,R0901,R0904,W0613
#W0613: Unused argument %r(here it is request)
from rest_framework.viewsets import ReadOnlyModelViewSet, ViewSet
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from iris.core.models import (SubDomain, GitTree, Package, Product)
from iris.packagedb.serializers import (
DomainSerializer, GitTreeSerializer, PackageSerializer, ProductSerializer)
class DomainViewSet(ViewSet):
"""
View to the Domains provided by the API.
"""
def list(self, request):
queryset = SubDomain.objects.prefetch_related(
'domain__role_set__user_set',
'subdomainrole_set__user_set'
).order_by('domain__name', 'name')
serializer = DomainSerializer(queryset, many=True)
return Response(serializer.data)
def retrieve(self, request, name=None):
domain, subdomain = name.split('/')
obj = get_object_or_404(SubDomain,
name=subdomain.strip(),
domain__name=domain.strip())
serializer = DomainSerializer(obj)
return Response(serializer.data)
class GitTreeViewSet(ReadOnlyModelViewSet):
"""
View to the GitTrees provided by the API.
"""
queryset = GitTree.objects.select_related(
'subdomain__domain',
).prefetch_related(
'packages',
'licenses',
'role_set__user_set'
).order_by('gitpath')
serializer_class = GitTreeSerializer
lookup_field = 'gitpath'
class PackageViewSet(ReadOnlyModelViewSet):
"""
View to the Packages provided by the API.
"""
queryset = Package.objects.prefetch_related('gittree_set').order_by('name')
serializer_class = PackageSerializer
lookup_field = 'name'
class ProductViewSet(ReadOnlyModelViewSet):
"""
View to the Products provided by the API.
"""
queryset = Product.objects.prefetch_related('gittrees').order_by('name')
serializer_class = ProductSerializer
lookup_field = 'name'
| gpl-2.0 |
janslow/boto | boto/sqs/__init__.py | 129 | 1705 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.sqs.regioninfo import SQSRegionInfo
from boto.regioninfo import get_regions
def regions():
"""
Get all available regions for the SQS service.
:rtype: list
:return: A list of :class:`boto.sqs.regioninfo.RegionInfo`
"""
from boto.sqs.connection import SQSConnection
return get_regions(
'sqs',
region_cls=SQSRegionInfo,
connection_cls=SQSConnection
)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
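# Usage sketch (editor addition, not part of the original module). Credentials
# are assumed to come from the environment or the boto config file:
#
#   import boto.sqs
#   conn = boto.sqs.connect_to_region('us-west-2')
#   if conn is None:
#       raise ValueError('Unknown SQS region')
#   queue = conn.create_queue('my-queue')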
| mit |
d-e-s-o/git-blamediff | cleanup/src/deso/cleanup/__init__.py | 2 | 1367 | # __init__.py
#/***************************************************************************
# * Copyright (C) 2015 Daniel Mueller ([email protected]) *
# * *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program. If not, see <http://www.gnu.org/licenses/>. *
# ***************************************************************************/
"""Initialization file for the deso.cleanup package."""
from deso.cleanup.defer import (
defer,
)
| gpl-3.0 |
adrienbrault/home-assistant | script/scaffold/templates/reproduce_state/tests/test_reproduce_state.py | 5 | 1943 | """Test reproduce state for NEW_NAME."""
import pytest
from homeassistant.core import HomeAssistant, State
from tests.common import async_mock_service
async def test_reproducing_states(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
"""Test reproducing NEW_NAME states."""
hass.states.async_set("NEW_DOMAIN.entity_off", "off", {})
hass.states.async_set("NEW_DOMAIN.entity_on", "on", {"color": "red"})
turn_on_calls = async_mock_service(hass, "NEW_DOMAIN", "turn_on")
turn_off_calls = async_mock_service(hass, "NEW_DOMAIN", "turn_off")
# These calls should do nothing as entities already in desired state
await hass.helpers.state.async_reproduce_state(
[
State("NEW_DOMAIN.entity_off", "off"),
State("NEW_DOMAIN.entity_on", "on", {"color": "red"}),
],
blocking=True,
)
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Test invalid state is handled
await hass.helpers.state.async_reproduce_state(
[State("NEW_DOMAIN.entity_off", "not_supported")], blocking=True
)
assert "not_supported" in caplog.text
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Make sure correct services are called
await hass.helpers.state.async_reproduce_state(
[
State("NEW_DOMAIN.entity_on", "off"),
State("NEW_DOMAIN.entity_off", "on", {"color": "red"}),
# Should not raise
State("NEW_DOMAIN.non_existing", "on"),
],
blocking=True,
)
assert len(turn_on_calls) == 1
assert turn_on_calls[0].domain == "NEW_DOMAIN"
assert turn_on_calls[0].data == {
"entity_id": "NEW_DOMAIN.entity_off",
"color": "red",
}
assert len(turn_off_calls) == 1
assert turn_off_calls[0].domain == "NEW_DOMAIN"
assert turn_off_calls[0].data == {"entity_id": "NEW_DOMAIN.entity_on"}
| mit |
endlessm/chromium-browser | third_party/boringssl/src/third_party/googletest/scripts/upload_gtest.py | 1963 | 2851 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
[email protected] to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = '[email protected]'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
| bsd-3-clause |
engdan77/edoAutoHomeMobile | twisted/test/stdio_test_consumer.py | 3 | 1131 | # -*- test-case-name: twisted.test.test_stdio.StandardInputOutputTests.test_consumer -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Main program for the child process run by
L{twisted.test.test_stdio.StandardInputOutputTests.test_consumer} to test
that process transports implement IConsumer properly.
"""
__import__('_preamble')
import sys
from twisted.python import log, reflect
from twisted.internet import stdio, protocol
from twisted.protocols import basic
def failed(err):
log.startLogging(sys.stderr)
log.err(err)
class ConsumerChild(protocol.Protocol):
def __init__(self, junkPath):
self.junkPath = junkPath
def connectionMade(self):
d = basic.FileSender().beginFileTransfer(file(self.junkPath), self.transport)
d.addErrback(failed)
d.addCallback(lambda ign: self.transport.loseConnection())
def connectionLost(self, reason):
reactor.stop()
if __name__ == '__main__':
reflect.namedAny(sys.argv[1]).install()
from twisted.internet import reactor
stdio.StandardIO(ConsumerChild(sys.argv[2]))
reactor.run()
| mit |
juanalfonsopr/odoo | openerp/addons/base/ir/__init__.py | 379 | 1444 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir_model
import ir_sequence
import ir_needaction
import ir_ui_menu
import ir_ui_view
import ir_default
import ir_actions
import ir_attachment
import ir_cron
import ir_filters
import ir_values
import ir_translation
import ir_exports
import ir_rule
import ir_config_parameter
import osv_memory_autovacuum
import ir_mail_server
import ir_fields
import ir_qweb
import ir_http
import ir_logging
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zaccoz/odoo | addons/event_sale/__openerp__.py | 306 | 2163 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Events Sales',
'version': '0.1',
'category': 'Tools',
'website' : 'https://www.odoo.com/page/events',
'description': """
Creating registrations with sale orders.
========================================
This module allows you to automate registration creation and connect it with
your main sale flow, thereby enabling the invoicing of registrations.
It defines a new kind of service product that lets you choose an event
category associated with it. When you encode a sale order for that product,
you can choose an existing event of that category, and confirming the sale
order automatically creates a registration for this event.
""",
'author': 'OpenERP SA',
'depends': ['event', 'sale_crm'],
'data': [
'event_sale_view.xml',
'event_sale_data.xml',
'event_sale_report.xml',
'views/report_registrationbadge.xml',
'security/ir.model.access.csv',
],
'demo': ['event_demo.xml'],
'test': ['test/confirm.yml'],
'installable': True,
'auto_install': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
towerjoo/mindsbook | django/contrib/gis/db/models/sql/compiler.py | 25 | 12599 | from itertools import izip
from django.db.backends.util import truncate_name
from django.db.models.sql import compiler
from django.db.models.sql.constants import TABLE_NAME
from django.db.models.sql.query import get_proxied_model
SQLCompiler = compiler.SQLCompiler
class GeoSQLCompiler(compiler.SQLCompiler):
def get_columns(self, with_aliases=False):
"""
Return the list of columns to use in the select statement. If no
columns have been specified, returns all columns relating to fields in
the model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguity with nested queries.
This routine is overridden from Query to handle customized selection of
geometry columns.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (self.get_extra_select_format(alias) % col[0], qn2(alias))
for alias, col in self.query.extra_select.iteritems()]
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
# This loop customized for GeoQuery.
for col, field in izip(self.query.select, self.query.select_fields):
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and col not in only_load[table]:
continue
r = self.get_field_select(field, alias, column)
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(col.as_sql(qn, self.connection))
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
result.extend([
'%s%s' % (
self.get_extra_select_format(alias) % aggregate.as_sql(qn, self.connection),
alias is not None
and ' AS %s' % qn(truncate_name(alias, max_name_length))
or ''
)
for alias, aggregate in self.query.aggregate_select.items()
])
# This loop customized for GeoQuery.
for (table, col), field in izip(self.query.related_select_cols, self.query.related_select_fields):
r = self.get_field_select(field, table, col)
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, local_only=False):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
This routine is overridden from Query to handle customized selection of
geometry columns.
"""
result = []
if opts is None:
opts = self.query.model._meta
aliases = set()
only_load = self.deferred_to_columns()
# Skip all proxy to the root proxied model
proxied_model = get_proxied_model(opts)
if start_alias:
seen = {None: start_alias}
for field, model in opts.get_fields_with_model():
if local_only and model is not None:
continue
if start_alias:
try:
alias = seen[model]
except KeyError:
if model is proxied_model:
alias = start_alias
else:
link_field = opts.get_ancestor_link(model)
alias = self.query.join((start_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
seen[model] = alias
else:
# If we're starting from the base model of the queryset, the
# aliases will have already been set up in pre_sql_setup(), so
# we can save time here.
alias = self.query.included_inherited_models[model]
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
# This part of the function is customized for GeoQuery. We
# see if there was any custom selection specified in the
# dictionary, and set up the selection format appropriately.
field_sel = self.get_field_select(field, alias)
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (field_sel, c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = field_sel
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def resolve_columns(self, row, fields=()):
"""
This routine is necessary so that distances and geometries returned
from extra selection SQL get resolved appropriately into Python
objects.
"""
values = []
aliases = self.query.extra_select.keys()
if self.query.aggregates:
# If we have an aggregate annotation, must extend the aliases
# so their corresponding row values are included.
aliases.extend([None for i in xrange(len(self.query.aggregates))])
# Have to set a starting row number offset that is used for
# determining the correct starting row index -- needed for
# doing pagination with Oracle.
rn_offset = 0
if self.connection.ops.oracle:
if self.query.high_mark is not None or self.query.low_mark: rn_offset = 1
index_start = rn_offset + len(aliases)
# Converting any extra selection values (e.g., geometries and
# distance objects added by GeoQuerySet methods).
values = [self.query.convert_values(v,
self.query.extra_select_fields.get(a, None),
self.connection)
for v, a in izip(row[rn_offset:index_start], aliases)]
if self.connection.ops.oracle or getattr(self.query, 'geo_values', False):
# We resolve the rest of the columns if we're on Oracle or if
# the `geo_values` attribute is defined.
for value, field in map(None, row[index_start:], fields):
values.append(self.query.convert_values(value, field, connection=self.connection))
else:
values.extend(row[index_start:])
return tuple(values)
#### Routines unique to GeoQuery ####
def get_extra_select_format(self, alias):
sel_fmt = '%s'
if alias in self.query.custom_select:
sel_fmt = sel_fmt % self.query.custom_select[alias]
return sel_fmt
def get_field_select(self, field, alias=None, column=None):
"""
Returns the SELECT SQL string for the given field. Figures out
        if any custom selection SQL is needed for the column. The `alias`
keyword may be used to manually specify the database table where
the column exists, if not in the model associated with this
`GeoQuery`. Similarly, `column` may be used to specify the exact
column name, rather than using the `column` attribute on `field`.
"""
sel_fmt = self.get_select_format(field)
if field in self.query.custom_select:
field_sel = sel_fmt % self.query.custom_select[field]
else:
field_sel = sel_fmt % self._field_column(field, alias, column)
return field_sel
def get_select_format(self, fld):
"""
Returns the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKT. For all
other fields a simple '%s' format string is returned.
"""
if self.connection.ops.select and hasattr(fld, 'geom_type'):
# This allows operations to be done on fields in the SELECT,
# overriding their values -- used by the Oracle and MySQL
# spatial backends to get database values as WKT, and by the
# `transform` method.
sel_fmt = self.connection.ops.select
# Because WKT doesn't contain spatial reference information,
# the SRID is prefixed to the returned WKT to ensure that the
# transformed geometries have an SRID different than that of the
# field -- this is only used by `transform` for Oracle and
# SpatiaLite backends.
if self.query.transformed_srid and ( self.connection.ops.oracle or
self.connection.ops.spatialite ):
sel_fmt = "'SRID=%d;'||%s" % (self.query.transformed_srid, sel_fmt)
else:
sel_fmt = '%s'
return sel_fmt
# Private API utilities, subject to change.
def _field_column(self, field, table_alias=None, column=None):
"""
Helper function that returns the database column for the given field.
The table and column are returned (quoted) in the proper format, e.g.,
`"geoapp_city"."point"`. If `table_alias` is not specified, the
database table associated with the model of this `GeoQuery` will be
used. If `column` is specified, it will be used instead of the value
in `field.column`.
"""
if table_alias is None: table_alias = self.query.model._meta.db_table
return "%s.%s" % (self.quote_name_unless_alias(table_alias),
self.connection.ops.quote_name(column or field.column))
class SQLInsertCompiler(compiler.SQLInsertCompiler, GeoSQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, GeoSQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, GeoSQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, GeoSQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, GeoSQLCompiler):
pass
| bsd-3-clause |
spisneha25/django | tests/migrations/test_loader.py | 165 | 13346 | from __future__ import unicode_literals
from unittest import skipIf
from django.db import connection, connections
from django.db.migrations.exceptions import AmbiguityError, NodeNotFoundError
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.recorder import MigrationRecorder
from django.test import TestCase, modify_settings, override_settings
from django.utils import six
class RecorderTests(TestCase):
"""
Tests recording migrations as applied or not.
"""
def test_apply(self):
"""
Tests marking migrations as applied/unapplied.
"""
recorder = MigrationRecorder(connection)
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
set(),
)
recorder.record_applied("myapp", "0432_ponies")
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
{("myapp", "0432_ponies")},
)
# That should not affect records of another database
recorder_other = MigrationRecorder(connections['other'])
self.assertEqual(
set((x, y) for (x, y) in recorder_other.applied_migrations() if x == "myapp"),
set(),
)
recorder.record_unapplied("myapp", "0432_ponies")
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
set(),
)
class LoaderTests(TestCase):
"""
Tests the disk and database loader, and running through migrations
in memory.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
@modify_settings(INSTALLED_APPS={'append': 'basic'})
def test_load(self):
"""
Makes sure the loader can load the migrations for the test apps,
and then render them out to a new Apps.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0002_second"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0002_second"))
self.assertEqual(len(project_state.models), 2)
author_state = project_state.models["migrations", "author"]
self.assertEqual(
[x for x, y in author_state.fields],
["id", "name", "slug", "age", "rating"]
)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "author"]
)
# Ensure we've included unmigrated apps in there too
self.assertIn("basic", project_state.real_apps)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_unmigdep"})
def test_load_unmigrated_dependency(self):
"""
Makes sure the loader can load migrations with a dependency on an unmigrated app.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0001_initial")),
[
('contenttypes', '0001_initial'),
('auth', '0001_initial'),
("migrations", "0001_initial"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0001_initial"))
self.assertEqual(len([m for a, m in project_state.models if a == "migrations"]), 1)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "user"]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
def test_run_before(self):
"""
Makes sure the loader uses Migration.run_before.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0003_third"),
("migrations", "0002_second"),
],
)
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations_first",
"migrations2": "migrations2.test_migrations_2_first",
})
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
def test_first(self):
"""
Makes sure the '__first__' migrations build correctly.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "second")),
[
("migrations", "thefirst"),
("migrations2", "0001_initial"),
("migrations2", "0002_second"),
("migrations", "second"),
],
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_name_match(self):
"Tests prefix name matching"
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.get_migration_by_prefix("migrations", "0001").name,
"0001_initial",
)
with self.assertRaises(AmbiguityError):
migration_loader.get_migration_by_prefix("migrations", "0")
with self.assertRaises(KeyError):
migration_loader.get_migration_by_prefix("migrations", "blarg")
def test_load_import_error(self):
with override_settings(MIGRATION_MODULES={"migrations": "import_error_package"}):
with self.assertRaises(ImportError):
MigrationLoader(connection)
def test_load_module_file(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App with migrations module file not in unmigrated apps."
)
@skipIf(six.PY2, "PY2 doesn't load empty dirs.")
def test_load_empty_dir(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App missing __init__.py in migrations module not in unmigrated apps."
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_loading_squashed(self):
"Tests loading a squashed migration"
migration_loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
# Loading with nothing applied should just give us the one node
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
1,
)
# However, fake-apply one migration and it should now use the old two
recorder.record_applied("migrations", "0001_initial")
migration_loader.build_graph()
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
2,
)
recorder.flush()
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
def test_loading_squashed_complex(self):
"Tests loading a complex set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
def num_nodes():
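            # Number of migrations on the path to 7_auto that are not yet applied.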
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 to 5 cannot use the squashed migration
recorder.record_applied("migrations", "3_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "4_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
        # Starting at 5 to 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
recorder.flush()
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps(self):
loader = MigrationLoader(connection)
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
expected_plan = {
('app1', '4_auto'),
('app1', '2_squashed_3'),
('app2', '1_squashed_2'),
('app1', '1_auto')
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps_partially_applied(self):
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
recorder.record_applied('app1', '1_auto')
recorder.record_applied('app1', '2_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app1', '4_auto'),
('app1', '3_auto'),
('app2', '1_squashed_2'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_erroneous"})
def test_loading_squashed_erroneous(self):
"Tests loading a complex but erroneous set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 or 4 we'd need to use non-existing migrations
msg = ("Migration migrations.6_auto depends on nonexistent node ('migrations', '5_auto'). "
"Django tried to replace migration migrations.5_auto with any of "
"[migrations.3_squashed_5] but wasn't able to because some of the replaced "
"migrations are already applied.")
recorder.record_applied("migrations", "3_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
recorder.record_applied("migrations", "4_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
        # Starting at 5 to 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
recorder.flush()
| bsd-3-clause |
cchurch/ansible | lib/ansible/plugins/action/vyos.py | 10 | 4037 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.vyos.vyos import vyos_provider_spec
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True if self._task.action == 'vyos_config' else False
socket_path = None
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using network_cli and will be ignored')
del self._task.args['provider']
elif self._play_context.connection == 'local':
provider = load_provider(vyos_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'vyos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout')
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
# make sure we are in the right cli context which should be
        # enable mode and not config mode
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
if to_text(out, errors='surrogate_then_replace').strip().endswith('#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit discard')
result = super(ActionModule, self).run(task_vars=task_vars)
return result
| gpl-3.0 |
ehsan/airmozilla | airmozilla/manage/views/suggestions.py | 5 | 9769 | import datetime
from django import http
from django.contrib import messages
from django.shortcuts import render, redirect, get_object_or_404
from django.utils import timezone
from django.db import transaction
from funfactory.urlresolvers import reverse
from airmozilla.main.models import (
Event,
Template,
SuggestedEvent,
SuggestedEventComment,
LocationDefaultEnvironment,
Approval,
)
from airmozilla.manage import forms
from airmozilla.manage import sending
from airmozilla.comments.models import Discussion, SuggestedDiscussion
from .decorators import staff_required, permission_required
@staff_required
@permission_required('main.add_event')
def suggestions(request):
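    # List suggested events that are still awaiting review; unless
    # ?include_old is passed, only those first submitted within the
    # last 30 days are shown.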
context = {}
events = (
SuggestedEvent.objects
.filter(accepted=None)
.exclude(first_submitted=None)
.order_by('submitted')
)
context['include_old'] = request.GET.get('include_old')
if not context['include_old']:
now = timezone.now()
then = now - datetime.timedelta(days=30)
events = events.filter(first_submitted__gte=then)
context['events'] = events
return render(request, 'manage/suggestions.html', context)
@staff_required
@permission_required('main.add_event')
@transaction.atomic
def suggestion_review(request, id):
event = get_object_or_404(SuggestedEvent, pk=id)
real_event_form = None
comment_form = forms.SuggestedEventCommentForm()
if request.method == 'POST':
if not event.submitted:
return http.HttpResponseBadRequest('Not submitted')
form = forms.AcceptSuggestedEventForm(
request.POST,
instance=event,
)
if request.POST.get('save_comment'):
comment_form = forms.SuggestedEventCommentForm(data=request.POST)
if comment_form.is_valid():
comment = SuggestedEventComment.objects.create(
comment=comment_form.cleaned_data['comment'].strip(),
user=request.user,
suggested_event=event
)
sending.email_about_suggestion_comment(
comment,
request.user,
request
)
messages.info(
request,
'Comment added and %s notified.' % comment.user.email
)
return redirect('manage:suggestion_review', event.pk)
reject = request.POST.get('reject')
if reject:
form.fields['review_comments'].required = True
if not request.POST.get('save_comment') and form.is_valid():
form.save()
if reject:
event.submitted = None
event.status = SuggestedEvent.STATUS_REJECTED
event.save()
sending.email_about_rejected_suggestion(
event,
request.user,
request
)
messages.info(
request,
'Suggested event bounced back and %s has been emailed'
% (event.user.email,)
)
url = reverse('manage:suggestions')
return redirect(url)
else:
dict_event = {
'title': event.title,
'description': event.description,
'short_description': event.short_description,
'start_time': event.start_time,
'timezone': event.location.timezone,
'location': event.location.pk,
'channels': [x.pk for x in event.channels.all()],
'call_info': event.call_info,
'privacy': event.privacy,
'popcorn_url': event.popcorn_url,
'estimated_duration': event.estimated_duration,
'topics': [x.pk for x in event.topics.all()],
}
if dict_event['popcorn_url'] == 'https://':
dict_event['popcorn_url'] = ''
real_event_form = forms.EventRequestForm(
data=dict_event,
)
real_event_form.fields['placeholder_img'].required = False
if real_event_form.is_valid():
real = real_event_form.save(commit=False)
real.placeholder_img = event.placeholder_img
real.picture = event.picture
real.slug = event.slug
real.additional_links = event.additional_links
real.remote_presenters = event.remote_presenters
real.creator = request.user
if real.popcorn_url and not event.upcoming:
real.archive_time = real.start_time
if event.upcoming:
real.status = Event.STATUS_SUBMITTED
# perhaps we have a default location template
# environment
if real.location:
try:
default = (
LocationDefaultEnvironment.objects
.get(
location=real.location,
privacy=real.privacy
)
)
real.template = default.template
real.template_environment = (
default.template_environment
)
except LocationDefaultEnvironment.DoesNotExist:
pass
else:
real.status = Event.STATUS_PENDING
real.save()
[real.tags.add(x) for x in event.tags.all()]
[real.channels.add(x) for x in event.channels.all()]
[real.topics.add(x) for x in event.topics.all()]
event.accepted = real
event.save()
# create the necessary approval bits
if event.privacy == Event.PRIVACY_PUBLIC:
groups = []
for topic in real.topics.filter(is_active=True):
for group in topic.groups.all():
if group not in groups:
groups.append(group)
for group in groups:
Approval.objects.create(
event=real,
group=group,
)
sending.email_about_approval_requested(
real,
group,
request
)
try:
discussion = SuggestedDiscussion.objects.get(
event=event,
enabled=True
)
real_discussion = Discussion.objects.create(
enabled=True,
event=real,
notify_all=discussion.notify_all,
moderate_all=discussion.moderate_all,
)
for moderator in discussion.moderators.all():
real_discussion.moderators.add(moderator)
except SuggestedDiscussion.DoesNotExist:
pass
# if this is a popcorn event, and there is a default
# popcorn template, then assign that
if real.popcorn_url:
real.status = Event.STATUS_SCHEDULED
templates = Template.objects.filter(
default_popcorn_template=True
)
for template in templates[:1]:
real.template = template
real.save()
sending.email_about_accepted_suggestion(
event,
real,
request
)
messages.info(
request,
'New event created from suggestion.'
)
if real.popcorn_url or not event.upcoming:
url = reverse('manage:events')
else:
url = reverse('manage:event_edit', args=(real.pk,))
return redirect(url)
else:
print real_event_form.errors
else:
form = forms.AcceptSuggestedEventForm(instance=event)
# we don't need the label for this form layout
comment_form.fields['comment'].label = ''
comments = (
SuggestedEventComment.objects
.filter(suggested_event=event)
.select_related('User')
.order_by('created')
)
discussion = None
for each in SuggestedDiscussion.objects.filter(event=event):
discussion = each
context = {
'event': event,
'form': form,
'real_event_form': real_event_form,
'comment_form': comment_form,
'comments': comments,
'discussion': discussion,
}
return render(request, 'manage/suggestion_review.html', context)
| bsd-3-clause |
skidzo/mubosym | exa_9_moving_pendulum.py | 2 | 2254 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 3 21:15:49 2015
@author: oliver
"""
import numpy as np
from sympy import symbols
import mubosym as mbs
############################################################
# general system setup example
myMBS = mbs.MBSworld('moving_pendulum', connect=True, force_db_setup=False)
I = [0.,0.,0.]
############################################################
# rotating frame constraint
#
omega = 2.5 #try up to 30
A = 2.0
def rotation_inp(t):
return A*np.sin(omega*t)
def rotation_inp_diff(t):
return A*omega*np.cos(omega*t)
def rotation_inp_diff_2(t):
return -A*omega*omega*np.sin(omega*t)
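# phi(t) = A*sin(omega*t) together with its first and second time derivatives
# defines the prescribed motion of the moving marker along the world X axis.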
myMBS.add_parameter('phi', rotation_inp, rotation_inp_diff, rotation_inp_diff_2)
myMBS.add_moving_marker_para('rot_M0', 'world', 'phi', 0., 0., 0., 'X')
#myMBS.add_body_3d('mount', 'rot_M0', 1.0, I , 'rod-zero', parameters = [1.0,'X']) #[np.pi/2., 2.0])
#myMBS.add_force_special('mount', 'grav')
#myMBS.add_marker('mount_M0', 'mount', 0.,0.,0.)
myMBS.add_body_3d('pendulum', 'rot_M0', 1.0, I , 'rod-1-cardanic', parameters = [-1.5,0.]) #[np.pi/2., 2.0])
myMBS.add_force_special('pendulum', 'grav')
x0 = np.hstack(( 0. * np.ones(myMBS.dof), 0. * np.ones(myMBS.dof)))
for b in myMBS.bodies.keys():
myMBS.add_damping(b,0.02)
#################################################
# constants
g = symbols('g')
constants = [ g ] # Parameter definitions
constants_vals = [9.81] # Numerical value
const_dict = dict(zip(constants, constants_vals))
myMBS.set_const_dict(const_dict)
myMBS.kaneify()
body_frames_in_graphics = ['rot_M0','pendulum']
fixed_frames_in_graphics = []
bodies_in_graphics = {'pendulum': 'sphere'}
myMBS.prep_lambdas(body_frames_in_graphics, fixed_frames_in_graphics, [], bodies_in_graphics)
dt = 0.01 # refine if necesarry
t_max = 20.
####################
####
myMBS.inte_grate_full(x0, t_max, dt, mode = 0)
x_final = myMBS.x_t[-1]
################################################
# linear analysis of the last state (returns also the jacobian)
jac = myMBS.calc_lin_analysis_n(len(myMBS.x_t)-1)
myMBS.prepare(mbs.DATA_PATH, save=True)
#use a smaller time scale for good animation results
myMBS.animate(t_max, dt, scale = 4, time_scale = 1.0, t_ani = 20.)
| mit |
hbhzwj/imalse | tools/ns-allinone-3.14.1/pybindgen-0.15.0.809/waf-tools/shellcmd.py | 137 | 12146 | # Copyright (C) 2008 Gustavo J. A. M. Carneiro <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import shlex
import subprocess
import sys
import re
import os
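# Matches a leading environment-variable assignment token such as FOO=bar
# at the start of a command line (used by Pipeline.parse below).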
env_var_rx = re.compile(r"^([a-zA-Z0-9_]+)=(\S+)$")
def debug(message):
print >> sys.stderr, message
if sys.platform == 'win32':
dev_null = open("NUL:", "w")
else:
dev_null = open("/dev/null", "w")
fcntl = fd = fl = None
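# Where fcntl is available, mark the shared null-device handle close-on-exec
# so that child processes spawned later do not inherit it.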
try:
import fcntl
except ImportError:
pass
else:
fd = dev_null.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, fl | fcntl.FD_CLOEXEC)
del fcntl, fd, fl
def _open_out_file(filename):
if filename in ['NUL:', '/dev/null']:
return dev_null
else:
return open(filename, 'wb')
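# Minimal node types for the parsed command line: Command and Chdir are the
# leaves, while Pipe, And and Or represent the '|', '&&' and '||' operators.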
class Node(object):
pass
class Op(Node):
pass
class Pipe(Op):
pass
class And(Op):
pass
class Or(Op):
pass
class Command(Node):
class PIPE(object):
pass # PIPE is a constant
class STDOUT(object):
        pass # STDOUT is a constant
def __init__(self, name):
super(Command, self).__init__()
self.name = name # command name
self.argv = [name] # command argv
self.stdin = None
self.stdout = None
self.stderr = None
self.env_vars = None
def __repr__(self):
return "Command(%r, argv=%r, stdin=%r, stdout=%r, stderr=%r)" \
% (self.name, self.argv, self.stdin, self.stdout, self.stderr)
class Chdir(Node):
def __init__(self):
super(Chdir, self).__init__()
self.dir = None
def __repr__(self):
return "Chdir(%r)" \
% (self.dir)
class Pipeline(object):
def __init__(self):
self.current_command = None
self.pipeline = []
def _commit_command(self):
assert self.current_command is not None
self.pipeline.append(self.current_command)
self.current_command = None
def get_abbreviated_command(self):
l = []
for node in self.pipeline:
if isinstance(node, Command):
l.append(node.name)
if isinstance(node, Chdir):
l.append('cd %s' % node.dir)
elif isinstance(node, Pipe):
l.append('|')
elif isinstance(node, And):
l.append('&&')
            elif isinstance(node, Or):
l.append('||')
return ' '.join(l)
def parse(self, command):
self.current_command = None
self.pipeline = []
if isinstance(command, list):
tokens = list(command)
else:
tokens = shlex.split(command)
debug("command: shlex: %r" % (tokens,))
BEGIN, COMMAND, CHDIR, STDERR, STDOUT, STDIN = range(6)
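        # Small state machine over the shlex tokens: BEGIN accepts env-var
        # assignments and the command name, COMMAND collects arguments and
        # operators, and the remaining states capture the target of the most
        # recent redirection ('>', '2>', '<') or of 'cd'.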
state = BEGIN
self.current_command = None
env_vars = dict()
while tokens:
token = tokens.pop(0)
if state == BEGIN:
env_var_match = env_var_rx.match(token)
if env_var_match is not None:
env_vars[env_var_match.group(1)] = env_var_match.group(2)
else:
assert self.current_command is None
if token == 'cd':
self.current_command = Chdir()
assert not env_vars
state = CHDIR
else:
self.current_command = Command(token)
if env_vars:
self.current_command.env_vars = env_vars
env_vars = dict()
state = COMMAND
elif state == COMMAND:
if token == '>':
state = STDOUT
elif token == '2>':
state = STDERR
elif token == '2>&1':
assert self.current_command.stderr is None
self.current_command.stderr = Command.STDOUT
elif token == '<':
state = STDIN
elif token == '|':
assert self.current_command.stdout is None
self.current_command.stdout = Command.PIPE
self._commit_command()
self.pipeline.append(Pipe())
state = BEGIN
elif token == '&&':
self._commit_command()
self.pipeline.append(And())
state = BEGIN
elif token == '||':
self._commit_command()
self.pipeline.append(Or())
state = BEGIN
else:
self.current_command.argv.append(token)
elif state == CHDIR:
if token == '&&':
self._commit_command()
self.pipeline.append(And())
state = BEGIN
else:
assert self.current_command.dir is None
self.current_command.dir = token
elif state == STDOUT:
assert self.current_command.stdout is None
self.current_command.stdout = token
state = COMMAND
elif state == STDERR:
assert self.current_command.stderr is None
self.current_command.stderr = token
state = COMMAND
elif state == STDIN:
assert self.current_command.stdin is None
self.current_command.stdin = token
state = COMMAND
self._commit_command()
return self.pipeline
def _exec_piped_commands(self, commands):
retvals = []
for cmd in commands:
retvals.append(cmd.wait())
retval = 0
for r in retvals:
if r:
retval = retvals[-1]
break
return retval
def run(self, verbose=False):
pipeline = list(self.pipeline)
files_to_close = []
piped_commands = []
piped_commands_display = []
BEGIN, PIPE = range(2)
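        # state is PIPE while the previous command's stdout is being fed into
        # the next command's stdin; BEGIN starts a fresh segment of the pipeline.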
state = BEGIN
cwd = '.'
while pipeline:
node = pipeline.pop(0)
if isinstance(node, Chdir):
next_op = pipeline.pop(0)
assert isinstance(next_op, And)
cwd = os.path.join(cwd, node.dir)
if verbose:
piped_commands_display.append("cd %s &&" % node.dir)
continue
assert isinstance(node, (Command, Chdir))
cmd = node
if verbose:
if cmd.env_vars:
env_vars_str = ' '.join(['%s=%s' % (key, val) for key, val in cmd.env_vars.iteritems()])
piped_commands_display.append("%s %s" % (env_vars_str, ' '.join(cmd.argv)))
else:
piped_commands_display.append(' '.join(cmd.argv))
if state == PIPE:
stdin = piped_commands[-1].stdout
elif cmd.stdin is not None:
stdin = open(cmd.stdin, "r")
if verbose:
piped_commands_display.append('< %s' % cmd.stdin)
files_to_close.append(stdin)
else:
stdin = None
if cmd.stdout is None:
stdout = None
elif cmd.stdout is Command.PIPE:
stdout = subprocess.PIPE
else:
stdout = _open_out_file(cmd.stdout)
files_to_close.append(stdout)
if verbose:
piped_commands_display.append('> %s' % cmd.stdout)
if cmd.stderr is None:
stderr = None
elif cmd.stderr is Command.PIPE:
stderr = subprocess.PIPE
elif cmd.stderr is Command.STDOUT:
stderr = subprocess.STDOUT
if verbose:
piped_commands_display.append('2>&1')
else:
stderr = _open_out_file(cmd.stderr)
files_to_close.append(stderr)
if verbose:
piped_commands_display.append('2> %s' % cmd.stderr)
if cmd.env_vars:
env = dict(os.environ)
env.update(cmd.env_vars)
else:
env = None
if cwd == '.':
proc_cwd = None
else:
proc_cwd = cwd
debug("command: subprocess.Popen(argv=%r, stdin=%r, stdout=%r, stderr=%r, env_vars=%r, cwd=%r)"
% (cmd.argv, stdin, stdout, stderr, cmd.env_vars, proc_cwd))
proc = subprocess.Popen(cmd.argv, stdin=stdin, stdout=stdout, stderr=stderr, env=env, cwd=proc_cwd)
del stdin, stdout, stderr
piped_commands.append(proc)
try:
next_node = pipeline.pop(0)
except IndexError:
try:
retval = self._exec_piped_commands(piped_commands)
if verbose:
print "%s: exit code %i" % (' '.join(piped_commands_display), retval)
finally:
for f in files_to_close:
if f is not dev_null:
f.close()
files_to_close = []
return retval
else:
if isinstance(next_node, Pipe):
state = PIPE
piped_commands_display.append('|')
elif isinstance(next_node, Or):
try:
this_retval = self._exec_piped_commands(piped_commands)
finally:
for f in files_to_close:
if f is not dev_null:
f.close()
files_to_close = []
if this_retval == 0:
if verbose:
print "%s: exit code %i (|| is short-circuited)" % (' '.join(piped_commands_display), retval)
return this_retval
if verbose:
print "%s: exit code %i (|| proceeds)" % (' '.join(piped_commands_display), retval)
state = BEGIN
piped_commands = []
piped_commands_display = []
elif isinstance(next_node, And):
try:
this_retval = self._exec_piped_commands(piped_commands)
finally:
for f in files_to_close:
if f is not dev_null:
f.close()
files_to_close = []
if this_retval != 0:
if verbose:
print "%s: exit code %i (&& is short-circuited)" % (' '.join(piped_commands_display), retval)
return this_retval
if verbose:
print "%s: exit code %i (&& proceeds)" % (' '.join(piped_commands_display), retval)
state = BEGIN
piped_commands = []
piped_commands_display = []
def _main():
pipeline = Pipeline()
pipeline.parse('./foo.py 2>&1 < xxx | cat && ls')
print pipeline.run()
if __name__ == '__main__':
_main()
| gpl-3.0 |
AurelienNioche/MonkeyProject | data_management/data_manager.py | 1 | 6946 | import numpy as np
from data_management.database import Database
from utils.utils import log, today
class DataManager(object):
name = "DataManager"
def __init__(self, monkey, starting_point="2016-12-01", end_point=today(), database_path=None):
self.db = Database(database_path)
self.monkey = monkey
self.starting_point = starting_point
self.end_point = end_point
def select_relevant_dates(self, dates_list):
log("Starting point: {}.".format(self.starting_point), self.name)
log("End point: {}.".format(self.end_point), self.name)
starting_point = [int(i) for i in self.starting_point.split("-")]
end_point = [int(i) for i in self.end_point.split("-")]
relevant_dates = []
for str_date in dates_list:
date = [int(i) for i in str_date.split("-")]
# If year of date is between the years of starting point and end point (but not equal to them)
if starting_point[0] < date[0] < end_point[0]:
relevant_dates.append(str_date)
elif starting_point[0] > date[0] or date[0] > end_point[0]:
continue
# If year of date is equal to the years of starting point and end point (which are equal)
elif date[0] == starting_point[0] == end_point[0]:
if starting_point[1] > date[1] or date[1] > end_point[1]:
continue
elif (end_point[1] > date[1] > starting_point[1]) \
or (date[1] == starting_point[1] == end_point[1]
and starting_point[2] <= date[2] <= end_point[2]) \
or (date[1] == starting_point[1]
and date[2] >= starting_point[2]) \
or (date[1] == end_point[1]
and date[2] <= end_point[2]):
relevant_dates.append(str_date)
# If year of date is equal to the year of starting point (and is inferior to the year of end point)
elif date[0] == starting_point[0]:
if (date[1] > starting_point[1])\
or (date[1] == starting_point[1]
and date[2] >= starting_point[2]):
relevant_dates.append(str_date)
# If year of date is equal to the year of starting point (and is superior to the year of starting point)
elif date[0] == end_point[0]:
if (date[1] < end_point[1]) \
or (date[1] == end_point[1]
and date[2] <= end_point[2]):
relevant_dates.append(str_date)
return relevant_dates
def get_dates(self):
assert self.db.table_exists("summary")
all_dates = np.unique(self.db.read_column(table_name="summary", column_name='date', monkey=self.monkey))
assert len(all_dates)
dates = self.select_relevant_dates(all_dates)
log("N dates: {}.".format(len(dates)), self.name)
log("Relevant dates: {}".format(dates), self.name)
return dates
def get_errors_p_x0_x1_choices_from_db(self, dates):
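        # Collect, trial by trial, the lottery parameters (p, x0, x1) for each
        # side together with errors, choices, session indices and dates, by
        # walking the per-session tables referenced in the 'summary' table.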
p = {"left": [], "right": []}
x0 = {"left": [], "right": []}
x1 = {"left": [], "right": []}
error = []
choice = []
session = []
date_list = []
for idx, date in enumerate(sorted(dates)):
session_table = \
self.db.read_column(table_name="summary", column_name='session_table',
monkey=self.monkey, date=date)
if type(session_table) == list:
session_table = session_table[-1]
error_session = self.db.read_column(table_name=session_table, column_name="error")
choice_session = self.db.read_column(table_name=session_table, column_name="choice")
error += error_session
choice += choice_session
session += [idx, ] * len(error_session)
date_list += [date, ] * len(error_session)
for side in ["left", "right"]:
p[side] += \
[float(i) for i in self.db.read_column(table_name=session_table, column_name='{}_p'.format(side))]
x0[side] += \
[int(i) for i in self.db.read_column(table_name=session_table, column_name='{}_x0'.format(side))]
x1[side] += \
[int(i) for i in self.db.read_column(table_name=session_table, column_name='{}_x1'.format(side))]
return error, p, x0, x1, choice, session, date_list
def filter_valid_trials(self, error, p, x0, x1, choice, session, date):
new_p = {"left": [], "right": []}
new_x0 = {"left": [], "right": []}
new_x1 = {"left": [], "right": []}
new_choice = []
new_session = []
new_date = []
valid_trials = np.where(np.asarray(error) == "None")[0]
log("N valid trials: {}.".format(len(valid_trials)), self.name)
for valid_idx in valid_trials:
new_date.append(date[valid_idx])
new_session.append(session[valid_idx])
new_choice.append(choice[valid_idx])
for side in ["left", "right"]:
new_p[side].append(p[side][valid_idx])
new_x0[side].append(x0[side][valid_idx])
new_x1[side].append(x1[side][valid_idx])
for side in ["left", "right"]:
new_p[side] = np.asarray(new_p[side])
new_x0[side] = np.asarray(new_x0[side])
new_x1[side] = np.asarray(new_x1[side])
new_choice = np.asarray(new_choice)
new_session = np.asarray(new_session)
new_date = np.asarray(new_date)
return new_p, new_x0, new_x1, new_choice, new_session, new_date
def run(self):
log("Import data for {}.".format(self.monkey), self.name)
dates = self.get_dates()
assert len(dates), "Fatal: No valid dates found, \n" \
"Please give a look at the analysis parameters (analysis/parameters/parameters.py)."
error, p, x0, x1, choice, session, date = self.get_errors_p_x0_x1_choices_from_db(dates)
p, x0, x1, choice, session, date = self.filter_valid_trials(error, p, x0, x1, choice, session, date)
assert sum(x1["left"]) == 0 and sum(x1["right"]) == 0
log("Done!", self.name)
return {"p": p, "x0": x0, "x1": x1, "choice": choice, "session": session, "date": date}
def import_data(monkey, starting_point="2016-12-01", end_point=today(), database_path=None):
d = DataManager(monkey=monkey, starting_point=starting_point, end_point=end_point, database_path=database_path)
return d.run()
def main():
d = DataManager(monkey='Havane', starting_point="2016-08-01", end_point=today())
return d.get_dates()
if __name__ == "__main__":
main()
| gpl-3.0 |
1stvamp/pip | tests/functional/test_install_config.py | 2 | 6536 | import os
import tempfile
import textwrap
def test_options_from_env_vars(script):
"""
Test if ConfigOptionParser reads env vars (e.g. not using PyPI here)
"""
script.environ['PIP_NO_INDEX'] = '1'
result = script.pip('install', '-vvv', 'INITools', expect_error=True)
assert "Ignoring indexes:" in result.stdout, str(result)
assert (
"DistributionNotFound: No distributions at all found for INITools"
in result.stdout
)
def test_command_line_options_override_env_vars(script, virtualenv):
"""
Test that command line options override environmental variables.
"""
script.environ['PIP_INDEX_URL'] = 'http://b.pypi.python.org/simple/'
result = script.pip('install', '-vvv', 'INITools', expect_error=True)
assert (
"Getting page http://b.pypi.python.org/simple/INITools"
in result.stdout
)
virtualenv.clear()
result = script.pip(
'install', '-vvv', '--index-url', 'http://download.zope.org/ppix',
'INITools',
expect_error=True,
)
assert "b.pypi.python.org" not in result.stdout
assert "Getting page http://download.zope.org/ppix" in result.stdout
def test_env_vars_override_config_file(script, virtualenv):
"""
Test that environmental variables override settings in config files.
"""
fd, config_file = tempfile.mkstemp('-pip.cfg', 'test-')
try:
_test_env_vars_override_config_file(script, virtualenv, config_file)
finally:
# `os.close` is a workaround for a bug in subprocess
# http://bugs.python.org/issue3210
os.close(fd)
os.remove(config_file)
def _test_env_vars_override_config_file(script, virtualenv, config_file):
# set this to make pip load it
script.environ['PIP_CONFIG_FILE'] = config_file
# It's important that we test this particular config value ('no-index')
# because there is/was a bug which only shows up in cases in which
# 'config-item' and 'config_item' hash to the same value modulo the size
# of the config dictionary.
(script.scratch_path / config_file).write(textwrap.dedent("""\
[global]
no-index = 1
"""))
result = script.pip('install', '-vvv', 'INITools', expect_error=True)
assert (
"DistributionNotFound: No distributions at all found for INITools"
in result.stdout
)
script.environ['PIP_NO_INDEX'] = '0'
virtualenv.clear()
result = script.pip('install', '-vvv', 'INITools', expect_error=True)
assert "Successfully installed INITools" in result.stdout
def test_command_line_append_flags(script, virtualenv, data):
"""
Test command line flags that append to defaults set by environmental
variables.
"""
script.environ['PIP_FIND_LINKS'] = 'http://pypi.pinaxproject.com'
result = script.pip('install', '-vvv', 'INITools', expect_error=True)
assert (
"Analyzing links from page http://pypi.pinaxproject.com"
in result.stdout
)
virtualenv.clear()
result = script.pip(
'install', '-vvv', '--find-links', data.find_links, 'INITools',
expect_error=True,
)
assert (
"Analyzing links from page http://pypi.pinaxproject.com"
in result.stdout
)
assert "Skipping link %s" % data.find_links in result.stdout
def test_command_line_appends_correctly(script, data):
"""
Test multiple appending options set by environmental variables.
"""
script.environ['PIP_FIND_LINKS'] = (
'http://pypi.pinaxproject.com %s' % data.find_links
)
result = script.pip('install', '-vvv', 'INITools', expect_error=True)
assert (
"Analyzing links from page http://pypi.pinaxproject.com"
in result.stdout
), result.stdout
assert "Skipping link %s" % data.find_links in result.stdout
def test_config_file_override_stack(script, virtualenv):
"""
Test config files (global, overriding a global config with a
local, overriding all with a command line flag).
"""
fd, config_file = tempfile.mkstemp('-pip.cfg', 'test-')
try:
_test_config_file_override_stack(script, virtualenv, config_file)
finally:
# `os.close` is a workaround for a bug in subprocess
# http://bugs.python.org/issue3210
os.close(fd)
os.remove(config_file)
def _test_config_file_override_stack(script, virtualenv, config_file):
# set this to make pip load it
script.environ['PIP_CONFIG_FILE'] = config_file
(script.scratch_path / config_file).write(textwrap.dedent("""\
[global]
index-url = http://download.zope.org/ppix
"""))
result = script.pip('install', '-vvv', 'INITools', expect_error=True)
assert (
"Getting page http://download.zope.org/ppix/INITools" in result.stdout
)
virtualenv.clear()
(script.scratch_path / config_file).write(textwrap.dedent("""\
[global]
index-url = http://download.zope.org/ppix
[install]
index-url = https://pypi.gocept.com/
"""))
result = script.pip('install', '-vvv', 'INITools', expect_error=True)
assert "Getting page https://pypi.gocept.com/INITools" in result.stdout
result = script.pip(
'install', '-vvv', '--index-url', 'http://pypi.python.org/simple',
'INITools',
expect_error=True,
)
assert (
"Getting page http://download.zope.org/ppix/INITools"
not in result.stdout
)
assert "Getting page https://pypi.gocept.com/INITools" not in result.stdout
assert (
"Getting page http://pypi.python.org/simple/INITools" in result.stdout
)
def test_log_file_no_directory():
"""
Test opening a log file with no directory name.
"""
from pip.basecommand import open_logfile
fp = open_logfile('testpip.log')
fp.write('can write')
fp.close()
assert os.path.exists(fp.name)
os.remove(fp.name)
def test_options_from_venv_config(script, virtualenv):
"""
Test if ConfigOptionParser reads a virtualenv-local config file
"""
from pip.locations import default_config_basename
conf = "[global]\nno-index = true"
ini = virtualenv.location / default_config_basename
with open(ini, 'w') as f:
f.write(conf)
result = script.pip('install', '-vvv', 'INITools', expect_error=True)
assert "Ignoring indexes:" in result.stdout, str(result)
assert (
"DistributionNotFound: No distributions at all found for INITools"
in result.stdout
)
| mit |
Stuxnet-Kernel/kernel_g3 | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
CGar4/myzsh | plugins/zsh-git-prompt/gitstatus.py | 13 | 2323 | #!/usr/bin/env python
from __future__ import print_function
# change this symbol to whatever you prefer
prehash = ':'
from subprocess import Popen, PIPE
import sys
gitsym = Popen(['git', 'symbolic-ref', 'HEAD'], stdout=PIPE, stderr=PIPE)
branch, error = gitsym.communicate()
error_string = error.decode('utf-8')
if 'fatal: Not a git repository' in error_string:
sys.exit(0)
branch = branch.decode("utf-8").strip()[11:]
res, err = Popen(['git','diff','--name-status'], stdout=PIPE, stderr=PIPE).communicate()
err_string = err.decode('utf-8')
if 'fatal' in err_string:
sys.exit(0)
changed_files = [namestat[0] for namestat in res.decode("utf-8").splitlines()]
staged_files = [namestat[0] for namestat in Popen(['git','diff', '--staged','--name-status'], stdout=PIPE).communicate()[0].splitlines()]
nb_changed = len(changed_files) - changed_files.count('U')
nb_U = staged_files.count('U')
nb_staged = len(staged_files) - nb_U
staged = str(nb_staged)
conflicts = str(nb_U)
changed = str(nb_changed)
nb_untracked = len([0 for status in Popen(['git','status','--porcelain',],stdout=PIPE).communicate()[0].decode("utf-8").splitlines() if status.startswith('??')])
untracked = str(nb_untracked)
ahead, behind = 0,0
if not branch: # not on any branch
branch = prehash + Popen(['git','rev-parse','--short','HEAD'], stdout=PIPE).communicate()[0].decode("utf-8")[:-1]
else:
remote_name = Popen(['git','config','branch.%s.remote' % branch], stdout=PIPE).communicate()[0].decode("utf-8").strip()
if remote_name:
merge_name = Popen(['git','config','branch.%s.merge' % branch], stdout=PIPE).communicate()[0].decode("utf-8").strip()
if remote_name == '.': # local
remote_ref = merge_name
else:
remote_ref = 'refs/remotes/%s/%s' % (remote_name, merge_name[11:])
revgit = Popen(['git', 'rev-list', '--left-right', '%s...HEAD' % remote_ref],stdout=PIPE, stderr=PIPE)
revlist = revgit.communicate()[0]
if revgit.poll(): # fallback to local
revlist = Popen(['git', 'rev-list', '--left-right', '%s...HEAD' % merge_name],stdout=PIPE, stderr=PIPE).communicate()[0]
behead = revlist.decode("utf-8").splitlines()
ahead = len([x for x in behead if x[0]=='>'])
behind = len(behead) - ahead
out = ' '.join([
branch,
str(ahead),
str(behind),
staged,
conflicts,
changed,
untracked,
])
print(out, end='')
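# Editorial note (not part of the original script): the fields above are joined
# in the order  branch ahead behind staged conflicts changed untracked.
# For example, a repository on "master", 2 commits ahead of its remote, with
# 1 staged file, no conflicts, 3 changed files and 1 untracked file prints:
#   master 2 0 1 0 3 1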
| mit |
jasonbrooks/kubernetes | hack/boilerplate/boilerplate.py | 300 | 6214 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import difflib
import glob
import json
import mmap
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
"--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
"--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
"-v", "--verbose",
help="give verbose output regarding why a file does not pass",
action="store_true")
args = parser.parse_args()
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except Exception as exc:
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
return False
data = f.read()
f.close()
basename = os.path.basename(filename)
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
print('File %s smaller than reference (%d < %d)' %
(filename, len(data), len(ref)),
file=verbose_out)
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
print('File %s is missing the year' % filename, file=verbose_out)
return False
# Replace all occurrences of the regex "2017|2016|2015|2014" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
if args.verbose:
print(file=verbose_out)
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
print(line, file=verbose_out)
print(file=verbose_out)
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
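# Editorial examples (not part of the original file) for the helper above; each
# follows directly from os.path.splitext plus the trailing split()/lower():
#   file_extension("pkg/util.go")  -> "go"
#   file_extension("hack/e2e.SH")  -> "sh"
#   file_extension("Makefile")     -> ""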
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
"vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
"pkg/generated/bindata.go"]
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# dates can be 2014, 2015, 2016, or 2017; company holder names can be anything
regexs["date"] = re.compile( '(2014|2015|2016|2017)' )
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
shaufi10/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/logreport.py | 386 | 1736 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import tempfile
LOG_DEBUG='debug'
LOG_INFO='info'
LOG_WARNING='warn'
LOG_ERROR='error'
LOG_CRITICAL='critical'
_logger = logging.getLogger(__name__)
def log_detail(self):
import os
logfile_name = os.path.join(tempfile.gettempdir(), "openerp_report_designer.log")
hdlr = logging.FileHandler(logfile_name)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
_logger.addHandler(hdlr)
_logger.setLevel(logging.INFO)
class Logger(object):
def log_write(self, name, level, msg):
getattr(_logger,level)(msg)
def shutdown(self):
logging.shutdown()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wkeyword/pip | pip/_vendor/cachecontrol/heuristics.py | 374 | 4053 | import calendar
import time
from email.utils import formatdate, parsedate, parsedate_tz
from datetime import datetime, timedelta
TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
def expire_after(delta, date=None):
date = date or datetime.now()
return date + delta
def datetime_to_header(dt):
return formatdate(calendar.timegm(dt.timetuple()))
class BaseHeuristic(object):
def warning(self, response):
"""
Return a valid 1xx warning header value describing the cache
adjustments.
        The response is provided to allow warnings like 113
        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
        to explicitly say the response is over 24 hours old.
"""
return '110 - "Response is Stale"'
def update_headers(self, response):
"""Update the response headers with any new headers.
NOTE: This SHOULD always include some Warning header to
signify that the response was cached by the client, not
by way of the provided headers.
"""
return {}
def apply(self, response):
warning_header_value = self.warning(response)
response.headers.update(self.update_headers(response))
if warning_header_value is not None:
response.headers.update({'Warning': warning_header_value})
return response
class OneDayCache(BaseHeuristic):
"""
Cache the response by providing an expires 1 day in the
future.
"""
def update_headers(self, response):
headers = {}
if 'expires' not in response.headers:
date = parsedate(response.headers['date'])
expires = expire_after(timedelta(days=1),
date=datetime(*date[:6]))
headers['expires'] = datetime_to_header(expires)
headers['cache-control'] = 'public'
return headers
class ExpiresAfter(BaseHeuristic):
"""
Cache **all** requests for a defined time period.
"""
def __init__(self, **kw):
self.delta = timedelta(**kw)
def update_headers(self, response):
expires = expire_after(self.delta)
return {
'expires': datetime_to_header(expires),
'cache-control': 'public',
}
def warning(self, response):
tmpl = '110 - Automatically cached for %s. Response might be stale'
return tmpl % self.delta
class LastModified(BaseHeuristic):
"""
If there is no Expires header already, fall back on Last-Modified
using the heuristic from
http://tools.ietf.org/html/rfc7234#section-4.2.2
to calculate a reasonable value.
Firefox also does something like this per
https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
    Unlike Mozilla, we limit this to 24 hours.
"""
cacheable_by_default_statuses = set([
200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501
])
def update_headers(self, resp):
headers = resp.headers
if 'expires' in headers:
return {}
if 'cache-control' in headers and headers['cache-control'] != 'public':
return {}
if resp.status not in self.cacheable_by_default_statuses:
return {}
if 'date' not in headers or 'last-modified' not in headers:
return {}
date = calendar.timegm(parsedate_tz(headers['date']))
last_modified = parsedate(headers['last-modified'])
if date is None or last_modified is None:
return {}
now = time.time()
current_age = max(0, now - date)
delta = date - calendar.timegm(last_modified)
freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
if freshness_lifetime <= current_age:
return {}
expires = date + freshness_lifetime
return {'expires': time.strftime(TIME_FMT, time.gmtime(expires))}
def warning(self, resp):
return None
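# Editorial usage sketch (not part of the original module). When using the
# standalone cachecontrol distribution, the session wrapper accepts one of the
# heuristics above via its `heuristic` keyword argument; the URL below is
# illustrative only.
#   >>> import requests
#   >>> from cachecontrol import CacheControl
#   >>> from cachecontrol.heuristics import ExpiresAfter
#   >>> sess = CacheControl(requests.Session(), heuristic=ExpiresAfter(days=1))
#   >>> resp = sess.get('http://example.com/')  # doctest: +SKIP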
| mit |
wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/sql/tools/ssl_certs/create.py | 2 | 3612 | # Copyright 2013 Google Inc. All Rights Reserved.
"""Creates an SSL certificate for a Cloud SQL instance."""
import os
from googlecloudsdk.core.util import files
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.util import list_printer
from googlecloudsdk.sql import util
class AddCert(base.Command):
"""Creates an SSL certificate for a Cloud SQL instance."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument(
'common_name',
help='User supplied name. Constrained to [a-zA-Z.-_ ]+.')
parser.add_argument(
'cert_file',
default=None,
help=('Location of file which the private key of the created ssl-cert'
' will be written to.'))
@util.ReraiseHttpException
def Run(self, args):
"""Creates an SSL certificate for a Cloud SQL instance.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object representing the operations resource describing the create
operation if the create was successful.
Raises:
HttpException: A http error response was received while executing api
request.
ToolException: An error other than http error occured while executing the
command.
"""
if os.path.exists(args.cert_file):
raise exceptions.ToolException('file [{path}] already exists'.format(
path=args.cert_file))
    # First check that args.cert_file is writable. If not, abort and don't create
# the useless cert.
try:
with files.OpenForWritingPrivate(args.cert_file) as cf:
cf.write('placeholder\n')
except OSError as e:
raise exceptions.ToolException('unable to write [{path}]: {error}'.format(
path=args.cert_file, error=str(e)))
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
util.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
# TODO(user): figure out how to rectify the common_name and the
# sha1fingerprint, so that things can work with the resource parser.
result = sql_client.sslCerts.Insert(
sql_messages.SqlSslCertsInsertRequest(
project=instance_ref.project,
instance=instance_ref.instance,
sslCertsInsertRequest=sql_messages.SslCertsInsertRequest(
commonName=args.common_name)))
private_key = result.clientCert.certPrivateKey
with files.OpenForWritingPrivate(args.cert_file) as cf:
cf.write(private_key)
cf.write('\n')
cert_ref = resources.Create(
collection='sql.sslCerts',
project=instance_ref.project,
instance=instance_ref.instance,
sha1Fingerprint=result.clientCert.certInfo.sha1Fingerprint)
log.CreatedResource(cert_ref)
return result
def Display(self, args, result):
"""Display prints information about what just happened to stdout.
Args:
args: The same as the args in Run.
result: A dict object representing the response if the api
request was successful.
"""
list_printer.PrintResourceList('sql.sslCerts', [result.clientCert.certInfo])
| apache-2.0 |
appsecyogi/Mobile-Security-Framework-MobSF | StaticAnalyzer/tools/enjarify/enjarify/jvm/optimization/consts.py | 13 | 3287 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from .. import scalartypes as scalars
from .. import ir
def allocateRequiredConstants(pool, long_irs):
# see comments in writebytecode.finishCodeAttrs
# We allocate the constants pretty much greedily. This is far from optimal,
# but it shouldn't be a big deal since this code is almost never required
# in the first place. In fact, there are no known real world classes that
# even come close to exhausting the constant pool.
# print('{} methods potentially too long'.format(len(long_irs)))
# print(sorted([_ir.upper_bound for _ir in long_irs], reverse=True))
# for _ir in long_irs:
# print(_ir.method.id.triple(), _ir.upper_bound)
narrow_pairs = collections.Counter()
wide_pairs = collections.Counter()
alt_lens = {}
for _ir in long_irs:
for ins in _ir.flat_instructions:
if isinstance(ins, ir.PrimConstant):
key = ins.cpool_key()
alt_lens[key] = len(ins.bytecode)
if scalars.iswide(ins.st):
if len(ins.bytecode) > 3:
wide_pairs[key] += 1
else:
if len(ins.bytecode) > 2:
narrow_pairs[key] += 1
# see if already in the constant pool
for x in pool.vals:
del narrow_pairs[x]
del wide_pairs[x]
# if we have enough space for all required constants, preferentially allocate
# most commonly used constants to first 255 slots
if pool.space() >= len(narrow_pairs) + 2*len(wide_pairs) and pool.lowspace() > 0:
# We can't use Counter.most_common here because it is nondeterminstic in
# the case of ties.
most_common = sorted(narrow_pairs, key=lambda p:(-narrow_pairs[p], p))
for key in most_common[:pool.lowspace()]:
pool.insertDirectly(key, True)
del narrow_pairs[key]
scores = {}
for p, count in narrow_pairs.items():
scores[p] = (alt_lens[p] - 3) * count
for p, count in wide_pairs.items():
scores[p] = (alt_lens[p] - 3) * count
# sort by score
narrowq = sorted(narrow_pairs, key=lambda p:(scores[p], p))
wideq = sorted(wide_pairs, key=lambda p:(scores[p], p))
while pool.space() >= 1 and (narrowq or wideq):
if not narrowq and pool.space() < 2:
break
wscore = sum(scores[p] for p in wideq[-1:])
nscore = sum(scores[p] for p in narrowq[-2:])
if pool.space() >= 2 and wscore > nscore and wscore > 0:
pool.insertDirectly(wideq.pop(), False)
elif nscore > 0:
pool.insertDirectly(narrowq.pop(), True)
else:
break
| gpl-3.0 |
goldeneye-source/ges-python | lib/encodings/__init__.py | 764 | 5067 | """ Standard "encodings" Package
Standard Python encoding modules are stored in this package
directory.
Codec modules must have names corresponding to normalized encoding
names as defined in the normalize_encoding() function below, e.g.
'utf-8' must be implemented by the module 'utf_8.py'.
Each codec module must export the following interface:
* getregentry() -> codecs.CodecInfo object
The getregentry() API must return a CodecInfo object with encoder, decoder,
incrementalencoder, incrementaldecoder, streamwriter and streamreader
atttributes which adhere to the Python Codec Interface Standard.
In addition, a module may optionally also define the following
APIs which are then used by the package's codec search function:
* getaliases() -> sequence of encoding name strings to use as aliases
Alias names returned by getaliases() must be normalized encoding
names as defined by normalize_encoding().
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
from . import aliases
_cache = {}
_unknown = '--unknown--'
_import_tail = ['*']
_aliases = aliases.aliases
class CodecRegistryError(LookupError, SystemError):
pass
def normalize_encoding(encoding):
""" Normalize an encoding name.
Normalization works as follows: all non-alphanumeric
characters except the dot used for Python package names are
collapsed and replaced with a single underscore, e.g. ' -;#'
becomes '_'. Leading and trailing underscores are removed.
Note that encoding names should be ASCII only; if they do use
non-ASCII characters, these must be Latin-1 compatible.
"""
if isinstance(encoding, bytes):
encoding = str(encoding, "ascii")
chars = []
punct = False
for c in encoding:
if c.isalnum() or c == '.':
if punct and chars:
chars.append('_')
chars.append(c)
punct = False
else:
punct = True
return ''.join(chars)
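# Editorial examples (not part of the original module); each follows from the
# collapse-punctuation rule implemented above (note that case is preserved):
#   normalize_encoding('UTF-8')       -> 'UTF_8'
#   normalize_encoding('latin-1')     -> 'latin_1'
#   normalize_encoding(' -;#utf 8 ')  -> 'utf_8'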
def search_function(encoding):
# Cache lookup
entry = _cache.get(encoding, _unknown)
if entry is not _unknown:
return entry
# Import the module:
#
# First try to find an alias for the normalized encoding
# name and lookup the module using the aliased name, then try to
# lookup the module using the standard import scheme, i.e. first
# try in the encodings package, then at top-level.
#
norm_encoding = normalize_encoding(encoding)
aliased_encoding = _aliases.get(norm_encoding) or \
_aliases.get(norm_encoding.replace('.', '_'))
if aliased_encoding is not None:
modnames = [aliased_encoding,
norm_encoding]
else:
modnames = [norm_encoding]
for modname in modnames:
if not modname or '.' in modname:
continue
try:
# Import is absolute to prevent the possibly malicious import of a
# module with side-effects that is not in the 'encodings' package.
mod = __import__('encodings.' + modname, fromlist=_import_tail,
level=0)
except ImportError:
pass
else:
break
else:
mod = None
try:
getregentry = mod.getregentry
except AttributeError:
# Not a codec module
mod = None
if mod is None:
# Cache misses
_cache[encoding] = None
return None
# Now ask the module for the registry entry
entry = getregentry()
if not isinstance(entry, codecs.CodecInfo):
if not 4 <= len(entry) <= 7:
raise CodecRegistryError('module "%s" (%s) failed to register'
% (mod.__name__, mod.__file__))
if not callable(entry[0]) or not callable(entry[1]) or \
(entry[2] is not None and not callable(entry[2])) or \
(entry[3] is not None and not callable(entry[3])) or \
(len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \
(len(entry) > 5 and entry[5] is not None and not callable(entry[5])):
raise CodecRegistryError('incompatible codecs in module "%s" (%s)'
% (mod.__name__, mod.__file__))
if len(entry)<7 or entry[6] is None:
entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
entry = codecs.CodecInfo(*entry)
# Cache the codec registry entry
_cache[encoding] = entry
# Register its aliases (without overwriting previously registered
# aliases)
try:
codecaliases = mod.getaliases()
except AttributeError:
pass
else:
for alias in codecaliases:
if alias not in _aliases:
_aliases[alias] = modname
# Return the registry entry
return entry
# Register the search_function in the Python codec registry
codecs.register(search_function)
| gpl-3.0 |
Lujeni/ansible | contrib/inventory/spacewalk.py | 28 | 8792 | #!/usr/bin/env python
"""
Spacewalk external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is dependent upon the spacealk-reports package being installed
on the same machine. It is basically a CSV-to-JSON converter from the
output of "spacewalk-report system-groups-systems|inventory".
Tested with Ansible 1.9.2 and spacewalk 2.3
"""
#
# Author:: Jon Miller <[email protected]>
# Copyright:: Copyright (c) 2013, Jon Miller
#
# Extended for support of multiple organizations and
# adding the "_meta" dictionary to --list output by
# Bernhard Lichtinger <[email protected]> 2015
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import sys
import os
import time
from optparse import OptionParser
import subprocess
import json
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
base_dir = os.path.dirname(os.path.realpath(__file__))
default_ini_file = os.path.join(base_dir, "spacewalk.ini")
SW_REPORT = '/usr/bin/spacewalk-report'
CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
CACHE_AGE = 300 # 5min
INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file)))
# Sanity check
if not os.path.exists(SW_REPORT):
print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr)
sys.exit(1)
# Pre-startup work
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
os.chmod(CACHE_DIR, 0o2775)
# Helper functions
# ------------------------------
def spacewalk_report(name):
"""Yield a dictionary form of each CSV output produced by the specified
spacewalk-report
"""
cache_filename = os.path.join(CACHE_DIR, name)
if not os.path.exists(cache_filename) or \
(time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE:
# Update the cache
fh = open(cache_filename, 'w')
p = subprocess.Popen([SW_REPORT, name], stdout=fh)
p.wait()
fh.close()
lines = open(cache_filename, 'r').readlines()
keys = lines[0].strip().split(',')
# add 'spacewalk_' prefix to the keys
keys = ['spacewalk_' + key for key in keys]
for line in lines[1:]:
values = line.strip().split(',')
if len(keys) == len(values):
yield dict(zip(keys, values))
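# Editorial example (not part of the original script): given a cached report
# whose CSV content is
#   group_id,org_id
#   7,1
# the generator above yields
#   {'spacewalk_group_id': '7', 'spacewalk_org_id': '1'}
# i.e. every column name gains a 'spacewalk_' prefix and all values stay strings.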
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
parser.add_option('-H', '--human', dest="human",
default=False, action="store_true",
help="Produce a friendlier version of either server list or host detail")
parser.add_option('-o', '--org', default=None, dest="org_number",
help="Limit to spacewalk organization number")
parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true",
help="Prefix the group name with the organization number")
(options, args) = parser.parse_args()
# read spacewalk.ini if present
# ------------------------------
if os.path.exists(INI_FILE):
config = ConfigParser.SafeConfigParser()
config.read(INI_FILE)
if config.has_option('spacewalk', 'cache_age'):
CACHE_AGE = config.get('spacewalk', 'cache_age')
if not options.org_number and config.has_option('spacewalk', 'org_number'):
options.org_number = config.get('spacewalk', 'org_number')
if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'):
options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name')
# Generate dictionary for mapping group_id to org_id
# ------------------------------
org_groups = {}
try:
for group in spacewalk_report('system-groups'):
org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id']
except (OSError) as e:
print('Problem executing the command "%s system-groups": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
# List out the known server from Spacewalk
# ------------------------------
if options.list:
# to build the "_meta"-Group with hostvars first create dictionary for later use
host_vars = {}
try:
for item in spacewalk_report('inventory'):
host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items())
except (OSError) as e:
print('Problem executing the command "%s inventory": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
groups = {}
meta = {"hostvars": {}}
try:
for system in spacewalk_report('system-groups-systems'):
# first get org_id of system
org_id = org_groups[system['spacewalk_group_id']]
# shall we add the org_id as prefix to the group name:
if options.prefix_org_name:
prefix = org_id + "-"
group_name = prefix + system['spacewalk_group_name']
else:
group_name = system['spacewalk_group_name']
# if we are limited to one organization:
if options.org_number:
if org_id == options.org_number:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
# or we list all groups and systems:
else:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
except (OSError) as e:
print('Problem executing the command "%s system-groups-systems": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
if options.human:
for group, systems in iteritems(groups):
print('[%s]\n%s\n' % (group, '\n'.join(systems)))
else:
final = dict([(k, list(s)) for k, s in iteritems(groups)])
final["_meta"] = meta
print(json.dumps(final))
# print(json.dumps(groups))
sys.exit(0)
# Return a details information concerning the spacewalk server
# ------------------------------
elif options.host:
host_details = {}
try:
for system in spacewalk_report('inventory'):
if system['spacewalk_hostname'] == options.host:
host_details = system
break
except (OSError) as e:
print('Problem executing the command "%s inventory": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
if options.human:
print('Host: %s' % options.host)
for k, v in iteritems(host_details):
print(' %s: %s' % (k, '\n '.join(v.split(';'))))
else:
print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items())))
sys.exit(0)
else:
parser.print_help()
sys.exit(1)
| gpl-3.0 |
mqingyn/pyutils | pyutils/storage.py | 1 | 10882 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
set
except NameError:
from sets import Set as set
from pyutils.strtools import safeunicode
from threading import local as threadlocal
import warnings
import sys,copy
PY3 = (sys.version_info >= (3,))
class Storage(dict):
"""
from web.py
对字典进行扩展,使其支持通过 dict.a形式访问以代替dict['a']
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
raise AttributeError, k
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage
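# Editorial usage example (not part of the original module), in the doctest
# style used elsewhere in this file:
#   >>> o = storage(a=1)
#   >>> o.a
#   1
#   >>> o['a']
#   1
#   >>> o.b = 2
#   >>> o['b']
#   2
#   >>> del o.a
#   >>> o.a
#   Traceback (most recent call last):
#       ...
#   AttributeError: 'a'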
def storify(mapping, *requireds, **defaults):
"""
Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
    `mapping` doesn't have all of the keys in `requireds` and using the default
values for keys found in `defaults`.
For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
`storage({'a':1, 'b':2, 'c':3})`.
If a `storify` value is a list (e.g. multiple values in a form submission),
`storify` returns the last element of the list, unless the key appears in
`defaults` as a list. Thus:
>>> storify({'a':[1, 2]}).a
2
>>> storify({'a':[1, 2]}, a=[]).a
[1, 2]
>>> storify({'a':1}, a=[]).a
[1]
>>> storify({}, a=[]).a
[]
Similarly, if the value has a `value` attribute, `storify will return _its_
value, unless the key appears in `defaults` as a dictionary.
>>> storify({'a':storage(value=1)}).a
1
>>> storify({'a':storage(value=1)}, a={}).a
<Storage {'value': 1}>
>>> storify({}, a={}).a
{}
Optionally, keyword parameter `_unicode` can be passed to convert all values to unicode.
>>> storify({'x': 'a'}, _unicode=True)
<Storage {'x': u'a'}>
>>> storify({'x': storage(value='a')}, x={}, _unicode=True)
<Storage {'x': <Storage {'value': 'a'}>}>
>>> storify({'x': storage(value='a')}, _unicode=True)
<Storage {'x': u'a'}>
"""
_unicode = defaults.pop('_unicode', False)
# if _unicode is callable object, use it convert a string to unicode.
to_unicode = safeunicode
if _unicode is not False and hasattr(_unicode, "__call__"):
to_unicode = _unicode
def unicodify(s):
if _unicode and isinstance(s, str):
return to_unicode(s)
else:
return s
def getvalue(x):
if hasattr(x, 'file') and hasattr(x, 'value'):
return x.value
elif hasattr(x, 'value'):
return unicodify(x.value)
else:
return unicodify(x)
stor = Storage()
for key in requireds + tuple(mapping.keys()):
value = mapping[key]
if isinstance(value, list):
if isinstance(defaults.get(key), list):
value = [getvalue(x) for x in value]
else:
value = value[-1]
if not isinstance(defaults.get(key), dict):
value = getvalue(value)
if isinstance(defaults.get(key), list) and not isinstance(value, list):
value = [value]
setattr(stor, key, value)
for (key, value) in defaults.iteritems():
result = value
if hasattr(stor, key):
result = stor[key]
if value == () and not isinstance(result, tuple):
result = (result,)
setattr(stor, key, result)
return stor
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None or isinstance(data, dict):
data = data or []
super(SortedDict, self).__init__(data)
self.keyOrder = list(data) if data else []
else:
super(SortedDict, self).__init__()
super_set = super(SortedDict, self).__setitem__
for key, value in data:
# Take the ordering from first key
if key not in self:
self.keyOrder.append(key)
# But override with last value in data (dict() does this)
super_set(key, value)
def __deepcopy__(self, memo):
return self.__class__([(key, copy.deepcopy(value, memo))
for key, value in self.items()])
def __copy__(self):
# The Python's default copy implementation will alter the state
# of self. The reason for this seems complex but is likely related to
# subclassing dict.
return self.copy()
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def __reversed__(self):
return reversed(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def _iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def _iterkeys(self):
for key in self.keyOrder:
yield key
def _itervalues(self):
for key in self.keyOrder:
yield self[key]
if PY3:
items = _iteritems
keys = _iterkeys
values = _itervalues
else:
iteritems = _iteritems
iterkeys = _iterkeys
itervalues = _itervalues
def items(self):
return [(k, self[k]) for k in self.keyOrder]
def keys(self):
return self.keyOrder[:]
def values(self):
return [self[k] for k in self.keyOrder]
def update(self, dict_):
for k, v in dict_.iteritems():
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
# This, and insert() are deprecated because they cannot be implemented
# using collections.OrderedDict (Python 2.7 and up), which we'll
# eventually switch to
warnings.warn(
"SortedDict.value_for_index is deprecated", PendingDeprecationWarning,
stacklevel=2
)
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
warnings.warn(
"SortedDict.insert is deprecated", PendingDeprecationWarning,
stacklevel=2
)
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
return self.__class__(self)
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.iteritems()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
sorteddict = SortedDict
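# Editorial usage example (not part of the original module): unlike a plain
# dict on Python 2, SortedDict preserves insertion order:
#   >>> d = sorteddict([('b', 2), ('a', 1)])
#   >>> d['c'] = 3
#   >>> d.keys()
#   ['b', 'a', 'c']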
class ThreadedDict(threadlocal):
"""
Thread local storage.
>>> d = ThreadedDict()
>>> d.x = 1
>>> d.x
1
>>> import threading
>>> def f(): d.x = 2
...
>>> t = threading.Thread(target=f)
>>> t.start()
>>> t.join()
>>> d.x
1
"""
_instances = set()
def __init__(self):
ThreadedDict._instances.add(self)
def __del__(self):
ThreadedDict._instances.remove(self)
def __hash__(self):
return id(self)
def clear_all():
"""Clears all ThreadedDict instances.
"""
for t in list(ThreadedDict._instances):
t.clear()
clear_all = staticmethod(clear_all)
# Define all these methods to more or less fully emulate dict -- attribute access
# is built into threading.local.
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, value):
self.__dict__[key] = value
def __delitem__(self, key):
del self.__dict__[key]
def __contains__(self, key):
return key in self.__dict__
has_key = __contains__
def clear(self):
self.__dict__.clear()
def copy(self):
return self.__dict__.copy()
def get(self, key, default=None):
return self.__dict__.get(key, default)
def items(self):
return self.__dict__.items()
def iteritems(self):
return self.__dict__.iteritems()
def keys(self):
return self.__dict__.keys()
def iterkeys(self):
return self.__dict__.iterkeys()
iter = iterkeys
def values(self):
return self.__dict__.values()
def itervalues(self):
return self.__dict__.itervalues()
def pop(self, key, *args):
return self.__dict__.pop(key, *args)
def popitem(self):
return self.__dict__.popitem()
def setdefault(self, key, default=None):
return self.__dict__.setdefault(key, default)
def update(self, *args, **kwargs):
self.__dict__.update(*args, **kwargs)
def __repr__(self):
return '<ThreadedDict %r>' % self.__dict__
__str__ = __repr__
| bsd-3-clause |
kalxas/QGIS | tests/src/python/test_qgsbox3d.py | 45 | 7825 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsBox3d.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '11/04/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsBox3d,
QgsPointXY,
QgsPoint,
QgsWkbTypes,
QgsRectangle)
from qgis.testing import unittest
class TestQgsBox3d(unittest.TestCase):
def testCtor(self):
box = QgsBox3d(5.0, 6.0, 7.0, 10.0, 11.0, 12.0)
self.assertEqual(box.xMinimum(), 5.0)
self.assertEqual(box.yMinimum(), 6.0)
self.assertEqual(box.zMinimum(), 7.0)
self.assertEqual(box.xMaximum(), 10.0)
self.assertEqual(box.yMaximum(), 11.0)
self.assertEqual(box.zMaximum(), 12.0)
box = QgsBox3d(QgsPoint(5, 6, 7), QgsPoint(10, 11, 12))
self.assertEqual(box.xMinimum(), 5.0)
self.assertEqual(box.yMinimum(), 6.0)
self.assertEqual(box.zMinimum(), 7.0)
self.assertEqual(box.xMaximum(), 10.0)
self.assertEqual(box.yMaximum(), 11.0)
self.assertEqual(box.zMaximum(), 12.0)
# point constructor should normalize
box = QgsBox3d(QgsPoint(10, 11, 12), QgsPoint(5, 6, 7))
self.assertEqual(box.xMinimum(), 5.0)
self.assertEqual(box.yMinimum(), 6.0)
self.assertEqual(box.zMinimum(), 7.0)
self.assertEqual(box.xMaximum(), 10.0)
self.assertEqual(box.yMaximum(), 11.0)
self.assertEqual(box.zMaximum(), 12.0)
box = QgsBox3d(QgsRectangle(5, 6, 11, 13))
self.assertEqual(box.xMinimum(), 5.0)
self.assertEqual(box.yMinimum(), 6.0)
self.assertEqual(box.zMinimum(), 0.0)
self.assertEqual(box.xMaximum(), 11.0)
self.assertEqual(box.yMaximum(), 13.0)
self.assertEqual(box.zMaximum(), 0.0)
def testSetters(self):
box = QgsBox3d(5.0, 6.0, 7.0, 10.0, 11.0, 12.0)
box.setXMinimum(35.0)
box.setYMinimum(36.0)
box.setZMinimum(37.0)
box.setXMaximum(40.0)
box.setYMaximum(41.0)
box.setZMaximum(42.0)
self.assertEqual(box.xMinimum(), 35.0)
self.assertEqual(box.yMinimum(), 36.0)
self.assertEqual(box.zMinimum(), 37.0)
self.assertEqual(box.xMaximum(), 40.0)
self.assertEqual(box.yMaximum(), 41.0)
self.assertEqual(box.zMaximum(), 42.0)
def testNormalize(self):
box = QgsBox3d()
box.setXMinimum(10.0)
box.setYMinimum(11.0)
box.setZMinimum(12.0)
box.setXMaximum(5.0)
box.setYMaximum(6.0)
box.setZMaximum(7.0)
box.normalize()
self.assertEqual(box.xMinimum(), 5.0)
self.assertEqual(box.yMinimum(), 6.0)
self.assertEqual(box.zMinimum(), 7.0)
self.assertEqual(box.xMaximum(), 10.0)
self.assertEqual(box.yMaximum(), 11.0)
self.assertEqual(box.zMaximum(), 12.0)
def testDimensions(self):
box = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 15.0)
self.assertEqual(box.width(), 6.0)
self.assertEqual(box.height(), 7.0)
self.assertEqual(box.depth(), 8.0)
def testIntersect(self):
box = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 15.0)
box2 = box.intersect(QgsBox3d(7.0, 8.0, 9.0, 10.0, 11.0, 12.0))
self.assertEqual(box2.xMinimum(), 7.0)
self.assertEqual(box2.yMinimum(), 8.0)
self.assertEqual(box2.zMinimum(), 9.0)
self.assertEqual(box2.xMaximum(), 10.0)
self.assertEqual(box2.yMaximum(), 11.0)
self.assertEqual(box2.zMaximum(), 12.0)
box2 = box.intersect(QgsBox3d(0.0, 1.0, 2.0, 100.0, 111.0, 112.0))
self.assertEqual(box2.xMinimum(), 5.0)
self.assertEqual(box2.yMinimum(), 6.0)
self.assertEqual(box2.zMinimum(), 7.0)
self.assertEqual(box2.xMaximum(), 11.0)
self.assertEqual(box2.yMaximum(), 13.0)
self.assertEqual(box2.zMaximum(), 15.0)
box2 = box.intersect(QgsBox3d(1.0, 2.0, 3.0, 6.0, 7.0, 8.0))
self.assertEqual(box2.xMinimum(), 5.0)
self.assertEqual(box2.yMinimum(), 6.0)
self.assertEqual(box2.zMinimum(), 7.0)
self.assertEqual(box2.xMaximum(), 6.0)
self.assertEqual(box2.yMaximum(), 7.0)
self.assertEqual(box2.zMaximum(), 8.0)
def testIntersects(self):
box = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 15.0)
self.assertTrue(box.intersects(QgsBox3d(7.0, 8.0, 9.0, 10.0, 11.0, 12.0)))
self.assertTrue(box.intersects(QgsBox3d(0.0, 1.0, 2.0, 100.0, 111.0, 112.0)))
self.assertTrue(box.intersects(QgsBox3d(1.0, 2.0, 3.0, 6.0, 7.0, 8.0)))
self.assertFalse(box.intersects(QgsBox3d(15.0, 16.0, 17.0, 110.0, 112.0, 113.0)))
self.assertFalse(box.intersects(QgsBox3d(5.0, 6.0, 17.0, 11.0, 13.0, 113.0)))
self.assertFalse(box.intersects(QgsBox3d(5.0, 16.0, 7.0, 11.0, 23.0, 15.0)))
self.assertFalse(box.intersects(QgsBox3d(15.0, 6.0, 7.0, 21.0, 13.0, 15.0)))
def testContains(self):
box = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 15.0)
self.assertTrue(box.contains(QgsBox3d(7.0, 8.0, 9.0, 10.0, 11.0, 12.0)))
self.assertFalse(box.contains(QgsBox3d(0.0, 1.0, 2.0, 100.0, 111.0, 112.0)))
self.assertFalse(box.contains(QgsBox3d(1.0, 2.0, 3.0, 6.0, 7.0, 8.0)))
self.assertFalse(box.contains(QgsBox3d(15.0, 16.0, 17.0, 110.0, 112.0, 113.0)))
self.assertFalse(box.contains(QgsBox3d(5.0, 6.0, 17.0, 11.0, 13.0, 113.0)))
self.assertFalse(box.contains(QgsBox3d(5.0, 16.0, 7.0, 11.0, 23.0, 15.0)))
self.assertFalse(box.contains(QgsBox3d(15.0, 6.0, 7.0, 21.0, 13.0, 15.0)))
def testContainsPoint(self):
box = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 15.0)
self.assertTrue(box.contains(QgsPoint(6, 7, 8)))
self.assertFalse(box.contains(QgsPoint(16, 7, 8)))
self.assertFalse(box.contains(QgsPoint(6, 17, 8)))
self.assertFalse(box.contains(QgsPoint(6, 7, 18)))
# 2d containment
self.assertTrue(box.contains(QgsPoint(6, 7)))
self.assertFalse(box.contains(QgsPoint(16, 7)))
self.assertFalse(box.contains(QgsPoint(6, 17)))
def testVolume(self):
box = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 15.0)
self.assertEqual(box.volume(), 336.0)
def testToRectangle(self):
box = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 15.0)
rect = box.toRectangle()
self.assertEqual(rect, QgsRectangle(5, 6, 11, 13))
def testIs2d(self):
box = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 15.0)
self.assertFalse(box.is2d())
box = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 7.0)
self.assertTrue(box.is2d())
box = QgsBox3d(5.0, 6.0, 0.0, 11.0, 13.0, 0.0)
self.assertTrue(box.is2d())
box = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, -7.0)
self.assertTrue(box.is2d())
def testEquality(self):
box1 = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 15.0)
box2 = QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 15.0)
self.assertEqual(box1, box2)
self.assertNotEqual(box1, QgsBox3d(5.0, 6.0, 7.0, 11.0, 13.0, 14.0))
self.assertNotEqual(box1, QgsBox3d(5.0, 6.0, 7.0, 11.0, 41.0, 15.0))
self.assertNotEqual(box1, QgsBox3d(5.0, 6.0, 7.0, 12.0, 13.0, 15.0))
self.assertNotEqual(box1, QgsBox3d(5.0, 6.0, 17.0, 11.0, 13.0, 15.0))
self.assertNotEqual(box1, QgsBox3d(5.0, 16.0, 7.0, 11.0, 13.0, 15.0))
self.assertNotEqual(box1, QgsBox3d(52.0, 6.0, 7.0, 11.0, 13.0, 15.0))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
Beauhurst/django | django/conf/urls/i18n.py | 29 | 1119 | import functools
from django.conf import settings
from django.conf.urls import url
from django.urls import LocaleRegexURLResolver, get_resolver
from django.views.i18n import set_language
def i18n_patterns(*urls, prefix_default_language=True):
"""
Add the language code prefix to every URL pattern within this function.
This may only be used in the root URLconf, not in an included URLconf.
"""
if not settings.USE_I18N:
return list(urls)
return [LocaleRegexURLResolver(list(urls), prefix_default_language=prefix_default_language)]
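# Editorial usage sketch (not part of the original module): i18n_patterns wraps
# patterns in the project's root URLconf so each URL gains a language-code
# prefix; the view name below is illustrative only.
#   # urls.py
#   # from django.conf.urls import url
#   # from django.conf.urls.i18n import i18n_patterns
#   # urlpatterns = i18n_patterns(
#   #     url(r'^about/$', about_view),
#   #     prefix_default_language=False,
#   # )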
@functools.lru_cache(maxsize=None)
def is_language_prefix_patterns_used(urlconf):
"""
Return a tuple of two booleans: (
`True` if LocaleRegexURLResolver` is used in the `urlconf`,
`True` if the default language should be prefixed
)
"""
for url_pattern in get_resolver(urlconf).url_patterns:
if isinstance(url_pattern, LocaleRegexURLResolver):
return True, url_pattern.prefix_default_language
return False, False
urlpatterns = [
url(r'^setlang/$', set_language, name='set_language'),
]
| bsd-3-clause |
OaklandPeters/validator | setup.py | 11 | 1831 | from setuptools import setup
# Filling in this template requires filling in:
# name
# description
# packages
# classifiers
# Development Status ::
# ... it would also be beneficial to study/fill in other classifiers
#
# Also will benefit from confirming the url -- which may change frequently
# ... such as if not using bitbucket
def TEMPLATE(placeholder='unspecified'):
"""This function exists only to prevent you from running setup.py wihtout
filling in necessary parts. Delete TEMPLATE in the filled-in version."""
raise Exception("Template has not yet been filled in for: "+placeholder)
setup(
name=TEMPLATE('{package-name}'),
version=open('VERSION').read().strip(),
author='Oakland John Peters',
author_email='[email protected]',
    description=TEMPLATE('{long-description}'),
long_description=open('README.rst').read(),
url=TEMPLATE('package: http://bitbucket.org/OPeters/{package-name}'),
license='MIT',
packages=[TEMPLATE('{package-name}')],
classifiers=[
#Select one 'Development Status'
#'Development Status :: 1 - Planning',
#'Development Status :: 2 - Pre-Alpha',
#'Development Status :: 3 - Alpha',
#'Development Status :: 4 - Beta',
#'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
'Topic :: Utilities' #only if appropriate
]
)
| mit |
luci/recipes-py | recipe_modules/path/config.py | 2 | 1502 | # Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from recipe_engine.config import config_item_context, ConfigGroup, Dict, Static
from recipe_engine.config_types import Path
def BaseConfig(PLATFORM, START_DIR, TEMP_DIR, CACHE_DIR, CLEANUP_DIR, HOME_DIR,
**_kwargs):
assert START_DIR[0].endswith(('\\', '/')), START_DIR
assert TEMP_DIR[0].endswith(('\\', '/')), TEMP_DIR
assert CACHE_DIR[0].endswith(('\\', '/')), CACHE_DIR
assert CLEANUP_DIR[0].endswith(('\\', '/')), CLEANUP_DIR
assert HOME_DIR[0].endswith(('\\', '/')), HOME_DIR
return ConfigGroup(
# base path name -> [tokenized absolute path]
base_paths=Dict(value_type=tuple),
# dynamic path name -> Path object (referencing one of the base_paths)
dynamic_paths=Dict(value_type=(Path, type(None))),
PLATFORM=Static(PLATFORM),
START_DIR=Static(tuple(START_DIR)),
TEMP_DIR=Static(tuple(TEMP_DIR)),
CACHE_DIR=Static(tuple(CACHE_DIR)),
CLEANUP_DIR=Static(tuple(CLEANUP_DIR)),
HOME_DIR=Static(tuple(HOME_DIR)),
)
config_ctx = config_item_context(BaseConfig)
@config_ctx(is_root=True)
def BASE(c):
c.base_paths['start_dir'] = c.START_DIR
c.base_paths['tmp_base'] = c.TEMP_DIR
c.base_paths['cache'] = c.CACHE_DIR
c.base_paths['cleanup'] = c.CLEANUP_DIR
c.base_paths['home'] = c.HOME_DIR
c.dynamic_paths['checkout'] = None
| apache-2.0 |
benspaulding/django | django/db/models/sql/constants.py | 16 | 1108 | from collections import namedtuple
import re
# Valid query types (a dictionary is used for speedy lookups).
QUERY_TERMS = set([
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'week_day', 'isnull', 'search', 'regex', 'iregex',
])
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Separator used to split filter strings apart.
LOOKUP_SEP = '__'
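# Editorial example (not part of the original module): LOOKUP_SEP is the '__'
# separator used in ORM lookup strings, e.g.
#   >>> 'author__name__iexact'.split(LOOKUP_SEP)
#   ['author', 'name', 'iexact']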
# Constants to make looking up tuple values clearer.
# Join lists (indexes into the tuples that are values in the alias_map
# dictionary in the Query class).
JoinInfo = namedtuple('JoinInfo',
'table_name rhs_alias join_type lhs_alias '
'lhs_join_col rhs_join_col nullable')
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
ORDER_DIR = {
'ASC': ('ASC', 'DESC'),
'DESC': ('DESC', 'ASC'),
}
| bsd-3-clause |
tornadomeet/mxnet | example/reinforcement-learning/a3c/launcher.py | 34 | 5327 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Submission job for local jobs."""
# pylint: disable=invalid-name
from __future__ import absolute_import
import sys
import os
import subprocess
import logging
from threading import Thread
import argparse
import signal
sys.path.append(os.path.join(os.environ['HOME'], "mxnet/dmlc-core/tracker"))
sys.path.append(os.path.join('/scratch', "mxnet/dmlc-core/tracker"))
from dmlc_tracker import tracker
keepalive = """
nrep=0
rc=254
while [ $rc -ne 0 ];
do
export DMLC_NUM_ATTEMPT=$nrep
%s
rc=$?;
nrep=$((nrep+1));
done
"""
def exec_cmd(cmd, role, taskid, pass_env):
"""Execute the command line command."""
if cmd[0].find('/') == -1 and os.path.exists(cmd[0]) and os.name != 'nt':
cmd[0] = './' + cmd[0]
cmd = ' '.join(cmd)
env = os.environ.copy()
for k, v in pass_env.items():
env[k] = str(v)
env['DMLC_TASK_ID'] = str(taskid)
env['DMLC_ROLE'] = role
env['DMLC_JOB_CLUSTER'] = 'local'
ntrial = 0
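    # On Windows the command is retried in-process with DMLC_NUM_ATTEMPT
    # counting the attempts; elsewhere it runs once through bash and a nonzero
    # exit code raises RuntimeError.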
while True:
if os.name == 'nt':
env['DMLC_NUM_ATTEMPT'] = str(ntrial)
ret = subprocess.call(cmd, shell=True, env=env)
if ret != 0:
ntrial += 1
continue
else:
bash = cmd
ret = subprocess.call(bash, shell=True, executable='bash', env=env)
if ret == 0:
logging.debug('Thread %d exit with 0', taskid)
return
else:
if os.name == 'nt':
sys.exit(-1)
else:
raise RuntimeError('Get nonzero return code=%d' % ret)
def submit(args):
    """Submit function of local jobs."""
    gpus = args.gpus.strip().split(',')
def mthread_submit(nworker, nserver, envs):
"""
        Customized submit script that submits the worker jobs; each must take args as a parameter.
        Note this can be a lambda function containing additional parameters in input.
        Parameters
        ----------
        nworker: number of worker processes to start up
        nserver: number of server nodes to start up
        envs: environment variables to be added to the starting programs
"""
procs = {}
        for i, gpu in enumerate(gpus):
            for j in range(args.num_threads):
                # key each worker thread by its unique task id so entries in
                # procs are not overwritten when num_threads > 1
                tid = i*args.num_threads+j
                procs[tid] = Thread(target=exec_cmd, args=(args.command + ['--gpus=%s'%gpu], 'worker', tid, envs))
                procs[tid].setDaemon(True)
                procs[tid].start()
for i in range(len(gpus)*args.num_threads, len(gpus)*args.num_threads + nserver):
procs[i] = Thread(target=exec_cmd, args=(args.command, 'server', i, envs))
procs[i].setDaemon(True)
procs[i].start()
# call submit, with nslave, the commands to run each job and submit function
tracker.submit(args.num_threads*len(gpus), args.num_servers, fun_submit=mthread_submit,
pscmd=(' '.join(args.command)))
def signal_handler(signal, frame):
logging.info('Stop launcher')
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Launch a distributed job')
parser.add_argument('--gpus', type=str, help='the gpus will be used, e.g "0,1,2,3"')
parser.add_argument('-n', '--num-threads', required=True, type=int,
help = 'number of threads per gpu')
parser.add_argument('-s', '--num-servers', type=int,
help = 'number of server nodes to be launched, \
in default it is equal to NUM_WORKERS')
parser.add_argument('-H', '--hostfile', type=str,
help = 'the hostfile of slave machines which will run \
the job. Required for ssh and mpi launcher')
parser.add_argument('--sync-dst-dir', type=str,
help = 'if specificed, it will sync the current \
directory into slave machines\'s SYNC_DST_DIR if ssh \
launcher is used')
parser.add_argument('--launcher', type=str, default='local',
choices = ['local', 'ssh', 'mpi', 'sge', 'yarn'],
help = 'the launcher to use')
parser.add_argument('command', nargs='+',
help = 'command for launching the program')
args, unknown = parser.parse_known_args()
args.command += unknown
if args.num_servers is None:
args.num_servers = args.num_threads * len(args.gpus.strip().split(','))
signal.signal(signal.SIGINT, signal_handler)
submit(args)
| apache-2.0 |
devs4v/devs4v-information-retrieval15 | project/venv/lib/python2.7/site-packages/nltk/parse/nonprojectivedependencyparser.py | 2 | 29278 | # Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Jason Narad <[email protected]>
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
from __future__ import print_function
import math
import logging
from nltk.compat import xrange
from nltk.parse.dependencygraph import DependencyGraph
from nltk.classify import NaiveBayesClassifier
logger = logging.getLogger(__name__)
#################################################################
# DependencyScorerI - Interface for Graph-Edge Weight Calculation
#################################################################
class DependencyScorerI(object):
"""
    A scorer for calculating the weights on the edges of a weighted
dependency graph. This is used by a
``ProbabilisticNonprojectiveParser`` to initialize the edge
weights of a ``DependencyGraph``. While typically this would be done
by training a binary classifier, any class that can return a
multidimensional list representation of the edge weights can
implement this interface. As such, it has no necessary
fields.
"""
def __init__(self):
if self.__class__ == DependencyScorerI:
raise TypeError('DependencyScorerI is an abstract interface')
def train(self, graphs):
"""
:type graphs: list(DependencyGraph)
:param graphs: A list of dependency graphs to train the scorer.
Typically the edges present in the graphs can be used as
positive training examples, and the edges not present as negative
examples.
"""
raise NotImplementedError()
def score(self, graph):
"""
:type graph: DependencyGraph
:param graph: A dependency graph whose set of edges need to be
scored.
:rtype: A three-dimensional list of numbers.
:return: The score is returned in a multidimensional(3) list, such
that the outer-dimension refers to the head, and the
inner-dimension refers to the dependencies. For instance,
scores[0][1] would reference the list of scores corresponding to
arcs from node 0 to node 1. The node's 'address' field can be used
to determine its number identification.
For further illustration, a score list corresponding to Fig.2 of
Keith Hall's 'K-best Spanning Tree Parsing' paper:
scores = [[[], [5], [1], [1]],
[[], [], [11], [4]],
[[], [10], [], [5]],
[[], [8], [8], []]]
When used in conjunction with a MaxEntClassifier, each score would
correspond to the confidence of a particular edge being classified
with the positive training examples.
"""
raise NotImplementedError()
#################################################################
# NaiveBayesDependencyScorer
#################################################################
class NaiveBayesDependencyScorer(DependencyScorerI):
"""
A dependency scorer built around a MaxEnt classifier. In this
particular class that classifier is a ``NaiveBayesClassifier``.
It uses head-word, head-tag, child-word, and child-tag features
for classification.
>>> from nltk.parse.dependencygraph import DependencyGraph, conll_data2
>>> graphs = [DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry]
>>> npp = ProbabilisticNonprojectiveParser()
>>> npp.train(graphs, NaiveBayesDependencyScorer())
>>> parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc'])
>>> len(list(parses))
1
"""
def __init__(self):
pass # Do nothing without throwing error
def train(self, graphs):
"""
Trains a ``NaiveBayesClassifier`` using the edges present in
graphs list as positive examples, the edges not present as
negative examples. Uses a feature vector of head-word,
head-tag, child-word, and child-tag.
:type graphs: list(DependencyGraph)
:param graphs: A list of dependency graphs to train the scorer.
"""
        # Create labeled training examples
labeled_examples = []
for graph in graphs:
for head_node in graph.nodes.values():
for child_index, child_node in graph.nodes.items():
if child_index in head_node['deps']:
label = "T"
else:
label = "F"
labeled_examples.append(
(
dict(
a=head_node['word'],
b=head_node['tag'],
c=child_node['word'],
d=child_node['tag'],
),
label,
)
)
self.classifier = NaiveBayesClassifier.train(labeled_examples)
def score(self, graph):
"""
Converts the graph into a feature-based representation of
each edge, and then assigns a score to each based on the
confidence of the classifier in assigning it to the
positive label. Scores are returned in a multidimensional list.
:type graph: DependencyGraph
:param graph: A dependency graph to score.
:rtype: 3 dimensional list
:return: Edge scores for the graph parameter.
"""
# Convert graph to feature representation
edges = []
for head_node in graph.nodes.values():
for child_node in graph.nodes.values():
edges.append(
(
dict(
a=head_node['word'],
b=head_node['tag'],
c=child_node['word'],
d=child_node['tag'],
)
)
)
# Score edges
edge_scores = []
row = []
count = 0
for pdist in self.classifier.prob_classify_many(edges):
logger.debug('%.4f %.4f', pdist.prob('T'), pdist.prob('F'))
# smoothing in case the probability = 0
row.append([math.log(pdist.prob("T")+0.00000000001)])
count += 1
if count == len(graph.nodes):
edge_scores.append(row)
row = []
count = 0
return edge_scores
#################################################################
# A Scorer for Demo Purposes
#################################################################
# A short class necessary to show parsing example from paper
class DemoScorer(DependencyScorerI):
def train(self, graphs):
print('Training...')
def score(self, graph):
# scores for Keith Hall 'K-best Spanning Tree Parsing' paper
return [[[], [5], [1], [1]],
[[], [], [11], [4]],
[[], [10], [], [5]],
[[], [8], [8], []]]
#################################################################
# Non-Projective Probabilistic Parsing
#################################################################
class ProbabilisticNonprojectiveParser(object):
"""A probabilistic non-projective dependency parser.
    Nonprojective dependencies allow for "crossing branches" in the parse tree
which is necessary for representing particular linguistic phenomena, or even
typical parses in some languages. This parser follows the MST parsing
algorithm, outlined in McDonald(2005), which likens the search for the best
non-projective parse to finding the maximum spanning tree in a weighted
directed graph.
>>> class Scorer(DependencyScorerI):
... def train(self, graphs):
... pass
...
... def score(self, graph):
... return [
... [[], [5], [1], [1]],
... [[], [], [11], [4]],
... [[], [10], [], [5]],
... [[], [8], [8], []],
... ]
>>> npp = ProbabilisticNonprojectiveParser()
>>> npp.train([], Scorer())
>>> parses = npp.parse(['v1', 'v2', 'v3'], [None, None, None])
>>> len(list(parses))
1
Rule based example
------------------
>>> from nltk.grammar import DependencyGrammar
>>> grammar = DependencyGrammar.fromstring('''
... 'taught' -> 'play' | 'man'
... 'man' -> 'the' | 'in'
... 'in' -> 'corner'
... 'corner' -> 'the'
... 'play' -> 'golf' | 'dachshund' | 'to'
... 'dachshund' -> 'his'
... ''')
>>> ndp = NonprojectiveDependencyParser(grammar)
>>> parses = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf'])
>>> len(list(parses))
4
"""
def __init__(self):
"""
Creates a new non-projective parser.
"""
logging.debug('initializing prob. nonprojective...')
def train(self, graphs, dependency_scorer):
"""
Trains a ``DependencyScorerI`` from a set of ``DependencyGraph`` objects,
and establishes this as the parser's scorer. This is used to
initialize the scores on a ``DependencyGraph`` during the parsing
procedure.
:type graphs: list(DependencyGraph)
:param graphs: A list of dependency graphs to train the scorer.
:type dependency_scorer: DependencyScorerI
:param dependency_scorer: A scorer which implements the
``DependencyScorerI`` interface.
"""
self._scorer = dependency_scorer
self._scorer.train(graphs)
def initialize_edge_scores(self, graph):
"""
Assigns a score to every edge in the ``DependencyGraph`` graph.
These scores are generated via the parser's scorer which
was assigned during the training process.
:type graph: DependencyGraph
:param graph: A dependency graph to assign scores to.
"""
self.scores = self._scorer.score(graph)
def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph):
"""
Takes a list of nodes that have been identified to belong to a cycle,
        and collapses them into one larger node. The arcs of all nodes in
the graph must be updated to account for this.
:type new_node: Node.
:param new_node: A Node (Dictionary) to collapse the cycle nodes into.
:type cycle_path: A list of integers.
:param cycle_path: A list of node addresses, each of which is in the cycle.
:type g_graph, b_graph, c_graph: DependencyGraph
:param g_graph, b_graph, c_graph: Graphs which need to be updated.
"""
logger.debug('Collapsing nodes...')
# Collapse all cycle nodes into v_n+1 in G_Graph
for cycle_node_index in cycle_path:
g_graph.remove_by_address(cycle_node_index)
g_graph.add_node(new_node)
g_graph.redirect_arcs(cycle_path, new_node['address'])
def update_edge_scores(self, new_node, cycle_path):
"""
Updates the edge scores to reflect a collapse operation into
new_node.
:type new_node: A Node.
:param new_node: The node which cycle nodes are collapsed into.
:type cycle_path: A list of integers.
:param cycle_path: A list of node addresses that belong to the cycle.
"""
logger.debug('cycle %s', cycle_path)
cycle_path = self.compute_original_indexes(cycle_path)
logger.debug('old cycle %s', cycle_path)
logger.debug('Prior to update: %s', self.scores)
for i, row in enumerate(self.scores):
for j, column in enumerate(self.scores[i]):
logger.debug(self.scores[i][j])
if (
j in cycle_path
and i not in cycle_path
and self.scores[i][j]
):
subtract_val = self.compute_max_subtract_score(j, cycle_path)
logger.debug('%s - %s', self.scores[i][j], subtract_val)
new_vals = []
for cur_val in self.scores[i][j]:
new_vals.append(cur_val - subtract_val)
self.scores[i][j] = new_vals
for i, row in enumerate(self.scores):
for j, cell in enumerate(self.scores[i]):
if i in cycle_path and j in cycle_path:
self.scores[i][j] = []
logger.debug('After update: %s', self.scores)
def compute_original_indexes(self, new_indexes):
"""
As nodes are collapsed into others, they are replaced
by the new node in the graph, but it's still necessary
to keep track of what these original nodes were. This
takes a list of node addresses and replaces any collapsed
node addresses with their original addresses.
:type new_indexes: A list of integers.
:param new_indexes: A list of node addresses to check for
subsumed nodes.
"""
swapped = True
while swapped:
originals = []
swapped = False
for new_index in new_indexes:
if new_index in self.inner_nodes:
for old_val in self.inner_nodes[new_index]:
if old_val not in originals:
originals.append(old_val)
swapped = True
else:
originals.append(new_index)
new_indexes = originals
return new_indexes
def compute_max_subtract_score(self, column_index, cycle_indexes):
"""
When updating scores the score of the highest-weighted incoming
arc is subtracted upon collapse. This returns the correct
amount to subtract from that edge.
:type column_index: integer.
        :param column_index: An index representing the column of incoming arcs
to a particular node being updated
:type cycle_indexes: A list of integers.
:param cycle_indexes: Only arcs from cycle nodes are considered. This
is a list of such nodes addresses.
"""
max_score = -100000
for row_index in cycle_indexes:
for subtract_val in self.scores[row_index][column_index]:
if subtract_val > max_score:
max_score = subtract_val
return max_score
def best_incoming_arc(self, node_index):
"""
Returns the source of the best incoming arc to the
node with address: node_index
:type node_index: integer.
:param node_index: The address of the 'destination' node,
the node that is arced to.
"""
originals = self.compute_original_indexes([node_index])
logger.debug('originals: %s', originals)
max_arc = None
max_score = None
for row_index in range(len(self.scores)):
for col_index in range(len(self.scores[row_index])):
# print self.scores[row_index][col_index]
if col_index in originals and (max_score is None or self.scores[row_index][col_index] > max_score):
max_score = self.scores[row_index][col_index]
max_arc = row_index
logger.debug('%s, %s', row_index, col_index)
logger.debug(max_score)
for key in self.inner_nodes:
replaced_nodes = self.inner_nodes[key]
if max_arc in replaced_nodes:
return key
return max_arc
def original_best_arc(self, node_index):
originals = self.compute_original_indexes([node_index])
max_arc = None
max_score = None
max_orig = None
for row_index in range(len(self.scores)):
for col_index in range(len(self.scores[row_index])):
if col_index in originals and (max_score is None or self.scores[row_index][col_index] > max_score):
max_score = self.scores[row_index][col_index]
max_arc = row_index
max_orig = col_index
return [max_arc, max_orig]
def parse(self, tokens, tags):
"""
Parses a list of tokens in accordance to the MST parsing algorithm
for non-projective dependency parses. Assumes that the tokens to
be parsed have already been tagged and those tags are provided. Various
scoring methods can be used by implementing the ``DependencyScorerI``
interface and passing it to the training algorithm.
:type tokens: list(str)
:param tokens: A list of words or punctuation to be parsed.
:type tags: list(str)
:param tags: A list of tags corresponding by index to the words in the tokens list.
:return: An iterator of non-projective parses.
:rtype: iter(DependencyGraph)
"""
self.inner_nodes = {}
# Initialize g_graph
g_graph = DependencyGraph()
for index, token in enumerate(tokens):
g_graph.nodes[index + 1].update(
{
'word': token,
'tag': tags[index],
'rel': 'NTOP',
'address': index + 1,
}
)
#print (g_graph.nodes)
# Fully connect non-root nodes in g_graph
g_graph.connect_graph()
original_graph = DependencyGraph()
for index, token in enumerate(tokens):
original_graph.nodes[index + 1].update(
{
'word': token,
'tag': tags[index],
'rel': 'NTOP',
'address': index+1,
}
)
b_graph = DependencyGraph()
c_graph = DependencyGraph()
for index, token in enumerate(tokens):
c_graph.nodes[index + 1].update(
{
'word': token,
'tag': tags[index],
'rel': 'NTOP',
'address': index + 1,
}
)
# Assign initial scores to g_graph edges
self.initialize_edge_scores(g_graph)
logger.debug(self.scores)
# Initialize a list of unvisited vertices (by node address)
unvisited_vertices = [
vertex['address'] for vertex in c_graph.nodes.values()
]
# Iterate over unvisited vertices
nr_vertices = len(tokens)
betas = {}
while unvisited_vertices:
# Mark current node as visited
current_vertex = unvisited_vertices.pop(0)
logger.debug('current_vertex: %s', current_vertex)
# Get corresponding node n_i to vertex v_i
current_node = g_graph.get_by_address(current_vertex)
logger.debug('current_node: %s', current_node)
# Get best in-edge node b for current node
best_in_edge = self.best_incoming_arc(current_vertex)
betas[current_vertex] = self.original_best_arc(current_vertex)
logger.debug('best in arc: %s --> %s', best_in_edge, current_vertex)
# b_graph = Union(b_graph, b)
for new_vertex in [current_vertex, best_in_edge]:
b_graph.nodes[new_vertex].update(
{
'word': 'TEMP',
'rel': 'NTOP',
'address': new_vertex,
}
)
b_graph.add_arc(best_in_edge, current_vertex)
# Beta(current node) = b - stored for parse recovery
# If b_graph contains a cycle, collapse it
cycle_path = b_graph.contains_cycle()
if cycle_path:
# Create a new node v_n+1 with address = len(nodes) + 1
new_node = {
'word': 'NONE',
'rel': 'NTOP',
'address': nr_vertices + 1,
}
# c_graph = Union(c_graph, v_n+1)
c_graph.add_node(new_node)
# Collapse all nodes in cycle C into v_n+1
self.update_edge_scores(new_node, cycle_path)
self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph)
for cycle_index in cycle_path:
c_graph.add_arc(new_node['address'], cycle_index)
# self.replaced_by[cycle_index] = new_node['address']
self.inner_nodes[new_node['address']] = cycle_path
# Add v_n+1 to list of unvisited vertices
unvisited_vertices.insert(0, nr_vertices + 1)
# increment # of nodes counter
nr_vertices += 1
# Remove cycle nodes from b_graph; B = B - cycle c
for cycle_node_address in cycle_path:
b_graph.remove_by_address(cycle_node_address)
logger.debug('g_graph: %s', g_graph)
logger.debug('b_graph: %s', b_graph)
logger.debug('c_graph: %s', c_graph)
logger.debug('Betas: %s', betas)
logger.debug('replaced nodes %s', self.inner_nodes)
# Recover parse tree
logger.debug('Final scores: %s', self.scores)
logger.debug('Recovering parse...')
for i in range(len(tokens) + 1, nr_vertices + 1):
betas[betas[i][1]] = betas[i]
logger.debug('Betas: %s', betas)
for node in original_graph.nodes.values():
            # TODO: It's dangerous to assume that deps is a dictionary
# because it's a default dictionary. Ideally, here we should not
# be concerned how dependencies are stored inside of a dependency
# graph.
node['deps'] = {}
for i in range(1, len(tokens) + 1):
original_graph.add_arc(betas[i][0], betas[i][1])
logger.debug('Done.')
yield original_graph
#################################################################
# Rule-based Non-Projective Parser
#################################################################
class NonprojectiveDependencyParser(object):
"""
A non-projective, rule-based, dependency parser. This parser
will return the set of all possible non-projective parses based on
the word-to-word relations defined in the parser's dependency
grammar, and will allow the branches of the parse tree to cross
in order to capture a variety of linguistic phenomena that a
projective parser will not.
"""
def __init__(self, dependency_grammar):
"""
Creates a new ``NonprojectiveDependencyParser``.
:param dependency_grammar: a grammar of word-to-word relations.
:type dependency_grammar: DependencyGrammar
"""
self._grammar = dependency_grammar
def parse(self, tokens):
"""
Parses the input tokens with respect to the parser's grammar. Parsing
is accomplished by representing the search-space of possible parses as
a fully-connected directed graph. Arcs that would lead to ungrammatical
parses are removed and a lattice is constructed of length n, where n is
the number of input tokens, to represent all possible grammatical
traversals. All possible paths through the lattice are then enumerated
to produce the set of non-projective parses.
        :param tokens: A list of tokens to parse.
        :type tokens: list(str)
        :return: An iterator of non-projective parses.
        :rtype: iter(DependencyGraph)
"""
# Create graph representation of tokens
self._graph = DependencyGraph()
for index, token in enumerate(tokens):
self._graph.nodes[index] = {
'word': token,
'deps': [],
'rel': 'NTOP',
'address': index,
}
for head_node in self._graph.nodes.values():
deps = []
for dep_node in self._graph.nodes.values() :
if (
self._grammar.contains(head_node['word'], dep_node['word'])
and head_node['word'] != dep_node['word']
):
deps.append(dep_node['address'])
head_node['deps'] = deps
# Create lattice of possible heads
roots = []
possible_heads = []
for i, word in enumerate(tokens):
heads = []
for j, head in enumerate(tokens):
if (i != j) and self._grammar.contains(head, word):
heads.append(j)
if len(heads) == 0:
roots.append(i)
possible_heads.append(heads)
# Set roots to attempt
if len(roots) < 2:
if len(roots) == 0:
for i in range(len(tokens)):
roots.append(i)
# Traverse lattice
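        # Depth-first backtracking over possible_heads: analysis[i] holds the
        # chosen head index for token i (-1 marks a token with no head, i.e. a
        # root), and `stack` records multi-choice points so alternatives can
        # be restored when backing up.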
analyses = []
for root in roots:
stack = []
analysis = [[] for i in range(len(possible_heads))]
i = 0
forward = True
while i >= 0:
if forward:
if len(possible_heads[i]) == 1:
analysis[i] = possible_heads[i][0]
elif len(possible_heads[i]) == 0:
analysis[i] = -1
else:
head = possible_heads[i].pop()
analysis[i] = head
stack.append([i, head])
if not forward:
index_on_stack = False
for stack_item in stack:
if stack_item[0] == i:
index_on_stack = True
orig_length = len(possible_heads[i])
if index_on_stack and orig_length == 0:
for j in xrange(len(stack) - 1, -1, -1):
stack_item = stack[j]
if stack_item[0] == i:
possible_heads[i].append(stack.pop(j)[1])
elif index_on_stack and orig_length > 0:
head = possible_heads[i].pop()
analysis[i] = head
stack.append([i, head])
forward = True
if i + 1 == len(possible_heads):
analyses.append(analysis[:])
forward = False
if forward:
i += 1
else:
i -= 1
# Filter parses
# ensure 1 root, every thing has 1 head
for analysis in analyses:
if analysis.count(-1) > 1:
# there are several root elements!
continue
graph = DependencyGraph()
graph.root = graph.nodes[analysis.index(-1) + 1]
for address, (token, head_index) in enumerate(zip(tokens, analysis), start=1):
head_address = head_index + 1
node = graph.nodes[address]
node.update(
{
'word': token,
'address': address,
}
)
if head_address == 0:
rel = 'ROOT'
else:
rel = ''
graph.nodes[head_index + 1]['deps'][rel].append(address)
# TODO: check for cycles
yield graph
#################################################################
# Demos
#################################################################
def demo():
# hall_demo()
nonprojective_conll_parse_demo()
rule_based_demo()
def hall_demo():
npp = ProbabilisticNonprojectiveParser()
npp.train([], DemoScorer())
for parse_graph in npp.parse(['v1', 'v2', 'v3'], [None, None, None]):
print(parse_graph)
def nonprojective_conll_parse_demo():
from nltk.parse.dependencygraph import conll_data2
graphs = [
DependencyGraph(entry) for entry in conll_data2.split('\n\n') if entry
]
npp = ProbabilisticNonprojectiveParser()
npp.train(graphs, NaiveBayesDependencyScorer())
for parse_graph in npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc']):
print(parse_graph)
def rule_based_demo():
from nltk.grammar import DependencyGrammar
grammar = DependencyGrammar.fromstring("""
'taught' -> 'play' | 'man'
'man' -> 'the' | 'in'
'in' -> 'corner'
'corner' -> 'the'
'play' -> 'golf' | 'dachshund' | 'to'
'dachshund' -> 'his'
""")
print(grammar)
ndp = NonprojectiveDependencyParser(grammar)
graphs = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf'])
print('Graphs:')
for graph in graphs:
print(graph)
if __name__ == '__main__':
demo()
| mit |
antoniogrillo/fabcrNFC | node-js/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_sln.py | 1831 | 5099 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
# For each dependencies in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
# check if we have exactly 1 parameter.
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
| mit |
arcyfelix/Machine-Learning-For-Trading | 12_stock_price_plotting_range_and_companies.py | 1 | 1448 | import os
import pandas as pd
import matplotlib.pyplot as plt
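# Price data is assumed to live in CSV files named <base_dir>/<SYMBOL>.csv
# (e.g. data/SPY.csv) containing at least 'Date' and 'Adj Close' columns.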
def symbol_to_path(symbol, base_dir = 'data'):
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def dates_creator():
start_date = '2013-01-01'
end_date = '2013-12-31'
dates = pd.date_range(start_date, end_date)
return dates
def get_data(symbols, dates):
df = pd.DataFrame(index = dates)
if 'SPY' not in symbols: # adding SPY as the main reference
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol),
index_col = 'Date',
parse_dates = True,
usecols = ['Date', 'Adj Close'],
na_values = ['nan'])
df_temp = df_temp.rename(columns = {'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY':
df = df.dropna(subset = ['SPY'])
#print(df)
return df
def plot_data(df, title = 'Stock prices'):
myplot = df.plot(title = title, fontsize = 2)
myplot.set_xlabel('Date')
myplot.set_ylabel('Price')
plt.show()
def plot_selected_data(df, start, end, columns, title = 'Stock prices'):
df = df.ix[start : end, columns]
myplot = df.plot(title = title, fontsize = 10)
myplot.set_xlabel('Date')
myplot.set_ylabel('Price')
plt.show()
symbols = ['AAPL', 'SPY' , 'IBM', 'GOOG', 'TSLA']
if __name__ == "__main__":
dates = dates_creator()
df = get_data(symbols, dates)
plot_selected_data(df, '2013-01-01', '2013-01-31', ['SPY', 'IBM'])
| apache-2.0 |
Godiyos/python-for-android | python-modules/twisted/twisted/python/procutils.py | 61 | 1380 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utilities for dealing with processes.
"""
import os
def which(name, flags=os.X_OK):
"""Search PATH for executable files with the given name.
On newer versions of MS-Windows, the PATHEXT environment variable will be
set to the list of file extensions for files considered executable. This
    will normally include things like ".EXE". This function will also find files
with the given name ending with any of these extensions.
On MS-Windows the only flag that has any meaning is os.F_OK. Any other
flags will be ignored.
@type name: C{str}
@param name: The name for which to search.
@type flags: C{int}
@param flags: Arguments to L{os.access}.
@rtype: C{list}
    @return: A list of the full paths to files found, in the
order in which they were found.
"""
result = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ.get('PATH', None)
if path is None:
return []
for p in os.environ.get('PATH', '').split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, flags):
result.append(p)
for e in exts:
pext = p + e
if os.access(pext, flags):
result.append(pext)
return result
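# Illustrative usage (the result depends entirely on the local PATH):
#   which('python') might return ['/usr/bin/python'] on a typical Unix system.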
| apache-2.0 |
takeshineshiro/neutron | neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py | 16 | 2421 | # Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
from neutron.extensions import portbindings
from neutron.plugins.common import constants as p_constants
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mech_agent
LOG = log.getLogger(__name__)
AGENT_TYPE_MLNX = 'Mellanox plugin agent'
VIF_TYPE_IB_HOSTDEV = 'ib_hostdev'
class MlnxMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
"""Attach to networks using Mellanox eSwitch L2 agent.
The MellanoxMechanismDriver integrates the ml2 plugin with the
Mellanox eswitch L2 agent. Port binding with this driver requires the
Mellanox eswitch agent to be running on the port's host, and that agent
to have connectivity to at least one segment of the port's
network.
"""
def __init__(self):
super(MlnxMechanismDriver, self).__init__(
agent_type=AGENT_TYPE_MLNX,
vif_type=VIF_TYPE_IB_HOSTDEV,
vif_details={portbindings.CAP_PORT_FILTER: False},
supported_vnic_types=[portbindings.VNIC_DIRECT])
def get_allowed_network_types(self, agent=None):
return [p_constants.TYPE_LOCAL, p_constants.TYPE_FLAT,
p_constants.TYPE_VLAN]
def get_mappings(self, agent):
return agent['configurations'].get('interface_mappings', {})
def try_to_bind_segment_for_agent(self, context, segment, agent):
if self.check_segment_for_agent(segment, agent):
if (segment[api.NETWORK_TYPE] in
(p_constants.TYPE_FLAT, p_constants.TYPE_VLAN)):
self.vif_details['physical_network'] = segment[
'physical_network']
context.set_binding(segment[api.ID],
self.vif_type,
self.vif_details)
| apache-2.0 |
Acehaidrey/incubator-airflow | airflow/operators/hive_to_druid.py | 7 | 1700 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated.
Please use `airflow.providers.apache.druid.transfers.hive_to_druid`.
"""
import warnings
# pylint: disable=unused-import
from airflow.providers.apache.druid.transfers.hive_to_druid import HiveToDruidOperator
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.druid.transfers.hive_to_druid`.",
DeprecationWarning,
stacklevel=2,
)
class HiveToDruidTransfer(HiveToDruidOperator):
"""This class is deprecated.
Please use:
`airflow.providers.apache.druid.transfers.hive_to_druid.HiveToDruidOperator`.
"""
def __init__(self, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use
`airflow.providers.apache.druid.transfers.hive_to_druid.HiveToDruidOperator`.""",
DeprecationWarning,
stacklevel=3,
)
super().__init__(**kwargs)
| apache-2.0 |
kosior/eventful | eventful/userprofiles/migrations/0001_initial.py | 1 | 1729 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-10-16 18:32
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='FriendRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('from_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='friend_requests_sent', to=settings.AUTH_USER_MODEL)),
('to_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='friend_requests_received', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('website', models.URLField(blank=True)),
('timezone', models.CharField(blank=True, max_length=35)),
('friends', models.ManyToManyField(related_name='_userprofile_friends_+', to='userprofiles.UserProfile')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='friendrequest',
unique_together=set([('from_user', 'to_user')]),
),
]
| mit |
pintubigfoot/pinturun | printrun/stlview.py | 1 | 13376 | #!/usr/bin/env python
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
import time
import threading
import pyglet
pyglet.options['debug_gl'] = True
from pyglet.gl import *
from pyglet import gl
from .gl.panel import wxGLPanel
from .gl.trackball import trackball, mulquat, build_rotmatrix
from .gl.libtatlin import actors
def vec(*args):
return (GLfloat * len(args))(*args)
class stlview(object):
def __init__(self, facets, batch):
# Create the vertex and normal arrays.
vertices = []
normals = []
for i in facets:
for j in i[1]:
vertices.extend(j)
normals.extend(i[0])
# Create a list of triangle indices.
indices = range(3 * len(facets)) # [[3*i, 3*i+1, 3*i+2] for i in xrange(len(facets))]
#print indices[:10]
self.vertex_list = batch.add_indexed(len(vertices) // 3,
GL_TRIANGLES,
None, # group,
indices,
('v3f/static', vertices),
('n3f/static', normals))
def delete(self):
self.vertex_list.delete()
class StlViewPanel(wxGLPanel):
def __init__(self, parent, size, id = wx.ID_ANY, build_dimensions = None):
super(StlViewPanel, self).__init__(parent, id, wx.DefaultPosition, size, 0)
self.batches = []
self.rot = 0
self.canvas.Bind(wx.EVT_MOUSE_EVENTS, self.move)
self.canvas.Bind(wx.EVT_LEFT_DCLICK, self.double)
self.initialized = 1
self.canvas.Bind(wx.EVT_MOUSEWHEEL, self.wheel)
self.parent = parent
self.initpos = None
if build_dimensions:
self.build_dimensions = build_dimensions
else:
self.build_dimensions = [200, 200, 100, 0, 0, 0]
self.platform = actors.Platform(self.build_dimensions, light = True)
self.dist = max(self.build_dimensions[0], self.build_dimensions[1])
self.basequat = [0, 0, 0, 1]
wx.CallAfter(self.forceresize)
self.mousepos = (0, 0)
def OnReshape(self):
self.mview_initialized = False
super(StlViewPanel, self).OnReshape()
#==========================================================================
# GLFrame OpenGL Event Handlers
#==========================================================================
def OnInitGL(self, call_reshape = True):
'''Initialize OpenGL for use in the window.'''
if self.GLinitialized:
return
self.GLinitialized = True
#create a pyglet context for this panel
self.pygletcontext = gl.Context(gl.current_context)
self.pygletcontext.canvas = self
self.pygletcontext.set_current()
#normal gl init
glClearColor(0, 0, 0, 1)
glColor3f(1, 0, 0)
glEnable(GL_DEPTH_TEST)
glEnable(GL_CULL_FACE)
# Uncomment this line for a wireframe view
#glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
# Simple light setup. On Windows GL_LIGHT0 is enabled by default,
# but this is not the case on Linux or Mac, so remember to always
# include it.
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_LIGHT1)
# Define a simple function to create ctypes arrays of floats:
def vec(*args):
return (GLfloat * len(args))(*args)
glLightfv(GL_LIGHT0, GL_POSITION, vec(.5, .5, 1, 0))
glLightfv(GL_LIGHT0, GL_SPECULAR, vec(.5, .5, 1, 1))
glLightfv(GL_LIGHT0, GL_DIFFUSE, vec(1, 1, 1, 1))
glLightfv(GL_LIGHT1, GL_POSITION, vec(1, 0, .5, 0))
glLightfv(GL_LIGHT1, GL_DIFFUSE, vec(.5, .5, .5, 1))
glLightfv(GL_LIGHT1, GL_SPECULAR, vec(1, 1, 1, 1))
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.5, 0, 0.3, 1))
glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, vec(1, 1, 1, 1))
glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 50)
glMaterialfv(GL_FRONT_AND_BACK, GL_EMISSION, vec(0, 0.1, 0, 0.9))
if call_reshape:
self.OnReshape()
if self.parent.filenames:
for filename in self.parent.filenames:
self.parent.load_file(None, filename)
def double(self, event):
p = event.GetPositionTuple()
sz = self.GetClientSize()
v = map(lambda m, w, b: b * m / w, p, sz, self.build_dimensions[0:2])
v[1] = self.build_dimensions[1] - v[1]
v += [300]
print "Double-click at " + str(v) + " in "
print self
def forceresize(self):
self.SetClientSize((self.GetClientSize()[0], self.GetClientSize()[1] + 1))
self.SetClientSize((self.GetClientSize()[0], self.GetClientSize()[1] - 1))
threading.Thread(target = self.update).start()
self.initialized = 0
def move_shape(self, delta):
"""moves shape (selected in l, which is list ListBox of shapes)
by an offset specified in tuple delta.
Positive numbers move to (rigt, down)"""
name = self.parent.l.GetSelection()
if name == wx.NOT_FOUND:
return False
name = self.parent.l.GetString(name)
model = self.parent.models[name]
model.offsets = [model.offsets[0] + delta[0],
model.offsets[1] + delta[1],
model.offsets[2]
]
self.Refresh()
return True
def move(self, event):
"""react to mouse actions:
no mouse: show red mousedrop
LMB: move active object,
with shift rotate viewport
RMB: nothing
with shift move viewport
"""
self.mousepos = event.GetPositionTuple()
if event.Dragging() and event.LeftIsDown():
if self.initpos is None:
self.initpos = event.GetPositionTuple()
else:
if not event.ShiftDown():
p1 = self.initpos
p2 = event.GetPositionTuple()
x1, y1, _ = self.mouse_to_3d(p1[0], p1[1])
x2, y2, _ = self.mouse_to_3d(p2[0], p2[1])
self.move_shape((x2 - x1, y2 - y1))
self.initpos = p2
return
p1 = self.initpos
p2 = event.GetPositionTuple()
sz = self.GetClientSize()
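                # Convert both mouse positions from pixel coordinates into the
                # [-1, 1] range expected by the trackball helper before
                # computing the rotation quaternion.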
p1x = (float(p1[0]) - sz[0] / 2) / (sz[0] / 2)
p1y = -(float(p1[1]) - sz[1] / 2) / (sz[1] / 2)
p2x = (float(p2[0]) - sz[0] / 2) / (sz[0] / 2)
p2y = -(float(p2[1]) - sz[1] / 2) / (sz[1] / 2)
quat = trackball(p1x, p1y, p2x, p2y, 0.8)
self.basequat = mulquat(self.basequat, quat)
self.initpos = p2
elif event.ButtonUp(wx.MOUSE_BTN_LEFT):
if self.initpos is not None:
self.initpos = None
elif event.ButtonUp(wx.MOUSE_BTN_RIGHT):
if self.initpos is not None:
self.initpos = None
elif event.Dragging() and event.RightIsDown():
if self.initpos is None:
self.initpos = event.GetPositionTuple()
else:
p1 = self.initpos
p2 = event.GetPositionTuple()
if self.orthographic:
x1, y1, _ = self.mouse_to_3d(p1[0], p1[1])
x2, y2, _ = self.mouse_to_3d(p2[0], p2[1])
glTranslatef(x2 - x1, y2 - y1, 0)
else:
glTranslatef(p2[0] - p1[0], -(p2[1] - p1[1]), 0)
self.initpos = p2
def rotate_shape(self, angle):
"""rotates acive shape
positive angle is clockwise
"""
name = self.parent.l.GetSelection()
if name == wx.NOT_FOUND:
return False
name = self.parent.l.GetString(name)
model = self.parent.models[name]
model.rot += angle
def wheel(self, event):
"""react to mouse wheel actions:
rotate object
with shift zoom viewport
"""
delta = event.GetWheelRotation()
if not event.ShiftDown():
angle = 10
if delta > 0:
self.rotate_shape(angle / 2)
else:
self.rotate_shape(-angle / 2)
else:
factor = 1.05
x, y = event.GetPositionTuple()
x, y, _ = self.mouse_to_3d(x, y)
if delta > 0:
self.zoom(factor, (x, y))
else:
self.zoom(1 / factor, (x, y))
def keypress(self, event):
"""gets keypress events and moves/rotates acive shape"""
keycode = event.GetKeyCode()
print keycode
step = 5
angle = 18
if event.ControlDown():
step = 1
angle = 1
#h
if keycode == 72:
self.move_shape((-step, 0))
#l
if keycode == 76:
self.move_shape((step, 0))
#j
if keycode == 75:
self.move_shape((0, step))
#k
if keycode == 74:
self.move_shape((0, -step))
#[
if keycode == 91:
self.rotate_shape(-angle)
#]
if keycode == 93:
self.rotate_shape(angle)
event.Skip()
def update(self):
while True:
time.sleep(0.05)
try:
wx.CallAfter(self.Refresh)
except:
return
def anim(self, obj):
g = 50 * 9.8
v = 20
dt = 0.05
basepos = obj.offsets[2]
obj.offsets[2] += obj.animoffset
while obj.offsets[2] > -1:
time.sleep(dt)
obj.offsets[2] -= v * dt
v += g * dt
if(obj.offsets[2] < 0):
obj.scale[2] *= 1 - 3 * dt
#return
v = v / 4
while obj.offsets[2] < basepos:
time.sleep(dt)
obj.offsets[2] += v * dt
v -= g * dt
obj.scale[2] *= 1 + 5 * dt
obj.scale[2] = 1.0
def create_objects(self):
'''create opengl objects when opengl is initialized'''
if not self.platform.initialized:
self.platform.init()
self.initialized = 1
wx.CallAfter(self.Refresh)
def drawmodel(self, m, n):
batch = pyglet.graphics.Batch()
stlview(m.facets, batch = batch)
m.batch = batch
m.animoffset = 300
#print m
#threading.Thread(target = self.anim, args = (m, )).start()
wx.CallAfter(self.Refresh)
def update_object_resize(self):
'''called when the window recieves only if opengl is initialized'''
pass
def draw_objects(self):
'''called in the middle of ondraw after the buffer has been cleared'''
self.create_objects()
glPushMatrix()
glTranslatef(0, 0, -self.dist)
glMultMatrixd(build_rotmatrix(self.basequat)) # Rotate according to trackball
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.2, 0.2, 0.2, 1))
glTranslatef(- self.build_dimensions[3] - self.platform.width / 2,
- self.build_dimensions[4] - self.platform.depth / 2, 0) # Move origin to bottom left of platform
# Draw platform
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
self.platform.draw()
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
# Draw mouse
glPushMatrix()
x, y, z = self.mouse_to_3d(self.mousepos[0], self.mousepos[1], 0.9)
glTranslatef(x, y, z)
glBegin(GL_TRIANGLES)
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(1, 0, 0, 1))
glNormal3f(0, 0, 1)
glVertex3f(2, 2, 0)
glVertex3f(-2, 2, 0)
glVertex3f(-2, -2, 0)
glVertex3f(2, -2, 0)
glVertex3f(2, 2, 0)
glVertex3f(-2, -2, 0)
glEnd()
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.3, 0.7, 0.5, 1))
glPopMatrix()
glPushMatrix()
# Draw objects
for i in self.parent.models.values():
glPushMatrix()
glTranslatef(*(i.offsets))
glRotatef(i.rot, 0.0, 0.0, 1.0)
glScalef(*i.scale)
i.batch.draw()
glPopMatrix()
glPopMatrix()
glPopMatrix()
def main():
app = wx.App(redirect = False)
frame = wx.Frame(None, -1, "GL Window", size = (400, 400))
    StlViewPanel(frame, (400, 400))
frame.Show(True)
app.MainLoop()
app.Destroy()
if __name__ == "__main__":
main()
| gpl-3.0 |
l8orre/XG8 | nxtPwt/nxtDB.py | 2 | 11048 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 25 10:17:16 2014
@author: azure
"""
from PyQt4 import QtCore #Qt,
from PyQt4.Qt import QTimer
from PyQt4.QtCore import QObject , pyqtSignal, pyqtSlot, SIGNAL
import sqlite3 as sq
from nxtPwt.nxtApiPrototypes import nxtQs
import os
from requests import Request as Req
from requests import Session
from string import ascii_letters as letters
from string import digits
from numpy.random import randint as ri
from operator import mod as opmod
class nxtUseCaseMeta(QObject):
""" This is an abstract meta class that has elemtary sigs and methods defined.
All use case classes inherit from this, so they know all the signals for emission
The useCaseClass is tho ONLY one that talks to the api.
"""
apiCalls = nxtQs() # static! dict of prototypes to be filled with vals for apiReq
def __init__(self, sessMan ): #
super(nxtUseCaseMeta, self).__init__()
self.nxtApi = sessMan.nxtApi # there is only ONE apiSigs instance, and that is in the sessMan.
# **kwargs as dict ?!?!
class WalletDB_Handler(nxtUseCaseMeta): # need to talk to NRS, hence UC derived
"""
    This is a container and manager object for the QThread that maintains the wallet database."""
def __init__(self, sessMan, walletDB_fName = "nxtWallet.dat", walletLogger = None , consLogger =None, host='localhost' , port='6876' ):
super(nxtUseCaseMeta, self).__init__( parent = None)
self.sessMan = sessMan
self.qPool = sessMan.qPool # qPool is already in sessMan!
self.consLogger = consLogger
self.walletLogger = walletLogger
self.walletDB_fName = walletDB_fName
self.sessUrl = 'http://' + host + ':' + port + '/nxt?'
self.init_WalletDB() # the init of the sqlite DB is not supposed to be threaded!
self.walletPass = None
DB = ( self.walletDBConn, self.walletDBCur)
# the QTHread dual bundle: Emitter+Runner
self.walletDB_Emitter = WalletDB_Emitter( self.consLogger, self.walletLogger )
self.walletDBb_Runner = WalletDB_Runner( self.walletDB_Emitter, self.sessMan, DB, self.consLogger , self.walletLogger ) #self.DBLogger, )
self.walletDBb_Runner.setAutoDelete(False)
self.qPool.start(self.walletDBb_Runner)
self.consLogger.info(' WalletDB_Handler - self.qPool.activeThreadCount() = %s ', str( self.qPool.activeThreadCount()) )
def genRanSec(self):
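        # Build a 96-character random secret from [A-Za-z0-9], picking each
        # character index with numpy's randint.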
allchars = letters+digits
numChars = len(allchars) # 62 no special chars, just caps, digs, mins
ranSec = ''
charList = ri(0,numChars, 96 )
for char in charList:
ranSec += allchars[char]
return ranSec
def init_WalletDB(self, ):
# CREATE TABLE customer(
# id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
# firstname VARCHAR(50),
# lastname VARCHAR(50),
# age INTEGER
# )
#
#
# 1Ce1NpJJAH9uLKMR37vzAmnqTjB4Ck8L4g l=34
# NXT-JTA7-B2QR-8BFC-2V222 l =24
# NXTxJTA7xB2QRx8BFCx2V222nxtnxtnxtx l =34
#
# NFD-AQQA-MREZ-U45Z-FWSZG l=24
# 15528161504488872648 l=20
#Longer answer: If you declare a column of a table to be INTEGER PRIMARY KEY, then whenever you insert a NULL into that column of the table, the NULL is automatically converted into an integer which is one greater than the largest value of that column over all other rows in the table, or 1 if the table is empty.
#id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
wallet_is_new = not os.path.exists(self.walletDB_fName)
try:
nxtWallet_sqlite3_schema = """create table nxtWallet (
accountName text unique,
NxtNumeric VARCHAR(20) unique primary key,
NxtRS VARCHAR(24) unique,
NxtRS_BTC VARCHAR(34),
NxtSecret VARCHAR(96) unique,
Nxt_hasPubkey VARCHAR(5)
);
"""
# nxtWallet_sqlite3_schema = """create table nxtWallet (
# accountName text unique,
# NxtNumeric text primary key,
# NxtRS text unique,
# NxtRS_BTC text,
# NxtSecret text unique,
# Nxt_hasPubkey text
# );
# """
#
self.consLogger.info('creating wallet.db with filename: %s ', self.walletDB_fName )
self.walletDBConn = sq.connect(self.walletDB_fName)
self.walletDBCur = self.walletDBConn.cursor()
self.walletDBCur.execute(nxtWallet_sqlite3_schema)
sessionTemp = Session()
nxtSecret = self.genRanSec()
headers = {'content-type': 'application/json'}
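            # Ask the NRS node (getAccountId) for the numeric and RS account
            # IDs corresponding to the freshly generated secret phrase.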
getAccountId = {
"requestType" : "getAccountId" , \
"secretPhrase" : nxtSecret , \
"pubKey" : ""
}
NxtReqT = Req( method='POST', url = self.sessUrl, params = {}, headers = headers )
NxtReqT.params=getAccountId # same obj, only replace params
preppedReq = NxtReqT.prepare()
response = sessionTemp.send(preppedReq)
NxtResp = response.json()
# print("\n\n ------>" + str(NxtResp))
nxtNumAcc = NxtResp['account']
nxtRSAcc = NxtResp['accountRS']
NxtRS_BTC = NxtResp['accountRS']
NxtRS_BTC =NxtRS_BTC.replace("-","x")
NxtRS_BTC+='nxtnxtnxt'
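            # NxtRS_BTC is the RS address with '-' replaced by 'x' plus a
            # 'nxtnxtnxt' suffix (see the address-length notes in the comment
            # block above).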
# make sure it has no pubKey here!!!
# and if it has raise a huge alarm!
accName = ''
has_pubKey = 'N'
newNxtAccount = ( accName,nxtNumAcc ,nxtRSAcc ,NxtRS_BTC ,nxtSecret , has_pubKey)
insertNewNxtAccount = """insert into nxtWallet values ( ?,?,?,?,?,?)"""
self.walletDBCur.execute(insertNewNxtAccount, newNxtAccount )
self.walletDBConn.commit()
#http://stackoverflow.com/questions/11490100/no-autoincrement-for-integer-primary-key-in-sqlite3 shit.
testACCName = 'testAcc'
testNetAcc = '2865886802744497404' # <------testNet------------- FOR TESTING with non-existing accounts in wallet!!!
testNetAccRS = 'NXT-3P9W-VMQ3-9DRR-4EFKH'
testNetAccRS_BTC = testNetAccRS.replace("-","x")
testNetAccRS_BTC += 'nxtnxtnxt'
testNetAccSec = '' #
testACC = ( testACCName, testNetAcc , testNetAccRS,testNetAccRS_BTC ,testNetAccSec , 'Y' )
self.walletDBCur.execute(insertNewNxtAccount, testACC )
self.walletDBConn.commit()
testACCName = 'testAcc2'
testNetAcc = '16159101027034403504' # <------testNet------------- FOR TESTING with non-existing accounts in wallet!!!
testNetAccRS = 'NXT-L6PJ-SMZ2-5TDB-GA7J2'
testNetAccRS_BTC = testNetAccRS.replace("-","x")
testNetAccRS_BTC += 'nxtnxtnxt'
testNetAccSec = '' #
testACC = ( testACCName, testNetAcc , testNetAccRS,testNetAccRS_BTC ,testNetAccSec , 'Y' )
self.walletDBCur.execute(insertNewNxtAccount, testACC )
self.walletDBConn.commit()
except Exception as inst:
except_reason = str(inst.args)
self.consLogger.info('could not create wallet db with filename %s because: %s.', self.walletDB_fName, except_reason )
self.walletDBConn = sq.connect(self.walletDB_fName)
self.walletDBCur = self.walletDBConn.cursor()
self.walletDBCur.execute('SELECT SQLITE_VERSION()')
sqlVers = self.walletDBCur.fetchone()
self.consLogger.info('use existing db with SQLite version: %s ', sqlVers )
self.walletDBConn = sq.connect(self.walletDB_fName)
self.walletDBCur = self.walletDBConn.cursor()
#########################################################
self.walletDBConn.commit()
self.consLogger.info('walletDB - some info here!')
class WalletDB_Emitter(QObject):
walletDBSig = pyqtSignal(object ,object)
def __init__(self, consLogger = None , walletLogger = None ): #emitter,
super(WalletDB_Emitter, self).__init__()
self.conLogger = consLogger
self.walletLogger = walletLogger
class WalletDB_Runner(QtCore.QRunnable):
"""- This is what needs to be put into the QThreadpool """
#nxtApi = nxtApi
def __init__(self, emitter , sessMan, DB, consLogger = None , walletLogger = None, ): #emitter,
super(QtCore.QRunnable, self).__init__()
self.consLogger = consLogger
self.walletLogger = walletLogger
self.walletDBConn = DB[0]
self.walletDBCur = DB[1]
self.emitter = emitter
self.walletDB_pollTime = 25000
self.walletDBTimer = QTimer()
QObject.connect(self.walletDBTimer, SIGNAL("timeout()"), self.walletDBTimer_CB)
    def run(self,):
        self.walletDBTimer.start(self.walletDB_pollTime)
def walletDBTimer_CB(self,):
# this is a heartbeat for now!
self.consLogger.info('walletDB heartbeat')
#########################################################
# do the activities here
# INSERT INTO addresses (accN, addr,secret) VALUES (NULL, "1X123", "sec13");
# SELECT * FROM addresses WHERE accN is NULL;
# SELECT * FROM addresses WHERE addr="1X123";
# DELETE from addresses where addr="1X123";
# CREATE TABLE addresses (accountName TEXT, address TEXT UNIQUE, secret TEXT);
# INSERT INTO addresses (accountName, address, secret) VALUES (NULL, "1X123", "sec13");
# SELECT * FROM addresses WHERE accountName is NULL;
#
#self.blockDBCur.execute("CREATE TABLE nxtBlockH_to_Addr(height INT, blockAddr TEXT)")
#self.blockDBCur.execute("INSERT INTO nxtBlockH_to_Addr(height, blockAddr) VALUES(?,?)",( 0 , "2680262203532249785")) # genesis b
#
#
#
#
#
# CREATE TABLE t1(a, b UNIQUE);
#
# CREATE TABLE t1(a, b PRIMARY KEY);
#
# CREATE TABLE t1(a, b);
# CREATE UNIQUE INDEX t1b ON t1(b);
#
#accN|addr|secret
#|1K123|sadf
#|1K456|sterst
#sqlite>
| mit |
chvrga/outdoor-explorer | java/play-1.4.4/python/Lib/hotshot/log.py | 3 | 6336 | import _hotshot
import os.path
import parser
import symbol
from _hotshot import \
WHAT_ENTER, \
WHAT_EXIT, \
WHAT_LINENO, \
WHAT_DEFINE_FILE, \
WHAT_DEFINE_FUNC, \
WHAT_ADD_INFO
__all__ = ["LogReader", "ENTER", "EXIT", "LINE"]
ENTER = WHAT_ENTER
EXIT = WHAT_EXIT
LINE = WHAT_LINENO
class LogReader:
def __init__(self, logfn):
# fileno -> filename
self._filemap = {}
# (fileno, lineno) -> filename, funcname
self._funcmap = {}
self._reader = _hotshot.logreader(logfn)
self._nextitem = self._reader.next
self._info = self._reader.info
if self._info.has_key('current-directory'):
self.cwd = self._info['current-directory']
else:
self.cwd = None
# This mirrors the call stack of the profiled code as the log
# is read back in. It contains tuples of the form:
#
# (file name, line number of function def, function name)
#
self._stack = []
self._append = self._stack.append
self._pop = self._stack.pop
def close(self):
self._reader.close()
def fileno(self):
"""Return the file descriptor of the log reader's log file."""
return self._reader.fileno()
def addinfo(self, key, value):
"""This method is called for each additional ADD_INFO record.
This can be overridden by applications that want to receive
these events. The default implementation does not need to be
called by alternate implementations.
The initial set of ADD_INFO records do not pass through this
mechanism; this is only needed to receive notification when
new values are added. Subclasses can inspect self._info after
calling LogReader.__init__().
"""
pass
def get_filename(self, fileno):
try:
return self._filemap[fileno]
except KeyError:
raise ValueError, "unknown fileno"
def get_filenames(self):
return self._filemap.values()
def get_fileno(self, filename):
filename = os.path.normcase(os.path.normpath(filename))
for fileno, name in self._filemap.items():
if name == filename:
return fileno
raise ValueError, "unknown filename"
def get_funcname(self, fileno, lineno):
try:
return self._funcmap[(fileno, lineno)]
except KeyError:
raise ValueError, "unknown function location"
# Iteration support:
# This adds an optional (& ignored) parameter to next() so that the
# same bound method can be used as the __getitem__() method -- this
# avoids using an additional method call which kills the performance.
def next(self, index=0):
while 1:
# This call may raise StopIteration:
what, tdelta, fileno, lineno = self._nextitem()
# handle the most common cases first
if what == WHAT_ENTER:
filename, funcname = self._decode_location(fileno, lineno)
t = (filename, lineno, funcname)
self._append(t)
return what, t, tdelta
if what == WHAT_EXIT:
return what, self._pop(), tdelta
if what == WHAT_LINENO:
filename, firstlineno, funcname = self._stack[-1]
return what, (filename, lineno, funcname), tdelta
if what == WHAT_DEFINE_FILE:
filename = os.path.normcase(os.path.normpath(tdelta))
self._filemap[fileno] = filename
elif what == WHAT_DEFINE_FUNC:
filename = self._filemap[fileno]
self._funcmap[(fileno, lineno)] = (filename, tdelta)
elif what == WHAT_ADD_INFO:
# value already loaded into self.info; call the
# overridable addinfo() handler so higher-level code
# can pick up the new value
if tdelta == 'current-directory':
self.cwd = lineno
self.addinfo(tdelta, lineno)
else:
raise ValueError, "unknown event type"
def __iter__(self):
return self
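# Rough usage sketch (illustrative only, not part of the original module);
# "prof.log" is a placeholder filename for an existing hotshot log:
#
#   reader = LogReader("prof.log")
#   for what, (filename, lineno, funcname), tdelta in reader:
#       if what == ENTER:
#           print "entering", funcname
#       elif what == EXIT:
#           print "leaving ", funcname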
#
# helpers
#
def _decode_location(self, fileno, lineno):
try:
return self._funcmap[(fileno, lineno)]
except KeyError:
#
# This should only be needed when the log file does not
# contain all the DEFINE_FUNC records needed to allow the
# function name to be retrieved from the log file.
#
if self._loadfile(fileno):
filename = funcname = None
try:
filename, funcname = self._funcmap[(fileno, lineno)]
except KeyError:
filename = self._filemap.get(fileno)
funcname = None
self._funcmap[(fileno, lineno)] = (filename, funcname)
return filename, funcname
def _loadfile(self, fileno):
try:
filename = self._filemap[fileno]
except KeyError:
print "Could not identify fileId", fileno
return 1
if filename is None:
return 1
absname = os.path.normcase(os.path.join(self.cwd, filename))
try:
fp = open(absname)
except IOError:
return
st = parser.suite(fp.read())
fp.close()
# Scan the tree looking for def and lambda nodes, filling in
# self._funcmap with all the available information.
funcdef = symbol.funcdef
lambdef = symbol.lambdef
stack = [st.totuple(1)]
while stack:
tree = stack.pop()
try:
sym = tree[0]
except (IndexError, TypeError):
continue
if sym == funcdef:
self._funcmap[(fileno, tree[2][2])] = filename, tree[2][1]
elif sym == lambdef:
self._funcmap[(fileno, tree[1][2])] = filename, "<lambda>"
stack.extend(list(tree[1:]))
| mit |
altcommunitycoin/altcommunitycoin-skunk | contrib/linearize/linearize-hashes.py | 4 | 2762 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def get_block_hashes(settings):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
for height in xrange(settings['min_height'], settings['max_height']+1):
hash = rpc.getblockhash(height)
print(hash)
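# A minimal example of the CONFIG-FILE parsed in __main__ below (illustrative
# only; every key shown is one the script actually reads, and any omitted key
# falls back to the defaults set further down):
#
#   host=127.0.0.1
#   port=16174
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=319000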
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 16174
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 319000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| mit |
USGSDenverPychron/pychron | pychron/image/cv_wrapper.py | 1 | 7683 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
from collections import namedtuple
from numpy import array, asarray, ndarray
from numpy.lib.function_base import percentile
from scipy.ndimage.filters import laplace
try:
from cv2 import VideoCapture, VideoWriter, imwrite, line, fillPoly, \
polylines, \
rectangle, imread, findContours, drawContours, arcLength, \
approxPolyDP, contourArea, isContourConvex, boundingRect, GaussianBlur, \
addWeighted, \
circle, moments, minAreaRect, minEnclosingCircle, convexHull
from cv import ConvertImage, fromarray, LoadImage, Flip, \
Resize, CreateImage, CvtColor, Scalar, CreateMat, Copy, GetSubRect, \
PolyLine, Split, \
Merge, Laplace, ConvertScaleAbs, GetSize
from cv import CV_CVTIMG_SWAP_RB, CV_8UC1, CV_BGR2GRAY, CV_GRAY2BGR, \
CV_8UC3, CV_RGB, CV_16UC1, CV_32FC3, CV_CHAIN_APPROX_NONE, \
CV_RETR_EXTERNAL, \
CV_AA, CV_16UC3, CV_16SC1
except ImportError, e:
print 'exception', e
print 'OpenCV required'
# ============= local library imports ==========================
from pychron.core.geometry.centroid import calculate_centroid
def get_focus_measure(src, kind):
if not isinstance(src, ndarray):
src = asarray(src)
dst = laplace(src.astype(float))
d = dst.flatten()
d = percentile(d, 99)
return d.mean()
def crop(src, x, y, w, h):
if not isinstance(src, ndarray):
src = asarray(src)
return src[y:y + h, x:x + w]
def save_image(src, path):
if not isinstance(src, ndarray):
src = asarray(src)
imwrite(path, src)
def colorspace(src, cs=None):
from skimage.color.colorconv import gray2rgb
if not isinstance(src, ndarray):
src = asarray(src)
return gray2rgb(src)
def grayspace(src):
if isinstance(src, ndarray):
from skimage.color.colorconv import rgb2gray
dst = rgb2gray(src)
else:
if src.channels > 1:
dst = CreateMat(src.height, src.width, CV_8UC1)
CvtColor(src, dst, CV_BGR2GRAY)
else:
dst = src
return dst
def resize(src, w, h, dst=None):
if isinstance(dst, tuple):
dst = CreateMat(*dst)
if isinstance(src, ndarray):
src = asMat(src)
if dst is None:
dst = CreateMat(int(h), int(w), src.type)
Resize(src, dst)
return dst
def flip(src, mode):
Flip(src, src, mode)
return src
def get_size(src):
if hasattr(src, 'width'):
return src.width, src.height
else:
h, w = src.shape[:2]
return w, h
def swap_rb(src):
try:
ConvertImage(src, src, CV_CVTIMG_SWAP_RB)
except TypeError:
src = fromarray(src)
ConvertImage(src, src, CV_CVTIMG_SWAP_RB)
return src
_cv_swap_rb = swap_rb
def asMat(arr):
return fromarray(arr)
def load_image(p, swap_rb=False):
img = imread(p)
if swap_rb:
img = _cv_swap_rb(img)
return img
def get_capture_device():
v = VideoCapture()
return v
def new_video_writer(path, fps, size):
fourcc = 'MJPG'
v = VideoWriter(path, fourcc, fps, size)
return v
# ===============================================================================
# image manipulation
# ===============================================================================
def sharpen(src):
src = asarray(src)
im = GaussianBlur(src, (3, 3), 3)
addWeighted(src, 1.5, im, -0.5, 0, im)
return im
# ===============================================================================
# drawing
# ===============================================================================
_new_point = namedtuple('Point', 'x y')
def new_point(x, y, tt=False):
x, y = map(int, (x, y))
if tt:
return x, y
else:
return _new_point(x, y)
def convert_color(color):
if isinstance(color, tuple):
color = CV_RGB(*color)
else:
color = Scalar(color)
return color
def draw_circle(src, center, radius, color=(255.0, 0, 0), thickness=1):
if isinstance(center, tuple):
center = new_point(*center)
circle(src, center, radius,
convert_color(color),
thickness=thickness,
lineType=CV_AA)
def draw_lines(src, lines, color=(255, 0, 0), thickness=3):
if lines:
for p1, p2 in lines:
p1 = new_point(*p1)
p2 = new_point(*p2)
line(src, p1, p2,
convert_color(color), thickness, 8)
def draw_polygons(img, polygons, thickness=1, color=(0, 255, 0)):
color = convert_color(color)
if thickness == -1:
fillPoly(img, polygons, color)
else:
polylines(img, array(polygons, dtype='int32'), 1, color,
thickness=thickness)
def draw_rectangle(src, x, y, w, h, color=(255, 0, 0), thickness=1):
p1 = new_point(x, y, tt=True)
p2 = new_point(x + w, y + h, tt=True)
rectangle(src, p1, p2, convert_color(color), thickness=thickness)
def draw_contour_list(src, contours, hierarchy, external_color=(0, 255, 255),
hole_color=(255, 0, 255),
thickness=1):
n = len(contours)
for i, _ in enumerate(contours):
j = i + 1
drawContours(src, contours, i,
convert_color((j * 255 / n, j * 255 / n, 0)), -1
)
def get_centroid(pts):
return calculate_centroid(pts)
# ===============================================================================
# segmentation
# ===============================================================================
def contour(src):
return findContours(src.copy(), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE)
def get_polygons(src,
contours, hierarchy,
convextest=False,
nsides=5,
min_area=100,
perimeter_smooth_factor=0.001,
**kw):
polygons = []
areas = []
centroids = []
min_enclose = []
for cont in contours:
m = arcLength(cont, True)
result = approxPolyDP(cont, m * perimeter_smooth_factor, True)
area = abs(contourArea(result))
M = moments(cont)
if not M['m00']:
continue
cent = int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])
if not len(result) > nsides:
continue
if not area > min_area:
continue
if convextest and not isContourConvex(result):
continue
a, _, b = cont.shape
polygons.append(cont.reshape(a, b))
ca = minEnclosingCircle(result)
min_enclose.append(ca[1] ** 2 * 3.1415)
areas.append(area)
centroids.append(cent)
return polygons, areas, min_enclose, centroids
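# Rough usage sketch (illustrative; assumes the old cv2 API imported above,
# where findContours() returns (contours, hierarchy), and "bw" stands for some
# 8-bit single-channel, e.g. thresholded, image):
#
#   contours, hierarchy = contour(bw)
#   polygons, areas, min_enclose, centroids = get_polygons(bw, contours, hierarchy)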
# ============= EOF =============================================
| apache-2.0 |
hriechmann/distributed_process_manager | src/eu/hriechmann/distributed_process_manager/common.py | 1 | 2777 | __author__ = 'hriechma'
from enum import Enum
import hashlib
import os
ManagerCommands = Enum("ManagerCommands", "REGISTER INIT_PROCESS START_PROCESS STOP_PROCESS SEND_LOGS KEEPALIVE")
ClientCommands = Enum("ClientCommands", "REGISTER PROCESSSTATUS_CHANGED PROCESS_LOGS KEEPALIVE")
ServerCommands = Enum("ServerCommands", "NEW_CLIENT LOST_CLIENT SEND_KEEPALIVE")
ProcessStati = Enum("ProcessStati", "INIT RUNNING STOPPING STOPPED FAILED KILLED")
ClientStati = Enum("ClientStati", "NOT_RUNNING RUNNING")
class Message(object):
def __init__(self, receiver, command, payload=None):
self.receiver = receiver
self.command = command
self.payload = payload
def __str__(self):
return "Message to "+str(self.receiver)+". Command is: "+str(self.command)
class ProcessDescription(object):
def __init__(self, id, target_host, command, working_directory="", env={}):
self.id = id
self.target_host = target_host
self.command = command
self.working_directory = working_directory
self.env = env
print("Creating process: ", env)
def __str__(self):
return "Process: "+str(self.id)+"to be executed on host: "+self.target_host+"Command: "+self.command
class ProcessStatus(object):
def __init__(self, responsible_client, process_desc):
self.responsible_client = responsible_client
self.process_desc = process_desc
self.status = ProcessStati.INIT
self.log_out = ""
self.log_err = ""
def __str__(self):
return "ProcessStatues: Process: "+str(self.process_desc)+" on client: " +\
str(self.responsible_client)+" has status: "+self.status
class ClientDescription(object):
def __init__(self, hostname, local_path=""):
self.hostname = hostname
self.status = ClientStati.NOT_RUNNING
self.local_path = local_path
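# Illustrative only: how the classes above might be instantiated (host name,
# command, directory and environment are made-up values):
#
#   desc = ProcessDescription("demo", "workstation-1", "python worker.py",
#                             working_directory="/tmp", env={"DEBUG": "1"})
#   status = ProcessStatus(responsible_client="workstation-1", process_desc=desc)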
BLOCKSIZE = 65536
MAX_SIZE = 1024*1024*100
class IncrementalFileReaderAndHasher(object):
def __init__(self, filename):
self.hasher = hashlib.sha1()
self.filename = filename
statinfo = os.stat(self.filename)
self.big_file = statinfo.st_size > MAX_SIZE
if self.big_file:
    # large file: hash it block by block instead of keeping it in memory
    self.file_contents = None
    with open(self.filename, 'rb') as afile:
        buf = afile.read(BLOCKSIZE)
        while len(buf) > 0:
            self.hasher.update(buf)
            buf = afile.read(BLOCKSIZE)
else:
    # small file: keep the contents around and hash them in one go
    afile = open(self.filename, 'rb')
    self.file_contents = afile.read()
    self.hasher.update(self.file_contents)
    afile.close()
def hash(self):
return self.hasher.hexdigest()
def get_file_contents(self):
if self.big_file:
# big files are not cached in memory (see __init__) and lazy reading is not implemented
raise NotImplementedError("get_file_contents() does not support big files")
else:
return self.file_contents | mit |
JuliaPackageMirrors/NumericExtensions.jl | doc/source/conf.py | 4 | 7870 | # -*- coding: utf-8 -*-
#
# NumericExtensions.jl documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 24 08:00:56 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NumericExtensions.jl'
copyright = u'2013, Dahua Lin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'NumericExtensionsjldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'NumericExtensionsjl.tex', u'NumericExtensions.jl Documentation',
u'Dahua Lin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'numericextensionsjl', u'NumericExtensions.jl Documentation',
[u'Dahua Lin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'NumericExtensionsjl', u'NumericExtensions.jl Documentation',
u'Dahua Lin', 'NumericExtensionsjl', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit |
Aaron0927/xen-4.2.1 | tools/tests/utests/run_all_tests.py | 42 | 1310 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2009 flonatel GmbH & Co. KG
#============================================================================
import unittest
import utests.ut_util.ut_fileuri
import utests.ut_xend.ut_XendConfig
import utests.ut_xend.ut_image
suite = unittest.TestSuite(
[utests.ut_util.ut_fileuri.suite(),
utests.ut_xend.ut_XendConfig.suite(),
utests.ut_xend.ut_image.suite(),
])
if __name__ == "__main__":
testresult = unittest.TextTestRunner(verbosity=3).run(suite)
| gpl-2.0 |
wildchildyn/autism-website | yanni_env/lib/python3.6/site-packages/sqlalchemy/util/queue.py | 33 | 6548 | # util/queue.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""An adaptation of Py2.3/2.4's Queue module which supports reentrant
behavior, using RLock instead of Lock for its mutex object. The
Queue object is used exclusively by the sqlalchemy.pool.QueuePool
class.
This is to support the connection pool's usage of weakref callbacks to return
connections to the underlying Queue, which can in extremely
rare cases be invoked within the ``get()`` method of the Queue itself,
producing a ``put()`` inside the ``get()`` and therefore a reentrant
condition.
"""
from collections import deque
from time import time as _time
from .compat import threading
__all__ = ['Empty', 'Full', 'Queue']
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue:
def __init__(self, maxsize=0):
"""Initialize a queue object with a given maximum size.
If `maxsize` is <= 0, the queue size is infinite.
"""
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the two conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.RLock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not
reliable!)."""
self.mutex.acquire()
n = self._empty()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not
reliable!)."""
self.mutex.acquire()
n = self._full()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args `block` is True and `timeout` is None (the
default), block if necessary until a free slot is
available. If `timeout` is a positive number, it blocks at
most `timeout` seconds and raises the ``Full`` exception if no
free slot was available within that time. Otherwise (`block`
is false), put an item on the queue if a free slot is
immediately available, else raise the ``Full`` exception
(`timeout` is ignored in that case).
"""
self.not_full.acquire()
try:
if not block:
if self._full():
raise Full
elif timeout is None:
while self._full():
self.not_full.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._full():
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the ``Full`` exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args `block` is True and `timeout` is None (the
default), block if necessary until an item is available. If
`timeout` is a positive number, it blocks at most `timeout`
seconds and raises the ``Empty`` exception if no item was
available within that time. Otherwise (`block` is false),
return an item if one is immediately available, else raise the
``Empty`` exception (`timeout` is ignored in that case).
"""
self.not_empty.acquire()
try:
if not block:
if self._empty():
raise Empty
elif timeout is None:
while self._empty():
self.not_empty.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._empty():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the ``Empty`` exception.
"""
return self.get(False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = deque()
def _qsize(self):
return len(self.queue)
# Check whether the queue is empty
def _empty(self):
return not self.queue
# Check whether the queue is full
def _full(self):
return self.maxsize > 0 and len(self.queue) == self.maxsize
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
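# Minimal usage sketch (illustrative, not part of the module): the pool drives
# this Queue much like the stdlib one.
#
#   q = Queue(maxsize=2)
#   q.put("conn1")
#   q.put("conn2")
#   q.get()               # -> "conn1" (FIFO)
#   q.put_nowait("conn3")
#   try:
#       q.put_nowait("conn4")
#   except Full:
#       pass              # queue already holds maxsize items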
| gpl-3.0 |
phantasien/falkor | deps/bastian/deps/v8/tools/push-to-trunk/git_recipes.py | 8 | 6598 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
class GitFailedException(Exception):
pass
def Strip(f):
def new_f(*args, **kwargs):
return f(*args, **kwargs).strip()
return new_f
def MakeArgs(l):
"""['-a', '', 'abc', ''] -> '-a abc'"""
return " ".join(filter(None, l))
def Quoted(s):
return "\"%s\"" % s
class GitRecipesMixin(object):
def GitIsWorkdirClean(self):
return self.Git("status -s -uno").strip() == ""
@Strip
def GitBranch(self):
return self.Git("branch")
def GitCreateBranch(self, name, branch=""):
assert name
self.Git(MakeArgs(["checkout -b", name, branch]))
def GitDeleteBranch(self, name):
assert name
self.Git(MakeArgs(["branch -D", name]))
def GitReset(self, name):
assert name
self.Git(MakeArgs(["reset --hard", name]))
def GitStash(self):
self.Git(MakeArgs(["stash"]))
def GitRemotes(self):
return map(str.strip, self.Git(MakeArgs(["branch -r"])).splitlines())
def GitCheckout(self, name):
assert name
self.Git(MakeArgs(["checkout -f", name]))
def GitCheckoutFile(self, name, branch_or_hash):
assert name
assert branch_or_hash
self.Git(MakeArgs(["checkout -f", branch_or_hash, "--", name]))
def GitCheckoutFileSafe(self, name, branch_or_hash):
try:
self.GitCheckoutFile(name, branch_or_hash)
except GitFailedException: # pragma: no cover
# The file doesn't exist in that revision.
return False
return True
def GitChangedFiles(self, git_hash):
assert git_hash
try:
files = self.Git(MakeArgs(["diff --name-only",
git_hash,
"%s^" % git_hash]))
return map(str.strip, files.splitlines())
except GitFailedException: # pragma: no cover
# Git fails using "^" at branch roots.
return []
@Strip
def GitCurrentBranch(self):
for line in self.Git("status -s -b -uno").strip().splitlines():
match = re.match(r"^## (.+)", line)
if match: return match.group(1)
raise Exception("Couldn't find curent branch.") # pragma: no cover
@Strip
def GitLog(self, n=0, format="", grep="", git_hash="", parent_hash="",
branch="", reverse=False):
assert not (git_hash and parent_hash)
args = ["log"]
if n > 0:
args.append("-%d" % n)
if format:
args.append("--format=%s" % format)
if grep:
args.append("--grep=\"%s\"" % grep.replace("\"", "\\\""))
if reverse:
args.append("--reverse")
if git_hash:
args.append(git_hash)
if parent_hash:
args.append("%s^" % parent_hash)
args.append(branch)
return self.Git(MakeArgs(args))
def GitGetPatch(self, git_hash):
assert git_hash
return self.Git(MakeArgs(["log", "-1", "-p", git_hash]))
# TODO(machenbach): Unused? Remove.
def GitAdd(self, name):
assert name
self.Git(MakeArgs(["add", Quoted(name)]))
def GitApplyPatch(self, patch_file, reverse=False):
assert patch_file
args = ["apply --index --reject"]
if reverse:
args.append("--reverse")
args.append(Quoted(patch_file))
self.Git(MakeArgs(args))
def GitUpload(self, reviewer="", author="", force=False, cq=False,
bypass_hooks=False):
args = ["cl upload --send-mail"]
if author:
args += ["--email", Quoted(author)]
if reviewer:
args += ["-r", Quoted(reviewer)]
if force:
args.append("-f")
if cq:
args.append("--use-commit-queue")
if bypass_hooks:
args.append("--bypass-hooks")
# TODO(machenbach): Check output in forced mode. Verify that all required
# base files were uploaded, if not retry.
self.Git(MakeArgs(args), pipe=False)
def GitCommit(self, message="", file_name=""):
assert message or file_name
args = ["commit"]
if file_name:
args += ["-aF", Quoted(file_name)]
if message:
args += ["-am", Quoted(message)]
self.Git(MakeArgs(args))
def GitPresubmit(self):
self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"")
def GitDCommit(self):
self.Git("cl dcommit -f --bypass-hooks", retry_on=lambda x: x is None)
def GitDiff(self, loc1, loc2):
return self.Git(MakeArgs(["diff", loc1, loc2]))
def GitPull(self):
self.Git("pull")
def GitSVNFetch(self):
self.Git("svn fetch")
def GitSVNRebase(self):
self.Git("svn rebase")
# TODO(machenbach): Unused? Remove.
@Strip
def GitSVNLog(self):
return self.Git("svn log -1 --oneline")
@Strip
def GitSVNFindGitHash(self, revision, branch=""):
assert revision
return self.Git(MakeArgs(["svn find-rev", "r%s" % revision, branch]))
@Strip
def GitSVNFindSVNRev(self, git_hash, branch=""):
return self.Git(MakeArgs(["svn find-rev", git_hash, branch]))
def GitSVNDCommit(self):
return self.Git("svn dcommit 2>&1", retry_on=lambda x: x is None)
def GitSVNTag(self, version):
self.Git(("svn tag %s -m \"Tagging version %s\"" % (version, version)),
retry_on=lambda x: x is None)
| mit |
josesanch/django-oscar | sites/demo/apps/offers.py | 33 | 1268 | from oscar.apps.offer import models
class AlphabetRange(object):
name = "Products that start with D"
def contains_product(self, product):
return product.title.startswith('D')
def num_products(self):
return None
class BasketOwnerCalledBarry(models.Condition):
name = "User must be called barry"
class Meta:
proxy = True
def is_satisfied(self, basket):
if not basket.owner:
return False
return basket.owner.first_name.lower() == 'barry'
def can_apply_condition(self, product):
return False
def consume_items(self, basket, affected_lines):
return
class ChangesOwnerName(models.Benefit):
class Meta:
proxy = True
def apply(self, basket, condition, offer=None):
condition.consume_items(basket, ())
return models.PostOrderAction(
"You will have your name changed to Barry!")
def apply_deferred(self, basket, order, application):
if basket.owner:
basket.owner.first_name = "Barry"
basket.owner.save()
return "Name changed to Barry!"
return "We tried to apply benefit but couldn't"
@property
def description(self):
return "Changes owners name"
| bsd-3-clause |
mazaclub/mazabot-core | plugins/ChannelLogger/test.py | 19 | 1753 | ###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class ChannelLoggerTestCase(PluginTestCase):
plugins = ('ChannelLogger',)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
mstriemer/zamboni | docs/watcher.py | 67 | 1241 | """
Watch a bunch of files and run a command if any changes are detected.
Usage
-----
::
python watcher.py 'echo changes' one.py two.py
To automatically keep Sphinx docs up to date::
python watcher.py 'make html' $(find . -name '*.rst')
Problems
--------
* The file checking would be way more efficient using inotify or whatever the
equivalent is on OS X.
* It doesn't handle bad input or spaces in filenames.
But it works for me.
"""
import os
import sys
import time
_mtimes = {}
def timecheck(files):
"""Return True if any of the files have changed."""
global _mtimes
for filename in files:
mtime = os.stat(filename).st_mtime
if filename not in _mtimes:
_mtimes[filename] = mtime
elif mtime != _mtimes[filename]:
_mtimes = {}
return True
else:
return False
def watcher(command, files):
"""Run ``command`` if any file in ``files`` changes."""
while True:
if timecheck(files):
os.system(command)
time.sleep(1)
def main():
command, files = sys.argv[1], sys.argv[2:]
try:
watcher(command, files)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| bsd-3-clause |
alexandreleroux/mayavi | mayavi/tools/data_wizards/csv_loader.py | 5 | 7005 | # Author: Ilan Schnell <[email protected]>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
from traits.api import HasTraits, Str, Int, Array, List, \
Instance, on_trait_change, Property, Button
from pyface.api import GUI
from traitsui.api import View, Item, HGroup, Group, \
ListEditor, TabularEditor, spring, TextEditor, Controller, VSplit
from traitsui.tabular_adapter import TabularAdapter
from mayavi.tools.data_wizards.csv_sniff import Sniff, loadtxt, \
array2dict
##############################################################################
# ListItem class
##############################################################################
class ListItem(HasTraits):
""" Class used to represent an item in a list with traits UI.
"""
column_number = Int
name = Str
my_name = Str
parent = Instance(HasTraits)
view = View(
HGroup(
Item('name', style='readonly', show_label=False,
resizable=False),
Item('my_name', style='simple', show_label=False,
editor=TextEditor(auto_set=False, enter_set=True),
springy=True),
)
)
##############################################################################
# CSVLoader class
##############################################################################
class CSVLoader(HasTraits):
""" User interface to load CSV files.
"""
# The name of the file being loaded.
filename = Str
# The comment characters
comments = Str(desc="The comment characters")
# The character giving the delimiter between the columns.
delimiter = Str(
desc="The character giving the delimiter between the columns")
# The number of rows to skip at the beginning of the file
skiprows = Int(
desc="The number of rows to skip at the beginning of the file")
columns = List(ListItem)
data = Array
data_dict = Property(depends_on='data')
def _get_data_dict(self):
return array2dict(self.data)
def guess_defaults(self):
try:
kwds = Sniff(self.filename).kwds()
except:
kwds = {'comments': '#',
'delimiter': ',',
'dtype': float,
'skiprows': 0}
if kwds['delimiter']:
self.delimiter = kwds['delimiter']
else:
self.delimiter = ' '
self.comments = kwds['comments']
self.skiprows = kwds['skiprows']
self.names = list(kwds['dtype']['names'])
self.formats = list(kwds['dtype']['formats'])
self.columns = [ListItem(name='Column %i:' % (i + 1),
parent=self,
column_number=i,
my_name=val)
for i, val in enumerate(self.names)]
self.load_data()
def load_data(self):
kwds = {}
kwds['delimiter'] = self.delimiter
kwds['comments'] = self.comments
kwds['skiprows'] = self.skiprows
kwds['dtype'] = dict(names=self.names,
formats=self.formats)
try:
self.data = loadtxt(self.filename, **kwds)
except:
pass
##############################################################################
# CSVLoaderController class
##############################################################################
class CSVLoaderController(Controller):
""" A controller for the CSVLoader.
"""
tabular_editor = Instance(HasTraits)
def _tabular_editor_default(self):
class ArrayAdapter(TabularAdapter):
columns = [(n, i) for i, n in enumerate(self.model.names)]
font = 'Courier 10'
alignment = 'right'
format = '%s'
return TabularEditor(adapter=ArrayAdapter())
update_preview = Button('Update preview')
@on_trait_change('update_preview')
def load_data(self):
self.model.load_data()
@on_trait_change('model.columns.my_name,model.data')
def update_table_editor(self, object, name, old, new):
if isinstance(object, ListItem):
self.tabular_editor.adapter.columns[object.column_number] = \
(new, object.column_number)
GUI.set_trait_later(self.info.ui, 'updated', True)
file_content = Str
@on_trait_change('model.filename')
def update_file(self):
f = open(self.model.filename)
self.file_content = f.read(300)
def default_traits_view(self):
view = View(
VSplit(
HGroup(
Group(
spring,
Item('delimiter',
label='Column delimiter character'),
Item('comments',
label='Comment character'),
Item('skiprows',
label='Number of lines to skip at the '
'beginning of the file'),
spring,
Item('handler.update_preview',
show_label=False),
),
Group(
Item('columns',
show_label=False,
style='readonly',
editor=ListEditor(style='custom'),
springy=True,
),
label="Column names",
show_border=True,
),
),
Group(
Group(
Item('data',
show_label=False,
editor=self.tabular_editor,
),
label="Preview table",
),
Group(
Item('handler.file_content', style='readonly',
show_label=False,
springy=True),
label="%s" % self.model.filename,
),
layout='tab'),
),
buttons=['OK', 'Cancel', 'Help'],
id='csv_load_editor',
resizable=True,
width=640,
height=580,
title='CSV import - [%s]' % self.model.filename
)
return view
if __name__ == '__main__':
from pyface.api import GUI
csv_loader = CSVLoader(filename='mydata.csv')
csv_loader.guess_defaults()
controller = CSVLoaderController(model=csv_loader)
controller.edit_traits()
GUI().start_event_loop()
| bsd-3-clause |
nzt4567/animator | skj_animation.py | 1 | 12444 | #!/usr/bin/env python
''' Process a set of frames using ffmpeg to create required animation '''
# IMPORTS
import skj_std
# AUTHOR
__author__ = skj_std.__author__
__email__ = skj_std.__email__
__status__ = skj_std.__status__
__version__ = skj_std.__version__
__license__ = skj_std.__license__
__year__ = skj_std.__year__
__maintainer__ = skj_std.__maintainer__
def get_anim_records():
''' Return number of lines for every source file
status: finished
yield: int
raise: None
'''
for source_file in skj_std.arguments_values['source']:
yield source_file["num_of_lines"]
def create_speed_seq(file_, int_, frac_):
''' Create int sequence where each number represents count of lines to be added to gnuplot input
status: finished
return: list
raise: None
'''
from math import modf
file_seq = list()
frac_reminder = 0
if frac_ == 0: # If the speed is integer, we could just return the quotient and reminder and not whole list...
file_seq = [int_ for i in range(0, int(file_['num_of_lines'] / int_))]
if file_['num_of_lines'] % int_ != 0: # ... which would save some time && memory
file_seq.append(file_['num_of_lines'] % int_)
else:
while sum(file_seq) != file_['num_of_lines']:
to_append = int_
frac_reminder += frac_
if frac_reminder >= 1:
frac_reminder_frac = modf(frac_reminder)[0]
frac_reminder_natur = modf(frac_reminder)[1]
frac_reminder -= frac_reminder_natur
to_append += int(frac_reminder_natur)
if sum(file_seq) + to_append <= file_['num_of_lines']:
file_seq.append(to_append)
else:
file_seq.append(file_['num_of_lines'] - sum(file_seq))
return file_seq
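# Worked example (illustrative): for a file with num_of_lines == 10 and a speed
# of 2.5 (int_=2, frac_=0.5) the function returns [2, 3, 2, 3]: the fractional
# part is carried over and paid back as an extra record every other frame, and
# the chunks always sum to num_of_lines.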
def determine_anim_type():
''' Decide whether animation is multiplot type or oneline
status: finished
return: str
raise: IndexError
'''
try:
time_max = skj_std.arguments_values['source'][0]
time_min = skj_std.arguments_values['source'][0]
except IndexError as exception_msg:
raise IndexError(skj_std.create_error_msg("INTERNAL", exception_msg))
for source_file in skj_std.arguments_values['source']:
if source_file['time_min'] < time_min['time_min']:
time_min = source_file
if source_file['time_max'] >= time_max['time_max']:
time_max = source_file # If the file with new min can also have new max, use it ...
elif source_file['time_max'] > time_max['time_max']:
time_max = source_file
if source_file['time_min'] <= time_min['time_min']:
time_min = source_file # ... because of correct anim type detection
if time_max["path"] == time_min["path"] and len(skj_std.arguments_values['source']) != 1: # Hope this works
return "multiplot"
return "oneline"
def calculate_sfft():
''' Calculate speed (num of records read each gnuplot iteration), fps, animation duration and num of frames
status: devel - those if-chanins should be gone
return: None
raise: ValueError, ArithmeticError
'''
# 0. No user input
# 1. User speed && fps
# 2. User speed
# 3. User fps
if (skj_std.arguments_values['time'] == skj_std.arguments_defaults['time'] and\
skj_std.arguments_values['speed'] == skj_std.arguments_defaults['speed'] and\
skj_std.arguments_values['fps'] == skj_std.arguments_defaults['fps']) or\
(skj_std.arguments_values['speed'] != skj_std.arguments_defaults['speed'] and\
skj_std.arguments_values['fps'] != skj_std.arguments_defaults['fps'] and\
skj_std.arguments_values['time'] == skj_std.arguments_defaults['time']) or\
(skj_std.arguments_values['speed'] != skj_std.arguments_defaults['speed'] and\
skj_std.arguments_values['fps'] == skj_std.arguments_defaults['fps'] and\
skj_std.arguments_values['time'] == skj_std.arguments_defaults['time']) or\
(skj_std.arguments_values['speed'] == skj_std.arguments_defaults['speed'] and\
skj_std.arguments_values['fps'] != skj_std.arguments_defaults['fps'] and\
skj_std.arguments_values['time'] == skj_std.arguments_defaults['time']):
# Calculate animation time
try:
skj_std.arguments_values['time'] = skj_std.arguments_values['records'] / \
(skj_std.arguments_values['speed'] * skj_std.arguments_values['fps'])
except (ArithmeticError, ZeroDivisionError) as exception_msg:
raise ArithmeticError(skj_std.create_error_msg("PYTHON", exception_msg))
# 0. User speed && time
# 1. User time
elif (skj_std.arguments_values['speed'] != skj_std.arguments_defaults['speed'] and\
skj_std.arguments_values['fps'] == skj_std.arguments_defaults['fps'] and\
skj_std.arguments_values['time'] != skj_std.arguments_defaults['time']) or\
(skj_std.arguments_values['speed'] == skj_std.arguments_defaults['speed'] and\
skj_std.arguments_values['fps'] == skj_std.arguments_defaults['fps'] and\
skj_std.arguments_values['time'] != skj_std.arguments_defaults['time']):
# Calculate animation fps
try:
skj_std.arguments_values['fps'] = skj_std.arguments_values['records'] / \
(skj_std.arguments_values['speed'] * skj_std.arguments_values['time'])
except (ArithmeticError, ZeroDivisionError) as exception_msg:
raise ArithmeticError(skj_std.create_error_msg("PYTHON", exception_msg))
# User time && fps
elif skj_std.arguments_values['speed'] == skj_std.arguments_defaults['speed'] and\
skj_std.arguments_values['fps'] != skj_std.arguments_defaults['fps'] and\
skj_std.arguments_values['time'] != skj_std.arguments_defaults['time']:
# Calculate animation speed
try:
skj_std.arguments_values['speed'] = skj_std.arguments_values['records'] / \
(skj_std.arguments_values['fps'] * skj_std.arguments_values['time'])
except (ArithmeticError, ZeroDivisionError) as exception_msg:
raise ArithmeticError(skj_std.create_error_msg("PYTHON", exception_msg))
# User time && speed && fps
elif skj_std.arguments_values['time'] != skj_std.arguments_defaults['time'] and\
skj_std.arguments_values['speed'] != skj_std.arguments_defaults['speed'] and\
skj_std.arguments_values['fps'] != skj_std.arguments_defaults['fps']:
# Calculate correct time
try:
time_check = skj_std.arguments_values['records'] / \
(skj_std.arguments_values['speed'] * skj_std.arguments_values['fps'])
except (ArithmeticError, ZeroDivisionError) as exception_msg:
raise ArithmeticError(skj_std.create_error_msg("PYTHON", exception_msg))
# Check if correct time matches user time
if time_check != skj_std.arguments_values['time']:
if skj_std.arguments_values['ignoreerrors']:
skj_std.print_msg_verbose(err_=skj_std.create_error_msg("INVALID_VALUE", \
skj_std.arguments_values['time']))
skj_std.arguments_values['time'] = time_check
else:
raise ValueError(skj_std.create_error_msg("INVALID_VALUE", skj_std.arguments_values['time']))
from math import ceil
skj_std.arguments_values['frames'] = ceil(skj_std.arguments_values['records'] / skj_std.arguments_values['speed'])
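# Example of the relationships used above (numbers are illustrative): with 300
# records, a speed of 2 records per frame and 25 fps, time = 300 / (2 * 25) = 6 s
# and frames = ceil(300 / 2) = 150.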
def set_animation_properties():
''' Set properties of animation like speed, time, num of frames, num of records, type, etc
status: finished
return: None
raise: TypeError, IndexError, ValueError
'''
# Get animation type
skj_std.arguments_values['animation_type'] = determine_anim_type() # raise IndexError
# Then calculate the number of valid lines (valid line is also called 'record')
skj_std.arguments_values['records'] = 0 # File with zero records should never exist
for lines_file in get_anim_records():
if skj_std.arguments_values['animation_type'] == "multiplot":
# Multiplot animation has as many records as the longest file has lines
if lines_file > skj_std.arguments_values['records']:
skj_std.arguments_values['records'] = lines_file
else:
# Oneline animation has sum(all_lines) records
skj_std.arguments_values['records'] += lines_file
# Calculate correct speed && fps && frames && time
calculate_sfft() # raise ValueError, ArithmeticError (catch AE? => if ignoreerrors: speed = 0 (see code below))
# Correct speed/fps if it is too low (< 1), cause that leads to crazy long create_speed_seq() && generate_anim()
if skj_std.arguments_values['speed'] < 1 or skj_std.arguments_values['fps'] < 1:
if skj_std.arguments_values['ignoreerrors']:
skj_std.print_msg_verbose(err_=skj_std.create_error_msg("TOO_SMALL_ANIM", \
str(skj_std.arguments_values['speed']) + \
"/" + str(skj_std.arguments_values['fps'])))
skj_std.arguments_values['speed'] = skj_std.arguments_defaults['speed']
skj_std.arguments_values['fps'] = skj_std.arguments_defaults['fps']
skj_std.arguments_values['time'] = skj_std.arguments_defaults['time']
calculate_sfft()
else:
raise ValueError(skj_std.create_error_msg("TOO_SMALL_ANIM", \
str(skj_std.arguments_values['speed']) + "/" + str(skj_std.arguments_values['fps'])))
# Create sequence of records added to every created frame
from math import modf
try: # Divide the speed on it's integer and fractional parts
speed_fraction = modf(skj_std.arguments_values['speed'])[0]
speed_integer = int(modf(skj_std.arguments_values['speed'])[1])
except TypeError as exception_msg:
raise TypeError(skj_std.create_error_msg("INTERNAL", exception_msg))
for source_file in skj_std.arguments_values['source']: # Add the sequence to each file's properties
source_file['adding_seq'] = create_speed_seq(file_=source_file, int_=speed_integer, frac_=speed_fraction)
def create_animation():
''' Finally, call ffmpeg and let it do it's magic
status: finished
return: None
raise: OSError
'''
import os
import subprocess
from sys import argv
if skj_std.arguments_values['name'] == skj_std.arguments_defaults['name']:
skj_std.arguments_values['name'] = os.path.split(argv[0])[1]
output = os.path.join(os.getcwd(), skj_std.arguments_values['name']) # Output directory
if os.path.isdir(output):
i = 0 # If the dir already exists ...
output = output + '_' + str(i)
while os.path.isdir(output):
i += 1 # ... try output_i where i = max(i,0) + 1
output = output[:output.rfind('_')] + '_' + str(i)
try:
os.makedirs(output) # If we do not have write/execute in os.getcwd()...
except OSError as exception_msg:
if skj_std.arguments_values['ignoreerrors']:
skj_std.print_msg_verbose(err_=skj_std.create_error_msg("OUTPUT_DIR_CREATE", output))
output = skj_std.temp_directories['root'] # ... move output to temp dir we already have ...
else: # ... or die!
raise OSError(skj_std.create_error_msg("OUTPUT_DIR_CREATE", output))
filetype = ".mp4"
codec = "libx264"
ffmpeg = ["ffmpeg", "-f", "image2", "-r", str(skj_std.arguments_values['fps']), "-i", \
os.path.join(skj_std.temp_directories['gnuplot'], "g_%0" + \
str(len(str(skj_std.arguments_values['frames']))) + "d.png"),
"-c:v", codec, "-r", str(skj_std.arguments_values['fps']), \
os.path.join(output, skj_std.arguments_values['name'].split('/')[-1]) + filetype]
try:
subprocess.check_call(ffmpeg, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError as exception_msg:
raise OSError(skj_std.create_error_msg("PYTHON", exception_msg)) | gpl-2.0 |
trezorg/django | django/db/utils.py | 151 | 6225 | import inspect
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
DEFAULT_DB_ALIAS = 'default'
# Define some exceptions that mirror the PEP249 interface.
# We will rethrow any backend-specific errors using these
# common wrappers
class DatabaseError(Exception):
pass
class IntegrityError(DatabaseError):
pass
def load_backend(backend_name):
try:
module = import_module('.base', 'django.db.backends.%s' % backend_name)
import warnings
warnings.warn(
"Short names for DATABASE_ENGINE are deprecated; prepend with 'django.db.backends.'",
DeprecationWarning
)
return module
except ImportError, e:
# Look for a fully qualified database backend name
try:
return import_module('.base', backend_name)
except ImportError, e_user:
# The database backend wasn't found. Display a helpful error message
# listing all possible (built-in) database backends.
backend_dir = os.path.join(os.path.dirname(__file__), 'backends')
try:
available_backends = [f for f in os.listdir(backend_dir)
if os.path.isdir(os.path.join(backend_dir, f))
and not f.startswith('.')]
except EnvironmentError:
available_backends = []
if backend_name.startswith('django.db.backends.'):
backend_name = backend_name[19:] # See #15621.
if backend_name not in available_backends:
error_msg = ("%r isn't an available database backend. \n" +
"Try using django.db.backends.XXX, where XXX is one of:\n %s\n" +
"Error was: %s") % \
(backend_name, ", ".join(map(repr, sorted(available_backends))), e_user)
raise ImproperlyConfigured(error_msg)
else:
raise # If there's some other error, this must be an error in Django itself.
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler(object):
def __init__(self, databases):
self.databases = databases
self._connections = {}
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection
        where no settings are provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('OPTIONS', {})
conn.setdefault('TEST_CHARSET', None)
conn.setdefault('TEST_COLLATION', None)
conn.setdefault('TEST_NAME', None)
conn.setdefault('TEST_MIRROR', None)
conn.setdefault('TIME_ZONE', settings.TIME_ZONE)
for setting in ('NAME', 'USER', 'PASSWORD', 'HOST', 'PORT'):
conn.setdefault(setting, '')
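    # For illustration, a settings entry as small as
    #   DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3',
    #                            'NAME': 'app.db'}}
    # comes out of ensure_defaults('default') with OPTIONS, the TEST_* keys,
    # TIME_ZONE and empty-string USER/PASSWORD/HOST/PORT filled in.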
def __getitem__(self, alias):
if alias in self._connections:
return self._connections[alias]
self.ensure_defaults(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
self._connections[alias] = conn
return conn
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
class ConnectionRouter(object):
def __init__(self, routers):
self.routers = []
for r in routers:
if isinstance(r, basestring):
try:
module_name, klass_name = r.rsplit('.', 1)
module = import_module(module_name)
except ImportError, e:
raise ImproperlyConfigured('Error importing database router %s: "%s"' % (klass_name, e))
try:
router_class = getattr(module, klass_name)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a database router name "%s"' % (module, klass_name))
else:
router = router_class()
else:
router = r
self.routers.append(router)
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
try:
return hints['instance']._state.db or DEFAULT_DB_ALIAS
except KeyError:
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_syncdb(self, db, model):
for router in self.routers:
try:
method = router.allow_syncdb
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(db, model)
if allow is not None:
return allow
return True
| bsd-3-clause |
wbrefvem/openshift-ansible | roles/openshift_health_checker/openshift_checks/mixins.py | 26 | 2322 | """
Mixin classes meant to be used with subclasses of OpenShiftCheck.
"""
class NotContainerizedMixin(object):
"""Mixin for checks that are only active when not in containerized mode."""
# permanent # pylint: disable=too-few-public-methods
# Reason: The mixin is not intended to stand on its own as a class.
def is_active(self):
"""Only run on non-containerized hosts."""
openshift_is_containerized = self.get_var("openshift_is_containerized")
return super(NotContainerizedMixin, self).is_active() and not openshift_is_containerized
class DockerHostMixin(object):
"""Mixin for checks that are only active on hosts that require Docker."""
dependencies = []
def is_active(self):
"""Only run on hosts that depend on Docker."""
group_names = set(self.get_var("group_names", default=[]))
needs_docker = set(["oo_nodes_to_config"])
if self.get_var("openshift_is_containerized"):
needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"])
return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker))
def ensure_dependencies(self):
"""
Ensure that docker-related packages exist, but not on atomic hosts
(which would not be able to install but should already have them).
Returns: msg, failed
"""
if self.get_var("openshift_is_atomic"):
return "", False
# NOTE: we would use the "package" module but it's actually an action plugin
# and it's not clear how to invoke one of those. This is about the same anyway:
result = self.execute_module_with_retries(
self.get_var("ansible_pkg_mgr", default="yum"),
{"name": self.dependencies, "state": "present"},
)
msg = result.get("msg", "")
if result.get("failed"):
if "No package matching" in msg:
msg = "Ensure that all required dependencies can be installed via `yum`.\n"
msg = (
"Unable to install required packages on this host:\n"
" {deps}\n{msg}"
).format(deps=',\n '.join(self.dependencies), msg=msg)
failed = result.get("failed", False) or result.get("rc", 0) != 0
return msg, failed
| apache-2.0 |
keedio/hue | desktop/core/ext-py/Django-1.6.10/django/utils/functional.py | 105 | 15418 | import copy
import operator
from functools import wraps
import sys
from django.utils import six
from django.utils.six.moves import copyreg
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*(args + moreargs), **dict(kwargs, **morekwargs))
return _curried
def memoize(func, cache, num_args):
"""
Wrap a function so that results for any argument tuple are stored in
'cache'. Note that the args to the function must be usable as dictionary
keys.
Only the first num_args are considered when creating the key.
"""
@wraps(func)
def wrapper(*args):
mem_args = args[:num_args]
if mem_args in cache:
return cache[mem_args]
result = func(*args)
cache[mem_args] = result
return result
return wrapper
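# Example use of memoize() (illustrative): rebinding the cached name makes the
# recursion itself hit the cache.
#
#     _fib_cache = {}
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#     fib = memoize(fib, _fib_cache, 1)   # key results on the single argument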
class cached_property(object):
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
"""
def __init__(self, func):
self.func = func
def __get__(self, instance, type=None):
if instance is None:
return self
res = instance.__dict__[self.func.__name__] = self.func(instance)
return res
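# Typical use of cached_property (illustrative; run_expensive_query is a
# hypothetical helper):
#
#     class Report(object):
#         @cached_property
#         def rows(self):
#             return run_expensive_query()
#
#     r = Report()
#     r.rows   # computed once and stored in r.__dict__
#     r.rows   # the instance dict now shadows the descriptor; no recomputation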
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
Turns any callable into a lazy evaluated callable. You need to give result
classes or types -- at least one is needed so that the automatic forcing of
the lazy evaluation code is triggered. Results are not memoized; the
function is evaluated on every access.
"""
@total_ordering
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__dispatch = None
def __init__(self, args, kw):
self.__args = args
self.__kw = kw
if self.__dispatch is None:
self.__prepare_class__()
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(func, self.__args, self.__kw) + resultclasses
)
def __prepare_class__(cls):
cls.__dispatch = {}
for resultclass in resultclasses:
cls.__dispatch[resultclass] = {}
for type_ in reversed(resultclass.mro()):
for (k, v) in type_.__dict__.items():
# All __promise__ return the same wrapper method, but
# they also do setup, inserting the method into the
# dispatch dict.
meth = cls.__promise__(resultclass, k, v)
if hasattr(cls, k):
continue
setattr(cls, k, meth)
cls._delegate_bytes = bytes in resultclasses
cls._delegate_text = six.text_type in resultclasses
assert not (cls._delegate_bytes and cls._delegate_text), "Cannot call lazy() with both bytes and text return types."
if cls._delegate_text:
if six.PY3:
cls.__str__ = cls.__text_cast
else:
cls.__unicode__ = cls.__text_cast
elif cls._delegate_bytes:
if six.PY3:
cls.__bytes__ = cls.__bytes_cast
else:
cls.__str__ = cls.__bytes_cast
__prepare_class__ = classmethod(__prepare_class__)
def __promise__(cls, klass, funcname, method):
# Builds a wrapper around some magic method and registers that
# magic method for the given type and method name.
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = func(*self.__args, **self.__kw)
for t in type(res).mro():
if t in self.__dispatch:
return self.__dispatch[t][funcname](res, *args, **kw)
raise TypeError("Lazy object returned unexpected type.")
if klass not in cls.__dispatch:
cls.__dispatch[klass] = {}
cls.__dispatch[klass][funcname] = method
return __wrapper__
__promise__ = classmethod(__promise__)
def __text_cast(self):
return func(*self.__args, **self.__kw)
def __bytes_cast(self):
return bytes(func(*self.__args, **self.__kw))
def __cast(self):
if self._delegate_bytes:
return self.__bytes_cast()
elif self._delegate_text:
return self.__text_cast()
else:
return func(*self.__args, **self.__kw)
def __eq__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() == other
def __lt__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() < other
def __hash__(self):
return hash(self.__cast())
def __mod__(self, rhs):
if self._delegate_bytes and six.PY2:
return bytes(self) % rhs
elif self._delegate_text:
return six.text_type(self) % rhs
return self.__cast() % rhs
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
@wraps(func)
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return __wrapper__
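# Illustrative use of lazy(): the wrapped callable only runs once the proxy is
# used as one of the declared result types.
#
#     lazy_upper = lazy(lambda s: s.upper(), str)
#     greeting = lazy_upper("hello")   # returns a __proxy__, nothing evaluated
#     "%s world" % greeting            # forces evaluation -> "HELLO world"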
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
def allow_lazy(func, *resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
@wraps(func)
def wrapper(*args, **kwargs):
for arg in list(args) + list(six.itervalues(kwargs)):
if isinstance(arg, Promise):
break
else:
return func(*args, **kwargs)
return lazy(func, *resultclasses)(*args, **kwargs)
return wrapper
empty = object()
def new_method_proxy(func):
def inner(self, *args):
if self._wrapped is empty:
self._setup()
return func(self._wrapped, *args)
return inner
class LazyObject(object):
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
# Avoid infinite recursion when tracing __init__ (#19456).
_wrapped = None
def __init__(self):
self._wrapped = empty
__getattr__ = new_method_proxy(getattr)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is empty:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is empty:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialise the wrapped object.
"""
raise NotImplementedError
# Introspection support
__dir__ = new_method_proxy(dir)
# Dictionary methods support
@new_method_proxy
def __getitem__(self, key):
return self[key]
@new_method_proxy
def __setitem__(self, key, value):
self[key] = value
@new_method_proxy
def __delitem__(self, key):
del self[key]
# Workaround for http://bugs.python.org/issue12370
_super = super
class SimpleLazyObject(LazyObject):
"""
A lazy object initialised from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__['_setupfunc'] = func
_super(SimpleLazyObject, self).__init__()
def _setup(self):
self._wrapped = self._setupfunc()
if six.PY3:
__bytes__ = new_method_proxy(bytes)
__str__ = new_method_proxy(str)
else:
__str__ = new_method_proxy(str)
__unicode__ = new_method_proxy(unicode)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
else:
return copy.deepcopy(self._wrapped, memo)
# Because we have messed with __class__ below, we confuse pickle as to what
# class we are pickling. It also appears to stop __reduce__ from being
# called. So, we define __getstate__ in a way that cooperates with the way
# that pickle interprets this class. This fails when the wrapped class is
# a builtin, but it is better than nothing.
def __getstate__(self):
if self._wrapped is empty:
self._setup()
return self._wrapped.__dict__
# Python 3.3 will call __reduce__ when pickling; this method is needed
# to serialize and deserialize correctly.
@classmethod
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def __reduce_ex__(self, proto):
if proto >= 2:
# On Py3, since the default protocol is 3, pickle uses the
# ``__newobj__`` method (& more efficient opcodes) for writing.
return (self.__newobj__, (self.__class__,), self.__getstate__())
else:
# On Py2, the default protocol is 0 (for back-compat) & the above
# code fails miserably (see regression test). Instead, we return
# exactly what's returned if there's no ``__reduce__`` method at
# all.
return (copyreg._reconstructor, (self.__class__, object, None), self.__getstate__())
# Return a meaningful representation of the lazy object for debugging
# without evaluating the wrapped object.
def __repr__(self):
if self._wrapped is empty:
repr_attr = self._setupfunc
else:
repr_attr = self._wrapped
return '<SimpleLazyObject: %r>' % repr_attr
# Need to pretend to be the wrapped class, for the sake of objects that
# care about this (especially in equality tests)
__class__ = property(new_method_proxy(operator.attrgetter("__class__")))
__eq__ = new_method_proxy(operator.eq)
__ne__ = new_method_proxy(operator.ne)
__hash__ = new_method_proxy(hash)
__bool__ = new_method_proxy(bool) # Python 3
__nonzero__ = __bool__ # Python 2
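# Illustrative use of SimpleLazyObject: defer an expensive construction until
# the first attribute access (parse_big_config_file is a hypothetical loader).
#
#     blob = SimpleLazyObject(lambda: parse_big_config_file())
#     # nothing has been parsed yet; the first attribute access calls _setup()
#     blob.some_option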
class lazy_property(property):
"""
A property that works with subclasses by wrapping the decorated
functions of the base class.
"""
def __new__(cls, fget=None, fset=None, fdel=None, doc=None):
if fget is not None:
@wraps(fget)
def fget(instance, instance_type=None, name=fget.__name__):
return getattr(instance, name)()
if fset is not None:
@wraps(fset)
def fset(instance, value, name=fset.__name__):
return getattr(instance, name)(value)
if fdel is not None:
@wraps(fdel)
def fdel(instance, name=fdel.__name__):
return getattr(instance, name)()
return property(fget, fset, fdel, doc)
def partition(predicate, values):
"""
Splits the values into two sets, based on the return value of the function
(True/False). e.g.:
>>> partition(lambda x: x > 3, range(5))
[0, 1, 2, 3], [4]
"""
results = ([], [])
for item in values:
results[predicate(item)].append(item)
return results
if sys.version_info >= (2, 7, 2):
from functools import total_ordering
else:
# For Python < 2.7.2. Python 2.6 does not have total_ordering, and
# total_ordering in 2.7 versions prior to 2.7.2 is buggy. See
# http://bugs.python.org/issue10042 for details. For these versions use
# code borrowed from Python 2.7.3.
def total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
convert = {
'__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
('__le__', lambda self, other: self < other or self == other),
('__ge__', lambda self, other: not self < other)],
'__le__': [('__ge__', lambda self, other: not self <= other or self == other),
('__lt__', lambda self, other: self <= other and not self == other),
('__gt__', lambda self, other: not self <= other)],
'__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
('__ge__', lambda self, other: self > other or self == other),
('__le__', lambda self, other: not self > other)],
'__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
('__gt__', lambda self, other: self >= other and not self == other),
('__lt__', lambda self, other: not self >= other)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
| apache-2.0 |
ericmjonas/pySpaRSA | pysparsa/sparsa.py | 1 | 6917 | import numpy as np
import sys
import util
import time
default_phi_function = lambda x: np.sum(np.abs(x))
default_psi_function = util.soft
def sparsa(y, Aop, tau,
stopCriterion = 2, tolA = 0.01, tolD = 0.001, debias=0, maxiter = 10000,
maxiter_debias = 200, miniter = 5, miniter_debias=0,
init = 'zeros', bbVariant = 1, bbCycle = 1,
enforceMonotone = 0, enforceSafeguard = 0,
M = 5, sigma = 0.01, cont_steps = -1,
verbose=True,
alphamin = 1e-30, alphamax = 1e30, compute_mse = 0):
"""
    Minimize 0.5*||y - Aop(x)||^2 + tau*phi(x) with the SpaRSA iteration.

    Initializations:
      'zeros': start from the all-zeros vector (the only option implemented).
"""
# FIXME add lots of sanity checks
# precompute A^T(y)
ATy = Aop.compute_ATx(y)
# sanity check phi function
    psi_function = default_psi_function # FIXME allow an arbitrary psi
    phi_function = default_phi_function # FIXME allow an arbitrary phi
if init == 'zeros':
x = Aop.compute_ATx(np.zeros(len(y)))
else:
raise NotImplementedError("unknown initialization")
# FIXME something to handle large tau?
# FIXME do something with true x?
nz_x = x == 0.0 # FIXME should this be a tolerance?
num_nz_x = np.sum(nz_x)
final_tau = tau
# FIXME some continuation stuff
final_stopCriterion = stopCriterion
final_tolA = tolA
if cont_steps == -1:
tau = sys.float_info.max
keep_continuation = True
cont_loop = 1
iter = 1
taus = []
mses = []
objective = []
times = []
t0 = time.time()
debias_start = 0
x_debias = []
while keep_continuation:
iterThisCycle = 0
        # compute initial residual and gradient
resid = Aop.compute_Ax(x) - y
gradq = Aop.compute_ATx(resid)
if cont_steps == -1:
temp_tau = max(final_tau, 0.2 * np.max(np.abs(gradq)))
if temp_tau > tau:
tau = final_tau
else:
tau = temp_tau;
if tau == final_tau:
stopCriterion = final_stopCriterion
tolA = final_tolA
keep_continuation = 0
else:
stopCriterion = 1
tolA = 1e-5
else:
tau = final_tau * cont_factors(cont_loop)
if cont_loop == cont_steps:
pass # FIXME don't handle this now
else:
pass # FIXME don't handle this now
taus.append(tau)
# compute and store initial value of the objective function for this tau
        alpha = 1.0
        eta = 2.0  # step-size increase factor for the backtracking loop (assumed value; not exposed as an argument)
f = 0.5 * np.dot(resid, resid) + tau * phi_function(x)
if enforceSafeguard:
f_lastM = f
# at the very start of the process, store the initial mses and objective in
# plotting arrays FIXME DO
keep_going = 1
while keep_going:
gradq = Aop.compute_ATx(resid)
# save current values
prev_x = x
prev_f = f
prev_resid = resid
# computation of step
cont_inner = True
while cont_inner:
x = psi_function(prev_x - gradq*1.0/alpha, tau/alpha)
dx = x - prev_x
Adx = Aop.compute_Ax(dx)
resid = prev_resid + Adx
f = 0.5 * np.dot(resid, resid) + tau * phi_function(x)
if enforceMonotone:
f_threshold = prev_f
elif enforceSafeguard:
f_threshold = max(f_lastM) - 0.5 * sigma * alpha * np.dot(dx, dx)
else:
f_threshold = np.inf
if f < f_threshold:
cont_inner = False
else:
# not good enough, increase alpha and try again
alpha = eta * alpha
if enforceSafeguard:
if len(f_lastM) > M:
                        f_lastM.pop(0)
f_lastM.append(f)
if verbose:
print "t=%4d, obj=%10.6f, alpha=%f" % (iter, f, alpha)
if bbVariant == 1: # Fixme pick a better name
# standard BB Choice of init alpha for next step
if iterThisCycle == 0 or enforceMonotone:
dd = np.dot(dx, dx)
dGd = np.dot(Adx, Adx)
alpha = min(alphamax, max(alphamin, dGd / (sys.float_info.min + dd)))
elif bbVariant == 2:
raise NotImplementedError("Unkown bbvariant")
else:
alpha = alpha * alphaFactor
# update counts
iter +=1
iterThisCycle = (iterThisCycle + 1) % bbCycle
objective.append(f)
times.append(time.time() - t0)
if compute_mse:
err = true - x
mses.append(np.dot(err, err))
if stopCriterion == 0: # FIXME better name
            # compute stopping criterion based on the change of the number
# of non-zero components of the estimate
nz_x_prev = nz_x
nz_x = np.abs(x) != 0.0
num_nz_x = np.sum(nz_x)
if num_nz_x > 1:
criterionActiveSet = num_changes_Active / num_nz_x
keep_going = criterionActiveSet > tolA
if verbose:
print "Delta nz= %d (target = %f)" % (criterionActiveSet, tolA)
elif stopCriterion == 1:
criterionObjective = np.abs(f - prev_f) / prev_f
keep_going = criterionObjective > tolA
elif stopCriterion == 2:
            # compute the "duality" stopping criterion
scaleFactor = np.linalg.norm(gradq, np.inf)
w = tau * prev_resid / scaleFactor
criterionDuality = 0.5 * np.dot(prev_resid, prev_resid) + tau * phi_function(prev_x) + 0.5 * np.dot(w, w) + np.dot(y, w)
criterionDuality /= prev_f
keep_going = criterionDuality > tolA
else:
raise NotImplementedError("Unknown Stopping Criterion %d" % stopCriterion)
if iter < miniter:
keep_going = True
elif iter > maxiter:
keep_going = False
cont_loop += 1
if verbose:
            # print some stuff
pass
# FIXME add debias
# fixme MSEs
return {'x' : x,
'x_debias' : x_debias,
'objective' : objective,
'times' : times,
'debias_start' : debias_start,
'mses' : mses,
'taus' : taus}
| mit |
JohnGeorgiadis/invenio | invenio/legacy/bibsched/webapi.py | 4 | 4015 | # This file is part of Invenio.
# Copyright (C) 2011, 2012, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0103
"""Invenio BibSched live view engine implementation"""
from flask import url_for
from invenio.config import CFG_SITE_URL
from invenio.legacy.dbquery import run_sql
from invenio.legacy.bibsched.cli import CFG_MOTD_PATH
import os
import time
def get_css():
"""
Get css styles
"""
return """
<style type="text/css">
.task_waiting {
color: #ccbb22;
}
.task_running {
color: #33bb22;
}
.task_error {
color: #dd1100;
}
.admin_row_color{
background-color:#EBF7FF;
}
.last_updated {
color:#787878;
font-size:13px;
font-style:italic;
}
.mode {
font-size:14px;
}
.bibsched_status {
font-size:14px;
}
.clean_error{
border:solid 1px #CC0000;
background:#F7CBCA;
color:#CC0000;
font-size:14px;
font-weight:bold;
padding:4px;
max-width: 650px;
}
</style>
"""
def get_javascript():
"""
Get all required scripts
"""
js_scripts = """<script type="text/javascript" src="%(site_url)s/js/jquery.min.js">
</script>
<script type="text/javascript" src="%(custom)s">
</script>
""" % {'site_url':CFG_SITE_URL,
'custom': url_for('scheduler.static',
filename='js/scheduler/base.js') }
return js_scripts
def get_bibsched_tasks():
"""
Run SQL query to get all tasks present in bibsched queue
"""
waiting_tasks = run_sql("""SELECT id,proc,priority,user,runtime,status,
progress
FROM "schTASK"
WHERE (status='WAITING' OR
status='SLEEPING')
ORDER BY priority DESC, runtime ASC, id ASC""")
other_tasks = run_sql("""SELECT id,proc,priority,user,runtime,status,
progress
FROM "schTASK" WHERE status IN ('RUNNING',
'CONTINUING','SCHEDULED','ABOUT TO STOP',
'ABOUT TO SLEEP', 'DONE WITH ERRORS',
'ERRORS REPORTED')""")
return other_tasks + waiting_tasks
def get_bibsched_mode():
"""
Gets bibsched running mode: AUTOMATIC or MANUAL
"""
r = run_sql("""SELECT value FROM "schSTATUS" WHERE name = 'auto_mode' """)
try:
mode = bool(int(r[0][0]))
except (ValueError, IndexError):
mode = True
return mode and 'AUTOMATIC' or 'MANUAL'
def get_motd_msg():
"""
Gets content from motd file
"""
try:
motd_msg = open(CFG_MOTD_PATH).read().strip()
except IOError:
return ""
if len(motd_msg) > 0:
return "MOTD [%s] " % time.strftime("%Y-%m-%d %H:%M", time.localtime(os.path.getmtime(CFG_MOTD_PATH))) + motd_msg
else:
return ""
| gpl-2.0 |
sarvex/depot-tools | third_party/gsutil/gslib/commands/setcors.py | 51 | 5235 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.sax
from boto import handler
from boto.gs.cors import Cors
from gslib.command import Command
from gslib.command import COMMAND_NAME
from gslib.command import COMMAND_NAME_ALIASES
from gslib.command import CONFIG_REQUIRED
from gslib.command import FILE_URIS_OK
from gslib.command import MAX_ARGS
from gslib.command import MIN_ARGS
from gslib.command import PROVIDER_URIS_OK
from gslib.command import SUPPORTED_SUB_ARGS
from gslib.command import URIS_START_ARG
from gslib.exception import CommandException
from gslib.help_provider import HELP_NAME
from gslib.help_provider import HELP_NAME_ALIASES
from gslib.help_provider import HELP_ONE_LINE_SUMMARY
from gslib.help_provider import HELP_TEXT
from gslib.help_provider import HelpType
from gslib.help_provider import HELP_TYPE
from gslib.util import NO_MAX
_detailed_help_text = ("""
<B>SYNOPSIS</B>
gsutil setcors cors-xml-file uri...
<B>DESCRIPTION</B>
Sets the Cross-Origin Resource Sharing (CORS) configuration on one or more
buckets. This command is supported for buckets only, not objects. The
cors-xml-file specified on the command line should be a path to a local
file containing an XML document with the following structure:
<?xml version="1.0" ?>
<CorsConfig>
<Cors>
<Origins>
<Origin>http://origin1.example.com</Origin>
</Origins>
<Methods>
<Method>GET</Method>
</Methods>
<ResponseHeaders>
<ResponseHeader>Content-Type</ResponseHeader>
</ResponseHeaders>
</Cors>
</CorsConfig>
The above XML document explicitly allows cross-origin GET requests from
http://origin1.example.com and may include the Content-Type response header.
For more info about CORS, see http://www.w3.org/TR/cors/.
""")
class SetCorsCommand(Command):
"""Implementation of gsutil setcors command."""
# Command specification (processed by parent class).
command_spec = {
# Name of command.
COMMAND_NAME : 'setcors',
# List of command name aliases.
COMMAND_NAME_ALIASES : [],
# Min number of args required by this command.
MIN_ARGS : 2,
# Max number of args required by this command, or NO_MAX.
MAX_ARGS : NO_MAX,
# Getopt-style string specifying acceptable sub args.
SUPPORTED_SUB_ARGS : '',
# True if file URIs acceptable for this command.
FILE_URIS_OK : False,
# True if provider-only URIs acceptable for this command.
PROVIDER_URIS_OK : False,
# Index in args of first URI arg.
URIS_START_ARG : 1,
# True if must configure gsutil before running command.
CONFIG_REQUIRED : True,
}
help_spec = {
# Name of command or auxiliary help info for which this help applies.
HELP_NAME : 'setcors',
# List of help name aliases.
HELP_NAME_ALIASES : ['cors', 'cross-origin'],
    # Type of help.
HELP_TYPE : HelpType.COMMAND_HELP,
# One line summary of this help.
HELP_ONE_LINE_SUMMARY : 'Set a CORS XML document for one or more buckets',
# The full help text.
HELP_TEXT : _detailed_help_text,
}
# Command entry point.
def RunCommand(self):
cors_arg = self.args[0]
uri_args = self.args[1:]
# Disallow multi-provider setcors requests.
storage_uri = self.UrisAreForSingleProvider(uri_args)
if not storage_uri:
raise CommandException('"%s" command spanning providers not allowed.' %
self.command_name)
# Open, read and parse file containing XML document.
cors_file = open(cors_arg, 'r')
cors_txt = cors_file.read()
cors_file.close()
cors_obj = Cors()
# Parse XML document and convert into Cors object.
h = handler.XmlHandler(cors_obj, None)
try:
xml.sax.parseString(cors_txt, h)
except xml.sax._exceptions.SAXParseException, e:
raise CommandException('Requested CORS is invalid: %s at line %s, '
'column %s' % (e.getMessage(), e.getLineNumber(),
e.getColumnNumber()))
# Iterate over URIs, expanding wildcards, and setting the CORS on each.
some_matched = False
for uri_str in uri_args:
for blr in self.WildcardIterator(uri_str):
uri = blr.GetUri()
if not uri.names_bucket():
raise CommandException('URI %s must name a bucket for the %s command'
% (str(uri), self.command_name))
some_matched = True
print 'Setting CORS on %s...' % uri
uri.set_cors(cors_obj, False, self.headers)
if not some_matched:
raise CommandException('No URIs matched')
return 0
| bsd-3-clause |
antgonza/qiime | scripts/summarize_taxa.py | 15 | 12694 | #!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Rob Knight"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Rob Knight", "Catherine Lozupone", "Justin Kuczynski",
"Julia Goodrich", "Daniel McDonald", "Antonio Gonzalez Pena",
"Jesse Stombaugh", "Jose Carlos Clemente Litran",
"Greg Caporaso", "Jai Ram Rideout", "Adam Robbins-Pianka"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Daniel McDonald"
__email__ = "[email protected]"
from os.path import split, splitext, join
from sys import stdout, stderr
import numpy as np
from biom.table import Table
from biom import load_table
from qiime.util import (parse_command_line_parameters, make_option,
get_options_lookup, create_dir, write_biom_table)
from qiime.summarize_taxa import make_summary, add_summary_mapping
from qiime.parse import parse_mapping_file
from qiime.format import (
write_add_taxa_summary_mapping, format_add_taxa_summary_mapping)
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = (
"Summarize taxa and store results in a new table or appended to an "
"existing mapping file.")
script_info['script_description'] = (
"The summarize_taxa.py script provides summary information of the "
"representation of taxonomic groups within each sample. It takes an OTU "
"table that contains taxonomic information as input. The taxonomic level "
"for which the summary information is provided is designated with the -L "
"option. The meaning of this level will depend on the format of the taxon "
"strings that are returned from the taxonomy assignment step. The "
"taxonomy strings that are most useful are those that standardize the "
"taxonomic level with the depth in the taxonomic strings. For instance, "
"the Greengenes database uses the following levels: Level 1 = Kingdom "
"(e.g Bacteria), Level 2 = Phylum (e.g Actinobacteria), Level 3 = Class "
"(e.g Actinobacteria), Level 4 = Order (e.g Actinomycetales), Level 5 = "
"Family (e.g Streptomycetaceae), Level 6 = Genus (e.g Streptomyces), "
"Level 7 = Species (e.g mirabilis). "
"By default, the relative abundance of each taxonomic "
"group will be reported, but the raw counts can be returned if -a is "
"passed.\n\nBy default, taxa summary tables will be output in both "
"classic (tab-separated) and BIOM formats. The BIOM-formatted taxa "
"summary tables can be used as input to other QIIME scripts that accept "
"BIOM files.")
script_info['script_usage'] = []
script_info['script_usage'].append(
("Examples:",
"Summarize taxa based at taxonomic levels 2, 3, 4, 5, and 6, and write "
"resulting taxa tables to the directory './tax'",
"%prog -i otu_table.biom -o ./tax"))
script_info['script_usage'].append(
("Examples:",
"Summarize taxa based at taxonomic levels 2, 3, 4, 5, and 6, and write "
"resulting mapping files to the directory './tax'",
"%prog -i otu_table.biom -o tax_mapping/ -m Fasting_Map.txt"))
script_info['output_description'] = (
"There are two possible output formats depending on whether or not a "
"mapping file is provided with the -m option. If a mapping file is not "
"provided, a table is returned where the taxonomic groups are each in a "
"row and there is a column for each sample. If a mapping file is "
"provided, the summary information will be appended to this file. "
"Specifically, a new column will be made for each taxonomic group to "
"which the relative abundances or raw counts will be added to the "
"existing rows for each sample. The addition of the taxonomic information "
"to the mapping file allows for taxonomic coloration of Principal "
"coordinates plots in Emperor. As described in the Emperor "
"documentation, principal coordinates plots can be dynamically colored based on "
"any of the metadata columns in the mapping file. Dynamic coloration of "
"the plots by the relative abundances of each taxonomic group can help to "
"distinguish which taxonomic groups are driving the clustering patterns.")
script_info['required_options'] = [
make_option('-i', '--otu_table_fp', dest='otu_table_fp',
help='Input OTU table filepath [REQUIRED]',
type='existing_filepath'),
]
script_info['optional_options'] = [
make_option('-L', '--level', default='2,3,4,5,6', type='string',
help='Taxonomic level to summarize by. [default: %default]'),
make_option('-m', '--mapping',
help='Input metadata mapping filepath. If supplied, then the '
'taxon information will be added to this file. This option is '
'useful for coloring PCoA plots by taxon abundance or to '
'perform statistical tests of taxon/mapping associations.',
type='existing_filepath'),
make_option('--md_identifier', default='taxonomy', type='string',
help='the relevant observation metadata key '
'[default: %default]'),
make_option('--md_as_string', default=False, action='store_true',
help='metadata is included as string [default: metadata is '
'included as list]'),
make_option('-d', '--delimiter', action='store', type='string',
dest='delimiter', default=';',
help='Delimiter separating taxonomy levels. '
'[default: %default]'),
make_option('-a', '--absolute_abundance', action='store_true',
dest='absolute_abundance', default=False,
help='If present, the absolute abundance of the lineage in '
'each sample is reported. By default, this script uses '
'relative abundance [default: %default]'),
make_option('-l', '--lower_percentage', type='float', default=None,
help='If present, OTUs having higher absolute abundance are '
'trimmed. To remove OTUs that make up more than 5% of the '
'total dataset you would pass 0.05. [default: %default]'),
make_option('-u', '--upper_percentage', type='float', default=None,
help='If present, OTUs having lower absolute abundance are '
'trimmed. To remove the OTUs that makes up less than 45% of '
'the total dataset you would pass 0.45. [default: %default]'),
make_option('-t', '--transposed_output', action='store_true',
dest='transposed_output', default=False,
help='If present, the output will be written transposed from '
'the regular output. This is helpful in cases when you want '
'to use Site Painter to visualize your data '
'[default: %default]'),
options_lookup['output_dir'],
make_option('--suppress_classic_table_output', action='store_true',
default=False, help='If present, the classic (TSV) format '
'taxon table will not be created in the output directory. '
'This option is ignored if -m/--mapping is present '
'[default: %default]'),
make_option('--suppress_biom_table_output', action='store_true',
default=False, help='If present, the BIOM-formatted taxon '
'table will not be created in the output directory. This '
'option is ignored if -m/--mapping is present '
'[default: %default]')
]
script_info['option_label'] = {'otu_table_fp': 'OTU table filepath',
'output_fp': 'Output filepath',
'mapping': 'QIIME-formatted mapping filepath',
'level': 'Summarize level',
'delimiter': 'Taxonomic delimiter',
'absolute_abundance': 'Use absolute abundance',
'lower_percentage': 'Top % of OTUs to remove',
'upper_percentage': 'Bottom % of OTUs to '
'remove'}
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
lower_percentage = opts.lower_percentage
upper_percentage = opts.upper_percentage
otu_table_fp = opts.otu_table_fp
otu_table = load_table(otu_table_fp)
delimiter = opts.delimiter
mapping_fp = opts.mapping
md_as_string = opts.md_as_string
md_identifier = opts.md_identifier
levels = opts.level.split(',')
suppress_classic_table_output = opts.suppress_classic_table_output
suppress_biom_table_output = opts.suppress_biom_table_output
if upper_percentage is not None and lower_percentage is not None:
raise ValueError(
"upper_percentage and lower_percentage are mutually exclusive")
    if upper_percentage is not None and lower_percentage is not None and \
            mapping_fp:
        raise ValueError("upper_percentage and lower_percentage cannot be "
                         "used with a mapping file")
if upper_percentage is not None and \
(upper_percentage < 0 or upper_percentage > 1.0):
raise ValueError('max_otu_percentage should be between 0.0 and 1.0')
if lower_percentage is not None and \
(lower_percentage < 0 or lower_percentage > 1.0):
raise ValueError('lower_percentage should be between 0.0 and 1.0')
if mapping_fp:
mapping_file = open(mapping_fp, 'U')
mapping, header, comments = parse_mapping_file(mapping_file)
# use the input Mapping file for producing the output filenames
map_dir_path, map_fname = split(mapping_fp)
map_basename, map_fname_ext = splitext(map_fname)
else:
if suppress_classic_table_output and suppress_biom_table_output:
option_parser.error("Both classic and BIOM output formats were "
"suppressed.")
if not opts.absolute_abundance:
otu_table = otu_table.norm(axis='sample', inplace=False)
    # introduce an output directory to allow for multiple outputs
if opts.output_dir:
create_dir(opts.output_dir, False)
output_dir_path = opts.output_dir
else:
output_dir_path = './'
# use the input OTU table to produce the output filenames
dir_path, fname = split(otu_table_fp)
basename, fname_ext = splitext(fname)
# Iterate over the levels and generate a summarized taxonomy for each
for level in levels:
if mapping_fp:
# define output filename
output_fname = join(output_dir_path,
map_basename + '_L%s.txt' % (level))
summary, tax_order = add_summary_mapping(otu_table,
mapping,
int(level),
md_as_string,
md_identifier)
write_add_taxa_summary_mapping(summary, tax_order, mapping,
header, output_fname, delimiter)
else:
# define the output filename. The extension will be added to the
# end depending on the output format
output_fname = join(output_dir_path, basename + '_L%s' % level)
summary, header = make_summary(otu_table,
int(level),
upper_percentage,
lower_percentage,
md_as_string,
md_identifier)
sample_ids = header[1:]
observation_ids = []
data = []
for row in summary:
# Join taxonomic levels to create an observation ID.
observation_ids.append(delimiter.join(row[0]))
data.append(row[1:])
table = Table(np.asarray(data), observation_ids, sample_ids)
if opts.transposed_output:
table = table.transpose()
if not suppress_classic_table_output:
with open(output_fname + '.txt', 'w') as outfile:
outfile.write(table.to_tsv())
if not suppress_biom_table_output:
write_biom_table(table, output_fname + '.biom')
if __name__ == "__main__":
main()
| gpl-2.0 |
adit-chandra/tensorflow | tensorflow/python/kernel_tests/extract_image_patches_grad_test.py | 14 | 6046 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ExtractImagePatches gradient."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class ExtractImagePatchesGradTest(test.TestCase):
"""Gradient-checking for ExtractImagePatches op."""
_TEST_CASES = [
{
'in_shape': [2, 5, 5, 3],
'ksizes': [1, 1, 1, 1],
'strides': [1, 2, 3, 1],
'rates': [1, 1, 1, 1],
},
{
'in_shape': [2, 7, 7, 3],
'ksizes': [1, 3, 3, 1],
'strides': [1, 1, 1, 1],
'rates': [1, 1, 1, 1],
},
{
'in_shape': [2, 8, 7, 3],
'ksizes': [1, 2, 2, 1],
'strides': [1, 1, 1, 1],
'rates': [1, 1, 1, 1],
},
{
'in_shape': [2, 7, 8, 3],
'ksizes': [1, 3, 2, 1],
'strides': [1, 4, 3, 1],
'rates': [1, 1, 1, 1],
},
{
'in_shape': [1, 15, 20, 3],
'ksizes': [1, 4, 3, 1],
'strides': [1, 1, 1, 1],
'rates': [1, 2, 4, 1],
},
{
'in_shape': [2, 7, 8, 1],
'ksizes': [1, 3, 2, 1],
'strides': [1, 3, 2, 1],
'rates': [1, 2, 2, 1],
},
{
'in_shape': [2, 8, 9, 4],
'ksizes': [1, 2, 2, 1],
'strides': [1, 4, 2, 1],
'rates': [1, 3, 2, 1],
},
]
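  # For reference: extract_image_patches maps a [batch, height, width, depth]
  # input to [batch, out_rows, out_cols, ksize_rows * ksize_cols * depth], so
  # the cases above exercise uneven strides, rates and both padding modes.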
@test_util.run_deprecated_v1
def testGradient(self):
# Set graph seed for determinism.
random_seed = 42
random_seed_lib.set_random_seed(random_seed)
with self.cached_session():
for test_case in self._TEST_CASES:
np.random.seed(random_seed)
in_shape = test_case['in_shape']
in_val = constant_op.constant(
np.random.random(in_shape), dtype=dtypes.float32)
for padding in ['VALID', 'SAME']:
out_val = array_ops.extract_image_patches(in_val, test_case['ksizes'],
test_case['strides'],
test_case['rates'], padding)
out_shape = out_val.get_shape().as_list()
err = gradient_checker.compute_gradient_error(in_val, in_shape,
out_val, out_shape)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testConstructGradientWithLargeImages(self):
batch_size = 4
height = 1024
width = 1024
ksize = 5
images = variable_scope.get_variable('inputs',
(batch_size, height, width, 1))
patches = array_ops.extract_image_patches(images,
ksizes=[1, ksize, ksize, 1],
strides=[1, 1, 1, 1],
rates=[1, 1, 1, 1],
padding='SAME')
# Github issue: #20146
# tf.image.extract_image_patches() gradient very slow at graph construction
# time
gradients = gradients_impl.gradients(patches, images)
# Won't time out.
self.assertIsNotNone(gradients)
def _VariableShapeGradient(self, test_shape_pattern):
"""Use test_shape_pattern to infer which dimensions are of
variable size.
"""
# Set graph seed for determinism.
random_seed = 42
random_seed_lib.set_random_seed(random_seed)
with self.test_session():
for test_case in self._TEST_CASES:
np.random.seed(random_seed)
in_shape = test_case['in_shape']
test_shape = [
x if x is None else y for x, y in zip(test_shape_pattern, in_shape)
]
in_val = array_ops.placeholder(shape=test_shape, dtype=dtypes.float32)
feed_dict = {in_val: np.random.random(in_shape)}
for padding in ['VALID', 'SAME']:
out_val = array_ops.extract_image_patches(in_val, test_case['ksizes'],
test_case['strides'],
test_case['rates'], padding)
out_val_tmp = out_val.eval(feed_dict=feed_dict)
out_shape = out_val_tmp.shape
err = gradient_checker.compute_gradient_error(in_val, in_shape,
out_val, out_shape)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def test_BxxC_Gradient(self):
self._VariableShapeGradient([-1, None, None, -1])
@test_util.run_deprecated_v1
def test_xHWx_Gradient(self):
self._VariableShapeGradient([None, -1, -1, None])
@test_util.run_deprecated_v1
def test_BHWC_Gradient(self):
self._VariableShapeGradient([-1, -1, -1, -1])
@test_util.run_deprecated_v1
def test_AllNone_Gradient(self):
self._VariableShapeGradient([None, None, None, None])
if __name__ == '__main__':
test.main()
| apache-2.0 |
recall704/django-r3call-blog | blogproject/thirdparty/south/db/sql_server/pyodbc.py | 118 | 19579 | from datetime import date, datetime, time
from warnings import warn
from django.db import models
from django.db.models import fields
from south.db import generic
from south.db.generic import delete_column_constraints, invalidate_table_constraints, copy_column_constraints
from south.exceptions import ConstraintDropped
from south.utils.py3 import string_types
try:
from django.utils.encoding import smart_text # Django >= 1.5
except ImportError:
from django.utils.encoding import smart_unicode as smart_text # Django < 1.5
from django.core.management.color import no_style
class DatabaseOperations(generic.DatabaseOperations):
"""
django-pyodbc (sql_server.pyodbc) implementation of database operations.
"""
backend_name = "pyodbc"
add_column_string = 'ALTER TABLE %s ADD %s;'
alter_string_set_type = 'ALTER COLUMN %(column)s %(type)s'
alter_string_set_null = 'ALTER COLUMN %(column)s %(type)s NULL'
alter_string_drop_null = 'ALTER COLUMN %(column)s %(type)s NOT NULL'
allows_combined_alters = False
drop_index_string = 'DROP INDEX %(index_name)s ON %(table_name)s'
drop_constraint_string = 'ALTER TABLE %(table_name)s DROP CONSTRAINT %(constraint_name)s'
delete_column_string = 'ALTER TABLE %s DROP COLUMN %s'
#create_check_constraint_sql = "ALTER TABLE %(table)s " + \
# generic.DatabaseOperations.add_check_constraint_fragment
create_foreign_key_sql = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s " + \
"FOREIGN KEY (%(column)s) REFERENCES %(target)s"
create_unique_sql = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s UNIQUE (%(columns)s)"
default_schema_name = "dbo"
has_booleans = False
@delete_column_constraints
def delete_column(self, table_name, name):
q_table_name, q_name = (self.quote_name(table_name), self.quote_name(name))
# Zap the constraints
for const in self._find_constraints_for_column(table_name,name):
params = {'table_name':q_table_name, 'constraint_name': const}
sql = self.drop_constraint_string % params
self.execute(sql, [])
# Zap the indexes
for ind in self._find_indexes_for_column(table_name,name):
params = {'table_name':q_table_name, 'index_name': ind}
sql = self.drop_index_string % params
self.execute(sql, [])
# Zap default if exists
drop_default = self.drop_column_default_sql(table_name, name)
if drop_default:
sql = "ALTER TABLE [%s] %s" % (table_name, drop_default)
self.execute(sql, [])
# Finally zap the column itself
self.execute(self.delete_column_string % (q_table_name, q_name), [])
def _find_indexes_for_column(self, table_name, name):
"Find the indexes that apply to a column, needed when deleting"
sql = """
SELECT si.name, si.id, sik.colid, sc.name
FROM dbo.sysindexes si WITH (NOLOCK)
INNER JOIN dbo.sysindexkeys sik WITH (NOLOCK)
ON sik.id = si.id
AND sik.indid = si.indid
INNER JOIN dbo.syscolumns sc WITH (NOLOCK)
ON si.id = sc.id
AND sik.colid = sc.colid
WHERE si.indid !=0
AND si.id = OBJECT_ID('%s')
AND sc.name = '%s'
"""
idx = self.execute(sql % (table_name, name), [])
return [i[0] for i in idx]
def _find_constraints_for_column(self, table_name, name, just_names=True):
"""
Find the constraints that apply to a column, needed when deleting. Defaults not included.
This is more general than the parent _constraints_affecting_columns, as on MSSQL this
includes PK and FK constraints.
"""
sql = """
SELECT CC.[CONSTRAINT_NAME]
,TC.[CONSTRAINT_TYPE]
,CHK.[CHECK_CLAUSE]
,RFD.TABLE_SCHEMA
,RFD.TABLE_NAME
,RFD.COLUMN_NAME
-- used for normalized names
,CC.TABLE_NAME
,CC.COLUMN_NAME
FROM [INFORMATION_SCHEMA].[TABLE_CONSTRAINTS] TC
JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE CC
ON TC.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND TC.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND TC.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
LEFT JOIN INFORMATION_SCHEMA.CHECK_CONSTRAINTS CHK
ON CHK.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND CHK.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND CHK.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
AND 'CHECK' = TC.CONSTRAINT_TYPE
LEFT JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS REF
ON REF.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND REF.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND REF.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
AND 'FOREIGN KEY' = TC.CONSTRAINT_TYPE
LEFT JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE RFD
ON RFD.CONSTRAINT_CATALOG = REF.UNIQUE_CONSTRAINT_CATALOG
AND RFD.CONSTRAINT_SCHEMA = REF.UNIQUE_CONSTRAINT_SCHEMA
AND RFD.CONSTRAINT_NAME = REF.UNIQUE_CONSTRAINT_NAME
WHERE CC.CONSTRAINT_CATALOG = CC.TABLE_CATALOG
AND CC.CONSTRAINT_SCHEMA = CC.TABLE_SCHEMA
AND CC.TABLE_CATALOG = %s
AND CC.TABLE_SCHEMA = %s
AND CC.TABLE_NAME = %s
AND CC.COLUMN_NAME = %s
"""
db_name = self._get_setting('name')
schema_name = self._get_schema_name()
table = self.execute(sql, [db_name, schema_name, table_name, name])
if just_names:
return [r[0] for r in table]
all = {}
for r in table:
cons_name, type = r[:2]
if type=='PRIMARY KEY' or type=='UNIQUE':
cons = all.setdefault(cons_name, (type,[]))
sql = '''
SELECT COLUMN_NAME
FROM INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE RFD
WHERE RFD.CONSTRAINT_CATALOG = %s
AND RFD.CONSTRAINT_SCHEMA = %s
AND RFD.TABLE_NAME = %s
AND RFD.CONSTRAINT_NAME = %s
'''
columns = self.execute(sql, [db_name, schema_name, table_name, cons_name])
cons[1].extend(col for col, in columns)
elif type=='CHECK':
cons = (type, r[2])
elif type=='FOREIGN KEY':
if cons_name in all:
raise NotImplementedError("Multiple-column foreign keys are not supported")
else:
cons = (type, r[3:6])
else:
raise NotImplementedError("Don't know how to handle constraints of type "+ type)
all[cons_name] = cons
return all
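    # When just_names is False, the mapping above comes back shaped roughly
    # like this (names and values shown for illustration only):
    #   {'PK_foo':        ('PRIMARY KEY', ['id']),
    #    'UQ_foo_slug':   ('UNIQUE',      ['slug']),
    #    'FK_foo_bar_id': ('FOREIGN KEY', ('dbo', 'bar', 'id')),
    #    'CK_foo_flag':   ('CHECK',       '([flag]>(0))')}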
@invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False):
"""
Alters the given column name so it will match the given field.
Note that conversion between the two by the database must be possible.
        Will not automatically add _id by default; to have this behaviour, pass
explicit_name=False.
@param table_name: The name of the table to add the column to
@param name: The name of the column to alter
@param field: The new field definition to use
"""
self._fix_field_definition(field)
if not ignore_constraints:
qn = self.quote_name
sch = qn(self._get_schema_name())
tab = qn(table_name)
table = ".".join([sch, tab])
try:
self.delete_foreign_key(table_name, name)
except ValueError:
# no FK constraint on this field. That's OK.
pass
constraints = self._find_constraints_for_column(table_name, name, False)
for constraint in constraints.keys():
params = dict(table_name = table,
constraint_name = qn(constraint))
sql = self.drop_constraint_string % params
self.execute(sql, [])
ret_val = super(DatabaseOperations, self).alter_column(table_name, name, field, explicit_name, ignore_constraints=True)
if not ignore_constraints:
for cname, (ctype,args) in constraints.items():
params = dict(table = table,
constraint = qn(cname))
if ctype=='UNIQUE':
params['columns'] = ", ".join(map(qn,args))
sql = self.create_unique_sql % params
elif ctype=='PRIMARY KEY':
params['columns'] = ", ".join(map(qn,args))
sql = self.create_primary_key_string % params
elif ctype=='FOREIGN KEY':
continue
# Foreign keys taken care of below
#target = "%s.%s(%s)" % tuple(map(qn,args))
#params.update(column = qn(name), target = target)
#sql = self.create_foreign_key_sql % params
elif ctype=='CHECK':
warn(ConstraintDropped("CHECK "+ args, table_name, name))
continue
#TODO: Some check constraints should be restored; but not before the generic
# backend restores them.
#params['check'] = args
#sql = self.create_check_constraint_sql % params
else:
                    raise NotImplementedError("Don't know how to handle constraints of type " + ctype)
self.execute(sql, [])
# Create foreign key if necessary
if field.rel and self.supports_foreign_keys:
self.execute(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
model = self.mock_model("FakeModelForIndexCreation", table_name)
for stmt in self._get_connection().creation.sql_indexes_for_field(model, field, no_style()):
self.execute(stmt)
return ret_val
def _alter_set_defaults(self, field, name, params, sqls):
"Subcommand of alter_column that sets default values (overrideable)"
# Historically, we used to set defaults here.
# But since South 0.8, we don't ever set defaults on alter-column -- we only
# use database-level defaults as scaffolding when adding columns.
# However, we still sometimes need to remove defaults in alter-column.
table_name = self.quote_name(params['table_name'])
drop_default = self.drop_column_default_sql(table_name, name)
if drop_default:
sqls.append((drop_default, []))
def _value_to_unquoted_literal(self, field, value):
# Start with the field's own translation
conn = self._get_connection()
value = field.get_db_prep_save(value, connection=conn)
# This is still a Python object -- nobody expects to need a literal.
if isinstance(value, string_types):
return smart_text(value)
elif isinstance(value, (date,time,datetime)):
return value.isoformat()
else:
#TODO: Anybody else needs special translations?
return str(value)
def _default_value_workaround(self, value):
if isinstance(value, (date,time,datetime)):
return value.isoformat()
else:
return super(DatabaseOperations, self)._default_value_workaround(value)
def _quote_string(self, s):
return "'" + s.replace("'","''") + "'"
def drop_column_default_sql(self, table_name, name, q_name=None):
"MSSQL specific drop default, which is a pain"
sql = """
SELECT object_name(cdefault)
FROM syscolumns
WHERE id = object_id('%s')
AND name = '%s'
"""
cons = self.execute(sql % (table_name, name), [])
if cons and cons[0] and cons[0][0]:
return "DROP CONSTRAINT %s" % cons[0][0]
return None
def _fix_field_definition(self, field):
if isinstance(field, (fields.BooleanField, fields.NullBooleanField)):
if field.default == True:
field.default = 1
if field.default == False:
field.default = 0
# This is copied from South's generic add_column, with two modifications:
# 1) The sql-server-specific call to _fix_field_definition
# 2) Removing a default, when needed, by calling drop_default and not the more general alter_column
@invalidate_table_constraints
def add_column(self, table_name, name, field, keep_default=False):
"""
Adds the column 'name' to the table 'table_name'.
        Uses the 'field' parameter, a django.db.models.fields.Field instance,
to generate the necessary sql
@param table_name: The name of the table to add the column to
@param name: The name of the column to add
@param field: The field to use
"""
self._fix_field_definition(field)
sql = self.column_sql(table_name, name, field)
if sql:
params = (
self.quote_name(table_name),
sql,
)
sql = self.add_column_string % params
self.execute(sql)
# Now, drop the default if we need to
if not keep_default and field.default is not None:
field.default = fields.NOT_PROVIDED
#self.alter_column(table_name, name, field, explicit_name=False, ignore_constraints=True)
self.drop_default(table_name, name, field)
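        # Illustrative usage sketch (not part of the original module; the model
        # and field names are made up):
        #   db.add_column('app_mymodel', 'is_active',
        #                 fields.BooleanField(default=True))
        # _fix_field_definition() coerces the boolean default to 1 for MSSQL,
        # the column is added with a DB-level default, and drop_default() then
        # removes that scaffolding default because keep_default is False.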
@invalidate_table_constraints
def drop_default(self, table_name, name, field):
fragment = self.drop_column_default_sql(table_name, name)
if fragment:
table_name = self.quote_name(table_name)
sql = " ".join(["ALTER TABLE", table_name, fragment])
self.execute(sql)
@invalidate_table_constraints
def create_table(self, table_name, field_defs):
# Tweak stuff as needed
for _, f in field_defs:
self._fix_field_definition(f)
# Run
super(DatabaseOperations, self).create_table(table_name, field_defs)
def _find_referencing_fks(self, table_name):
"MSSQL does not support cascading FKs when dropping tables, we need to implement."
# FK -- Foreign Keys
# UCTU -- Unique Constraints Table Usage
# FKTU -- Foreign Key Table Usage
# (last two are both really CONSTRAINT_TABLE_USAGE, different join conditions)
sql = """
SELECT FKTU.TABLE_SCHEMA as REFING_TABLE_SCHEMA,
FKTU.TABLE_NAME as REFING_TABLE_NAME,
FK.[CONSTRAINT_NAME] as FK_NAME
FROM [INFORMATION_SCHEMA].[REFERENTIAL_CONSTRAINTS] FK
JOIN [INFORMATION_SCHEMA].[CONSTRAINT_TABLE_USAGE] UCTU
ON FK.UNIQUE_CONSTRAINT_CATALOG = UCTU.CONSTRAINT_CATALOG and
FK.UNIQUE_CONSTRAINT_NAME = UCTU.CONSTRAINT_NAME and
FK.UNIQUE_CONSTRAINT_SCHEMA = UCTU.CONSTRAINT_SCHEMA
JOIN [INFORMATION_SCHEMA].[CONSTRAINT_TABLE_USAGE] FKTU
ON FK.CONSTRAINT_CATALOG = FKTU.CONSTRAINT_CATALOG and
FK.CONSTRAINT_NAME = FKTU.CONSTRAINT_NAME and
FK.CONSTRAINT_SCHEMA = FKTU.CONSTRAINT_SCHEMA
WHERE FK.CONSTRAINT_CATALOG = %s
AND UCTU.TABLE_SCHEMA = %s -- REFD_TABLE_SCHEMA
AND UCTU.TABLE_NAME = %s -- REFD_TABLE_NAME
"""
db_name = self._get_setting('name')
schema_name = self._get_schema_name()
return self.execute(sql, [db_name, schema_name, table_name])
@invalidate_table_constraints
def delete_table(self, table_name, cascade=True):
"""
Deletes the table 'table_name'.
"""
if cascade:
refing = self._find_referencing_fks(table_name)
            for schema, table, constraint in refing:
                table = ".".join(map(self.quote_name, [schema, table]))
params = dict(table_name = table,
constraint_name = self.quote_name(constraint))
sql = self.drop_constraint_string % params
self.execute(sql, [])
cascade = False
super(DatabaseOperations, self).delete_table(table_name, cascade)
@copy_column_constraints
@delete_column_constraints
def rename_column(self, table_name, old, new):
"""
Renames the column of 'table_name' from 'old' to 'new'.
WARNING - This isn't transactional on MSSQL!
"""
if old == new:
# No Operation
return
# Examples on the MS site show the table name not being quoted...
params = (table_name, self.quote_name(old), self.quote_name(new))
self.execute("EXEC sp_rename '%s.%s', %s, 'COLUMN'" % params)
@invalidate_table_constraints
def rename_table(self, old_table_name, table_name):
"""
Renames the table 'old_table_name' to 'table_name'.
WARNING - This isn't transactional on MSSQL!
"""
if old_table_name == table_name:
# No Operation
return
params = (self.quote_name(old_table_name), self.quote_name(table_name))
self.execute('EXEC sp_rename %s, %s' % params)
def _db_type_for_alter_column(self, field):
return self._db_positive_type_for_alter_column(DatabaseOperations, field)
def _alter_add_column_mods(self, field, name, params, sqls):
return self._alter_add_positive_check(DatabaseOperations, field, name, params, sqls)
@invalidate_table_constraints
def delete_foreign_key(self, table_name, column):
super(DatabaseOperations, self).delete_foreign_key(table_name, column)
# A FK also implies a non-unique index
find_index_sql = """
SELECT i.name -- s.name, t.name, c.name
FROM sys.tables t
INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
INNER JOIN sys.indexes i ON i.object_id = t.object_id
INNER JOIN sys.index_columns ic ON ic.object_id = t.object_id
AND ic.index_id = i.index_id
INNER JOIN sys.columns c ON c.object_id = t.object_id
AND ic.column_id = c.column_id
WHERE i.is_unique=0 AND i.is_primary_key=0 AND i.is_unique_constraint=0
AND s.name = %s
AND t.name = %s
AND c.name = %s
"""
schema = self._get_schema_name()
indexes = self.execute(find_index_sql, [schema, table_name, column])
qn = self.quote_name
for index in (i[0] for i in indexes if i[0]): # "if i[0]" added because an empty name may return
self.execute("DROP INDEX %s on %s.%s" % (qn(index), qn(schema), qn(table_name) ))
| apache-2.0 |
bbannier/ROOT | interpreter/llvm/src/utils/llvm-build/llvmbuild/componentinfo.py | 10 | 17424 | """
Descriptor objects for entities that are part of the LLVM project.
"""
import ConfigParser
import StringIO
import sys
from util import *
class ParseError(Exception):
pass
class ComponentInfo(object):
"""
Base class for component descriptions.
"""
type_name = None
@staticmethod
def parse_items(items, has_dependencies = True):
kwargs = {}
kwargs['name'] = items.get_string('name')
kwargs['parent'] = items.get_optional_string('parent')
if has_dependencies:
kwargs['dependencies'] = items.get_list('dependencies')
return kwargs
def __init__(self, subpath, name, dependencies, parent):
if not subpath.startswith('/'):
raise ValueError,"invalid subpath: %r" % subpath
self.subpath = subpath
self.name = name
self.dependencies = list(dependencies)
# The name of the parent component to logically group this component
# under.
self.parent = parent
# The parent instance, once loaded.
self.parent_instance = None
self.children = []
# The original source path.
self._source_path = None
# A flag to mark "special" components which have some amount of magic
# handling (generally based on command line options).
self._is_special_group = False
def set_parent_instance(self, parent):
assert parent.name == self.parent, "Unexpected parent!"
self.parent_instance = parent
self.parent_instance.children.append(self)
def get_component_references(self):
"""get_component_references() -> iter
Return an iterator over the named references to other components from
this object. Items are of the form (reference-type, component-name).
"""
# Parent references are handled specially.
for r in self.dependencies:
yield ('dependency', r)
def get_llvmbuild_fragment(self):
        abstract  # intentionally undefined name; raises NameError unless a subclass overrides this method
def get_parent_target_group(self):
"""get_parent_target_group() -> ComponentInfo or None
Return the nearest parent target group (if any), or None if the
component is not part of any target group.
"""
# If this is a target group, return it.
if self.type_name == 'TargetGroup':
return self
# Otherwise recurse on the parent, if any.
if self.parent_instance:
return self.parent_instance.get_parent_target_group()
class GroupComponentInfo(ComponentInfo):
"""
    Group components have no semantics as far as the build system is concerned,
but exist to help organize other components into a logical tree structure.
"""
type_name = 'Group'
@staticmethod
def parse(subpath, items):
kwargs = ComponentInfo.parse_items(items, has_dependencies = False)
return GroupComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, parent):
ComponentInfo.__init__(self, subpath, name, [], parent)
def get_llvmbuild_fragment(self):
result = StringIO.StringIO()
print >>result, 'type = %s' % self.type_name
print >>result, 'name = %s' % self.name
print >>result, 'parent = %s' % self.parent
return result.getvalue()
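    # Illustrative output of get_llvmbuild_fragment() for a hypothetical group
    # component named 'Libraries' parented under '$ROOT':
    #   type = Group
    #   name = Libraries
    #   parent = $ROOT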
class LibraryComponentInfo(ComponentInfo):
type_name = 'Library'
@staticmethod
def parse_items(items):
kwargs = ComponentInfo.parse_items(items)
kwargs['library_name'] = items.get_optional_string('library_name')
kwargs['required_libraries'] = items.get_list('required_libraries')
kwargs['add_to_library_groups'] = items.get_list(
'add_to_library_groups')
kwargs['installed'] = items.get_optional_bool('installed', True)
return kwargs
@staticmethod
def parse(subpath, items):
kwargs = LibraryComponentInfo.parse_items(items)
return LibraryComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, dependencies, parent, library_name,
required_libraries, add_to_library_groups, installed):
ComponentInfo.__init__(self, subpath, name, dependencies, parent)
# If given, the name to use for the library instead of deriving it from
# the component name.
self.library_name = library_name
# The names of the library components which are required when linking
# with this component.
self.required_libraries = list(required_libraries)
# The names of the library group components this component should be
# considered part of.
self.add_to_library_groups = list(add_to_library_groups)
# Whether or not this library is installed.
self.installed = installed
def get_component_references(self):
for r in ComponentInfo.get_component_references(self):
yield r
for r in self.required_libraries:
yield ('required library', r)
for r in self.add_to_library_groups:
yield ('library group', r)
def get_llvmbuild_fragment(self):
result = StringIO.StringIO()
print >>result, 'type = %s' % self.type_name
print >>result, 'name = %s' % self.name
print >>result, 'parent = %s' % self.parent
if self.library_name is not None:
print >>result, 'library_name = %s' % self.library_name
if self.required_libraries:
print >>result, 'required_libraries = %s' % ' '.join(
self.required_libraries)
if self.add_to_library_groups:
print >>result, 'add_to_library_groups = %s' % ' '.join(
self.add_to_library_groups)
if not self.installed:
print >>result, 'installed = 0'
return result.getvalue()
def get_library_name(self):
return self.library_name or self.name
def get_prefixed_library_name(self):
"""
get_prefixed_library_name() -> str
Return the library name prefixed by the project name. This is generally
what the library name will be on disk.
"""
basename = self.get_library_name()
# FIXME: We need to get the prefix information from an explicit project
# object, or something.
if basename in ('gtest', 'gtest_main'):
return basename
return 'LLVM%s' % basename
def get_llvmconfig_component_name(self):
return self.get_library_name().lower()
class OptionalLibraryComponentInfo(LibraryComponentInfo):
type_name = "OptionalLibrary"
@staticmethod
def parse(subpath, items):
kwargs = LibraryComponentInfo.parse_items(items)
return OptionalLibraryComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, dependencies, parent, library_name,
required_libraries, add_to_library_groups, installed):
LibraryComponentInfo.__init__(self, subpath, name, dependencies, parent,
library_name, required_libraries,
add_to_library_groups, installed)
class LibraryGroupComponentInfo(ComponentInfo):
type_name = 'LibraryGroup'
@staticmethod
def parse(subpath, items):
kwargs = ComponentInfo.parse_items(items, has_dependencies = False)
kwargs['required_libraries'] = items.get_list('required_libraries')
kwargs['add_to_library_groups'] = items.get_list(
'add_to_library_groups')
return LibraryGroupComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, parent, required_libraries = [],
add_to_library_groups = []):
ComponentInfo.__init__(self, subpath, name, [], parent)
# The names of the library components which are required when linking
# with this component.
self.required_libraries = list(required_libraries)
# The names of the library group components this component should be
# considered part of.
self.add_to_library_groups = list(add_to_library_groups)
def get_component_references(self):
for r in ComponentInfo.get_component_references(self):
yield r
for r in self.required_libraries:
yield ('required library', r)
for r in self.add_to_library_groups:
yield ('library group', r)
def get_llvmbuild_fragment(self):
result = StringIO.StringIO()
print >>result, 'type = %s' % self.type_name
print >>result, 'name = %s' % self.name
print >>result, 'parent = %s' % self.parent
if self.required_libraries and not self._is_special_group:
print >>result, 'required_libraries = %s' % ' '.join(
self.required_libraries)
if self.add_to_library_groups:
print >>result, 'add_to_library_groups = %s' % ' '.join(
self.add_to_library_groups)
return result.getvalue()
def get_llvmconfig_component_name(self):
return self.name.lower()
class TargetGroupComponentInfo(ComponentInfo):
type_name = 'TargetGroup'
@staticmethod
def parse(subpath, items):
kwargs = ComponentInfo.parse_items(items, has_dependencies = False)
kwargs['required_libraries'] = items.get_list('required_libraries')
kwargs['add_to_library_groups'] = items.get_list(
'add_to_library_groups')
kwargs['has_jit'] = items.get_optional_bool('has_jit', False)
kwargs['has_asmprinter'] = items.get_optional_bool('has_asmprinter',
False)
kwargs['has_asmparser'] = items.get_optional_bool('has_asmparser',
False)
kwargs['has_disassembler'] = items.get_optional_bool('has_disassembler',
False)
return TargetGroupComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, parent, required_libraries = [],
add_to_library_groups = [], has_jit = False,
has_asmprinter = False, has_asmparser = False,
has_disassembler = False):
ComponentInfo.__init__(self, subpath, name, [], parent)
# The names of the library components which are required when linking
# with this component.
self.required_libraries = list(required_libraries)
# The names of the library group components this component should be
# considered part of.
self.add_to_library_groups = list(add_to_library_groups)
# Whether or not this target supports the JIT.
self.has_jit = bool(has_jit)
# Whether or not this target defines an assembly printer.
self.has_asmprinter = bool(has_asmprinter)
# Whether or not this target defines an assembly parser.
self.has_asmparser = bool(has_asmparser)
        # Whether or not this target defines a disassembler.
self.has_disassembler = bool(has_disassembler)
# Whether or not this target is enabled. This is set in response to
# configuration parameters.
self.enabled = False
def get_component_references(self):
for r in ComponentInfo.get_component_references(self):
yield r
for r in self.required_libraries:
yield ('required library', r)
for r in self.add_to_library_groups:
yield ('library group', r)
def get_llvmbuild_fragment(self):
result = StringIO.StringIO()
print >>result, 'type = %s' % self.type_name
print >>result, 'name = %s' % self.name
print >>result, 'parent = %s' % self.parent
if self.required_libraries:
print >>result, 'required_libraries = %s' % ' '.join(
self.required_libraries)
if self.add_to_library_groups:
print >>result, 'add_to_library_groups = %s' % ' '.join(
self.add_to_library_groups)
for bool_key in ('has_asmparser', 'has_asmprinter', 'has_disassembler',
'has_jit'):
if getattr(self, bool_key):
print >>result, '%s = 1' % (bool_key,)
return result.getvalue()
def get_llvmconfig_component_name(self):
return self.name.lower()
class ToolComponentInfo(ComponentInfo):
type_name = 'Tool'
@staticmethod
def parse(subpath, items):
kwargs = ComponentInfo.parse_items(items)
kwargs['required_libraries'] = items.get_list('required_libraries')
return ToolComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, dependencies, parent,
required_libraries):
ComponentInfo.__init__(self, subpath, name, dependencies, parent)
# The names of the library components which are required to link this
# tool.
self.required_libraries = list(required_libraries)
def get_component_references(self):
for r in ComponentInfo.get_component_references(self):
yield r
for r in self.required_libraries:
yield ('required library', r)
def get_llvmbuild_fragment(self):
result = StringIO.StringIO()
print >>result, 'type = %s' % self.type_name
print >>result, 'name = %s' % self.name
print >>result, 'parent = %s' % self.parent
print >>result, 'required_libraries = %s' % ' '.join(
self.required_libraries)
return result.getvalue()
class BuildToolComponentInfo(ToolComponentInfo):
type_name = 'BuildTool'
@staticmethod
def parse(subpath, items):
kwargs = ComponentInfo.parse_items(items)
kwargs['required_libraries'] = items.get_list('required_libraries')
return BuildToolComponentInfo(subpath, **kwargs)
###
class IniFormatParser(dict):
def get_list(self, key):
# Check if the value is defined.
value = self.get(key)
if value is None:
return []
# Lists are just whitespace separated strings.
return value.split()
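        # Illustrative example (not part of the original module): for an item
        # such as required_libraries = "Core Support", get_list returns
        # ['Core', 'Support']; a missing key yields [].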
def get_optional_string(self, key):
value = self.get_list(key)
if not value:
return None
if len(value) > 1:
raise ParseError("multiple values for scalar key: %r" % key)
return value[0]
def get_string(self, key):
value = self.get_optional_string(key)
if not value:
raise ParseError("missing value for required string: %r" % key)
return value
def get_optional_bool(self, key, default = None):
value = self.get_optional_string(key)
if not value:
return default
if value not in ('0', '1'):
raise ParseError("invalid value(%r) for boolean property: %r" % (
value, key))
return bool(int(value))
def get_bool(self, key):
value = self.get_optional_bool(key)
if value is None:
raise ParseError("missing value for required boolean: %r" % key)
return value
_component_type_map = dict(
(t.type_name, t)
for t in (GroupComponentInfo,
LibraryComponentInfo, LibraryGroupComponentInfo,
ToolComponentInfo, BuildToolComponentInfo,
TargetGroupComponentInfo, OptionalLibraryComponentInfo))
def load_from_path(path, subpath):
# Load the LLVMBuild.txt file as an .ini format file.
parser = ConfigParser.RawConfigParser()
parser.read(path)
# Extract the common section.
if parser.has_section("common"):
common = IniFormatParser(parser.items("common"))
parser.remove_section("common")
else:
common = IniFormatParser({})
return common, _read_components_from_parser(parser, path, subpath)
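# Illustrative LLVMBuild.txt fragment that load_from_path() could parse; the
# section names and values below are made up for demonstration:
#   [common]
#   subdirectories = Support
#   [component_0]
#   type = Library
#   name = Support
#   parent = Libraries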
def _read_components_from_parser(parser, path, subpath):
# We load each section which starts with 'component' as a distinct component
# description (so multiple components can be described in one file).
for section in parser.sections():
if not section.startswith('component'):
# We don't expect arbitrary sections currently, warn the user.
warning("ignoring unknown section %r in %r" % (section, path))
continue
# Determine the type of the component to instantiate.
if not parser.has_option(section, 'type'):
fatal("invalid component %r in %r: %s" % (
section, path, "no component type"))
type_name = parser.get(section, 'type')
type_class = _component_type_map.get(type_name)
if type_class is None:
fatal("invalid component %r in %r: %s" % (
section, path, "invalid component type: %r" % type_name))
# Instantiate the component based on the remaining values.
try:
info = type_class.parse(subpath,
IniFormatParser(parser.items(section)))
except TypeError:
print >>sys.stderr, "error: invalid component %r in %r: %s" % (
section, path, "unable to instantiate: %r" % type_name)
import traceback
traceback.print_exc()
raise SystemExit, 1
except ParseError,e:
fatal("unable to load component %r in %r: %s" % (
section, path, e.message))
info._source_path = path
yield info
| lgpl-2.1 |
jusdng/odoo | addons/account_budget/report/analytic_account_budget_report.py | 360 | 7589 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class analytic_account_budget_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(analytic_account_budget_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'funct': self.funct,
'funct_total': self.funct_total,
'time': time,
})
self.context = context
def funct(self, object, form, ids=None, done=None, level=1):
if ids is None:
ids = {}
if not ids:
ids = self.ids
if not done:
done = {}
global tot
tot = {
'theo':0.00,
'pln':0.00,
'prac':0.00,
'perc':0.00
}
result = []
accounts = self.pool.get('account.analytic.account').browse(self.cr, self.uid, [object.id], self.context.copy())
c_b_lines_obj = self.pool.get('crossovered.budget.lines')
obj_c_budget = self.pool.get('crossovered.budget')
for account_id in accounts:
res = {}
b_line_ids = []
for line in account_id.crossovered_budget_line:
b_line_ids.append(line.id)
if not b_line_ids:
return []
d_from = form['date_from']
d_to = form['date_to']
self.cr.execute('SELECT DISTINCT(crossovered_budget_id) FROM crossovered_budget_lines WHERE id =ANY(%s)',(b_line_ids,))
budget_ids = self.cr.fetchall()
context = {'wizard_date_from':d_from,'wizard_date_to':d_to}
for i in range(0, len(budget_ids)):
budget_name = obj_c_budget.browse(self.cr, self.uid, [budget_ids[i][0]])
res= {
'b_id':'-1',
'a_id':'-1',
'name':budget_name[0].name,
'status':1,
'theo':0.00,
'pln':0.00,
'prac':0.00,
'perc':0.00
}
result.append(res)
line_ids = c_b_lines_obj.search(self.cr, self.uid, [('id', 'in', b_line_ids), ('crossovered_budget_id','=',budget_ids[i][0])])
line_id = c_b_lines_obj.browse(self.cr, self.uid, line_ids)
tot_theo = tot_pln = tot_prac = tot_perc = 0
done_budget = []
for line in line_id:
if line.id in b_line_ids:
theo = pract = 0.00
theo = c_b_lines_obj._theo_amt(self.cr, self.uid, [line.id], context)[line.id]
pract = c_b_lines_obj._prac_amt(self.cr, self.uid, [line.id], context)[line.id]
if line.general_budget_id.id in done_budget:
for record in result:
if record['b_id'] == line.general_budget_id.id and record['a_id'] == line.analytic_account_id.id:
record['theo'] += theo
record['pln'] += line.planned_amount
record['prac'] += pract
record['perc'] += line.percentage
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += line.percentage
else:
res1 = {
'b_id': line.general_budget_id.id,
'a_id': line.analytic_account_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': theo,
'pln': line.planned_amount,
'prac': pract,
'perc': line.percentage
}
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += line.percentage
result.append(res1)
done_budget.append(line.general_budget_id.id)
else:
if line.general_budget_id.id in done_budget:
continue
else:
res1={
'b_id': line.general_budget_id.id,
'a_id': line.analytic_account_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': 0.00,
'pln': 0.00,
'prac': 0.00,
'perc': 0.00
}
result.append(res1)
done_budget.append(line.general_budget_id.id)
if tot_theo == 0.00:
tot_perc = 0.00
else:
tot_perc = float(tot_prac / tot_theo) * 100
result[-(len(done_budget) +1)]['theo'] = tot_theo
tot['theo'] +=tot_theo
result[-(len(done_budget) +1)]['pln'] = tot_pln
tot['pln'] +=tot_pln
result[-(len(done_budget) +1)]['prac'] = tot_prac
tot['prac'] +=tot_prac
result[-(len(done_budget) +1)]['perc'] = tot_perc
if tot['theo'] == 0.00:
tot['perc'] = 0.00
else:
tot['perc'] = float(tot['prac'] / tot['theo']) * 100
return result
def funct_total(self, form):
result = []
res = {}
res = {
'tot_theo': tot['theo'],
'tot_pln': tot['pln'],
'tot_prac': tot['prac'],
'tot_perc': tot['perc']
}
result.append(res)
return result
class report_analyticaccountbudget(osv.AbstractModel):
_name = 'report.account_budget.report_analyticaccountbudget'
_inherit = 'report.abstract_report'
_template = 'account_budget.report_analyticaccountbudget'
_wrapped_report_class = analytic_account_budget_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
alejandrodob/mamba | mamba/reporter.py | 7 | 1647 | # -*- coding: utf-8 -*-
import datetime
class Reporter(object):
def __init__(self, *formatters):
self.listeners = formatters
@property
def failed_count(self):
return len(self.failed_examples)
def start(self):
self.begin = datetime.datetime.utcnow()
self.duration = datetime.timedelta(0)
self.example_count = 0
self.pending_count = 0
self.failed_examples = []
def example_started(self, example):
self.example_count += 1
self.notify('example_started', example)
def example_passed(self, example):
self.notify('example_passed', example)
def example_failed(self, example):
self.failed_examples.append(example)
self.notify('example_failed', example)
def example_pending(self, example):
self.pending_count += 1
self.notify('example_pending', example)
def example_group_started(self, example_group):
self.notify('example_group_started', example_group)
def example_group_finished(self, example_group):
self.notify('example_group_finished', example_group)
def example_group_pending(self, example_group):
self.notify('example_group_pending', example_group)
def finish(self):
self.stop()
self.notify('summary', self.duration, self.example_count, self.failed_count, self.pending_count)
self.notify('failures', self.failed_examples)
def stop(self):
self.duration = datetime.datetime.utcnow() - self.begin
def notify(self, event, *args):
for listener in self.listeners:
getattr(listener, event)(*args)
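    # Illustrative usage sketch (not part of the original module; the formatter
    # name is hypothetical -- any object exposing the notified methods works):
    #   reporter = Reporter(MyFormatter())
    #   reporter.start()
    #   reporter.example_started(example)
    #   reporter.example_passed(example)
    #   reporter.finish()  # emits 'summary' and 'failures' to each formatter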
| mit |
dezynetechnologies/odoo | addons/hr_contract/hr_contract.py | 302 | 5377 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_inherit = "hr.employee"
def _get_latest_contract(self, cr, uid, ids, field_name, args, context=None):
res = {}
obj_contract = self.pool.get('hr.contract')
for emp in self.browse(cr, uid, ids, context=context):
contract_ids = obj_contract.search(cr, uid, [('employee_id','=',emp.id),], order='date_start', context=context)
if contract_ids:
res[emp.id] = contract_ids[-1:][0]
else:
res[emp.id] = False
return res
def _contracts_count(self, cr, uid, ids, field_name, arg, context=None):
Contract = self.pool['hr.contract']
return {
employee_id: Contract.search_count(cr, SUPERUSER_ID, [('employee_id', '=', employee_id)], context=context)
for employee_id in ids
}
_columns = {
'manager': fields.boolean('Is a Manager'),
'medic_exam': fields.date('Medical Examination Date'),
'place_of_birth': fields.char('Place of Birth'),
'children': fields.integer('Number of Children'),
'vehicle': fields.char('Company Vehicle'),
'vehicle_distance': fields.integer('Home-Work Dist.', help="In kilometers"),
'contract_ids': fields.one2many('hr.contract', 'employee_id', 'Contracts'),
'contract_id': fields.function(_get_latest_contract, string='Contract', type='many2one', relation="hr.contract", help='Latest contract of the employee'),
'contracts_count': fields.function(_contracts_count, type='integer', string='Contracts'),
}
class hr_contract_type(osv.osv):
_name = 'hr.contract.type'
_description = 'Contract Type'
_columns = {
'name': fields.char('Contract Type', required=True),
}
class hr_contract(osv.osv):
_name = 'hr.contract'
_description = 'Contract'
_columns = {
'name': fields.char('Contract Reference', required=True),
'employee_id': fields.many2one('hr.employee', "Employee", required=True),
'department_id': fields.related('employee_id','department_id', type='many2one', relation='hr.department', string="Department", readonly=True),
'type_id': fields.many2one('hr.contract.type', "Contract Type", required=True),
'job_id': fields.many2one('hr.job', 'Job Title'),
'date_start': fields.date('Start Date', required=True),
'date_end': fields.date('End Date'),
'trial_date_start': fields.date('Trial Start Date'),
'trial_date_end': fields.date('Trial End Date'),
'working_hours': fields.many2one('resource.calendar','Working Schedule'),
'wage': fields.float('Wage', digits=(16,2), required=True, help="Basic Salary of the employee"),
'advantages': fields.text('Advantages'),
'notes': fields.text('Notes'),
'permit_no': fields.char('Work Permit No', required=False, readonly=False),
'visa_no': fields.char('Visa No', required=False, readonly=False),
'visa_expire': fields.date('Visa Expire Date'),
}
def _get_type(self, cr, uid, context=None):
type_ids = self.pool.get('hr.contract.type').search(cr, uid, [('name', '=', 'Employee')])
return type_ids and type_ids[0] or False
_defaults = {
'date_start': lambda *a: time.strftime("%Y-%m-%d"),
'type_id': _get_type
}
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
if not employee_id:
return {'value': {'job_id': False}}
emp_obj = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
job_id = False
if emp_obj.job_id:
job_id = emp_obj.job_id.id
return {'value': {'job_id': job_id}}
def _check_dates(self, cr, uid, ids, context=None):
for contract in self.read(cr, uid, ids, ['date_start', 'date_end'], context=context):
if contract['date_start'] and contract['date_end'] and contract['date_start'] > contract['date_end']:
return False
return True
_constraints = [
(_check_dates, 'Error! Contract start-date must be less than contract end-date.', ['date_start', 'date_end'])
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
haoxli/web-testing-service | wts/tests/csp/csp_script-src_unsafe-inline_unsafe-eval.py | 30 | 2983 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "script-src 'self' 'unsafe-inline' 'unsafe-eval'"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_script-src_unsafe-inline_unsafe-eval</title>
<link rel="author" title="Intel" href="http://www.intel.com/"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#script-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="script-src 'self' 'unsafe-inline' 'unsafe-eval'"/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<script src="support/csp.js"></script>
<script>
test(function() {
assert_equals(X, 10, "X is 10");
assert_equals(Y, 27, "Y is X+17");
}, document.title + "_allowed_int");
test(function() {
assert_equals(eval(X + Y), 37, "eval(X + Y) should be 37");
}, document.title + "_allowed_inline");
</script>
</body>
</html> """
| bsd-3-clause |
MattCCS/PyVault | site-packages/pip/_vendor/requests/packages/urllib3/util/retry.py | 153 | 10350 | from __future__ import absolute_import
import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
_observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
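        # Illustrative examples (not part of the original module):
        #   Retry.from_int(3)      -> Retry(total=3) with redirects still allowed
        #   Retry.from_int(False)  -> Retry(total=False), i.e. retries disabled
        #   Retry.from_int(None)   -> Retry.DEFAULT (Retry(3)) when no default
        #                             is supplied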
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
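        # Worked example (illustrative): with backoff_factor=0.2 and three
        # observed errors the value is 0.2 * (2 ** (3 - 1)) = 0.8, so sleep()
        # would pause for 0.8 seconds (capped at BACKOFF_MAX).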
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None,
_pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
| mit |
jiangzhixiao/odoo | addons/base_geolocalize/__openerp__.py | 49 | 1459 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013_Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Partners Geo-Localization',
'version': '1.0',
'category': 'Customer Relationship Management',
'description': """
Partners geolocalization
========================
""",
'author': 'OpenERP SA',
'depends': ['base'],
'demo': [
],
'data': [
'views/res_partner_view.xml',
],
'test': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aforalee/keystone | keystone/contrib/federation/utils.py | 6 | 26299 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for Federation Extension."""
import ast
import re
import jsonschema
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from keystone import exception
from keystone.i18n import _, _LW
CONF = cfg.CONF
LOG = log.getLogger(__name__)
MAPPING_SCHEMA = {
"type": "object",
"required": ['rules'],
"properties": {
"rules": {
"minItems": 1,
"type": "array",
"items": {
"type": "object",
"required": ['local', 'remote'],
"additionalProperties": False,
"properties": {
"local": {
"type": "array"
},
"remote": {
"minItems": 1,
"type": "array",
"items": {
"type": "object",
"oneOf": [
{"$ref": "#/definitions/empty"},
{"$ref": "#/definitions/any_one_of"},
{"$ref": "#/definitions/not_any_of"},
{"$ref": "#/definitions/blacklist"},
{"$ref": "#/definitions/whitelist"}
],
}
}
}
}
}
},
"definitions": {
"empty": {
"type": "object",
"required": ['type'],
"properties": {
"type": {
"type": "string"
},
},
"additionalProperties": False,
},
"any_one_of": {
"type": "object",
"additionalProperties": False,
"required": ['type', 'any_one_of'],
"properties": {
"type": {
"type": "string"
},
"any_one_of": {
"type": "array"
},
"regex": {
"type": "boolean"
}
}
},
"not_any_of": {
"type": "object",
"additionalProperties": False,
"required": ['type', 'not_any_of'],
"properties": {
"type": {
"type": "string"
},
"not_any_of": {
"type": "array"
},
"regex": {
"type": "boolean"
}
}
},
"blacklist": {
"type": "object",
"additionalProperties": False,
"required": ['type', 'blacklist'],
"properties": {
"type": {
"type": "string"
},
"blacklist": {
"type": "array"
}
}
},
"whitelist": {
"type": "object",
"additionalProperties": False,
"required": ['type', 'whitelist'],
"properties": {
"type": {
"type": "string"
},
"whitelist": {
"type": "array"
}
}
}
}
}
class DirectMaps(object):
"""An abstraction around the remote matches.
Each match is treated internally as a list.
"""
def __init__(self):
self._matches = []
def add(self, values):
"""Adds a matched value to the list of matches.
        :param list values: the list of matched values to save
"""
self._matches.append(values)
def __getitem__(self, idx):
"""Used by Python when executing ``''.format(*DirectMaps())``."""
value = self._matches[idx]
if isinstance(value, list) and len(value) == 1:
return value[0]
else:
return value
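    # Illustrative usage (not part of the original module): matches are
    # consumed positionally by str.format(), e.g.
    #   dm = DirectMaps()
    #   dm.add(['Bob'])
    #   dm.add(['Thompson'])
    #   '{0} {1}'.format(*dm)  # -> 'Bob Thompson'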
def validate_mapping_structure(ref):
v = jsonschema.Draft4Validator(MAPPING_SCHEMA)
messages = ''
for error in sorted(v.iter_errors(ref), key=str):
messages = messages + error.message + "\n"
if messages:
raise exception.ValidationError(messages)
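# A minimal mapping that passes validate_mapping_structure() (illustrative
# attribute names only):
#   {"rules": [{"local": [{"user": {"name": "{0}"}}],
#               "remote": [{"type": "UserName"}]}]}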
def validate_expiration(token_ref):
if timeutils.utcnow() > token_ref.expires:
raise exception.Unauthorized(_('Federation token is expired'))
def validate_groups_cardinality(group_ids, mapping_id):
"""Check if groups list is non-empty.
:param group_ids: list of group ids
:type group_ids: list of str
:raises exception.MissingGroups: if ``group_ids`` cardinality is 0
"""
if not group_ids:
raise exception.MissingGroups(mapping_id=mapping_id)
def get_remote_id_parameter(protocol):
# NOTE(marco-fargetta): Since we support any protocol ID, we attempt to
# retrieve the remote_id_attribute of the protocol ID. If it's not
# registered in the config, then register the option and try again.
# This allows the user to register protocols other than oidc and saml2.
remote_id_parameter = None
try:
remote_id_parameter = CONF[protocol]['remote_id_attribute']
except AttributeError:
CONF.register_opt(cfg.StrOpt('remote_id_attribute'),
group=protocol)
try:
remote_id_parameter = CONF[protocol]['remote_id_attribute']
except AttributeError:
pass
if not remote_id_parameter:
LOG.debug('Cannot find "remote_id_attribute" in configuration '
'group %s. Trying default location in '
'group federation.', protocol)
remote_id_parameter = CONF.federation.remote_id_attribute
return remote_id_parameter
def validate_idp(idp, protocol, assertion):
"""Validate the IdP providing the assertion is registered for the mapping.
"""
remote_id_parameter = get_remote_id_parameter(protocol)
if not remote_id_parameter or not idp['remote_ids']:
LOG.debug('Impossible to identify the IdP %s ', idp['id'])
# If nothing is defined, the administrator may want to
# allow the mapping of every IdP
return
try:
idp_remote_identifier = assertion[remote_id_parameter]
except KeyError:
msg = _('Could not find Identity Provider identifier in '
'environment')
raise exception.ValidationError(msg)
if idp_remote_identifier not in idp['remote_ids']:
msg = _('Incoming identity provider identifier not included '
'among the accepted identifiers.')
raise exception.Forbidden(msg)
def validate_groups_in_backend(group_ids, mapping_id, identity_api):
"""Iterate over group ids and make sure they are present in the backend/
This call is not transactional.
:param group_ids: IDs of the groups to be checked
:type group_ids: list of str
:param mapping_id: id of the mapping used for this operation
:type mapping_id: str
:param identity_api: Identity Manager object used for communication with
backend
:type identity_api: identity.Manager
:raises: exception.MappedGroupNotFound
"""
for group_id in group_ids:
try:
identity_api.get_group(group_id)
except exception.GroupNotFound:
raise exception.MappedGroupNotFound(
group_id=group_id, mapping_id=mapping_id)
def validate_groups(group_ids, mapping_id, identity_api):
"""Check group ids cardinality and check their existence in the backend.
This call is not transactional.
:param group_ids: IDs of the groups to be checked
:type group_ids: list of str
:param mapping_id: id of the mapping used for this operation
:type mapping_id: str
:param identity_api: Identity Manager object used for communication with
backend
:type identity_api: identity.Manager
:raises: exception.MappedGroupNotFound
:raises: exception.MissingGroups
"""
validate_groups_cardinality(group_ids, mapping_id)
validate_groups_in_backend(group_ids, mapping_id, identity_api)
# TODO(marek-denis): Optimize this function, so the number of calls to the
# backend are minimized.
def transform_to_group_ids(group_names, mapping_id,
identity_api, resource_api):
"""Transform groups identitified by name/domain to their ids
Function accepts list of groups identified by a name and domain giving
a list of group ids in return.
Example of group_names parameter::
[
{
"name": "group_name",
"domain": {
"id": "domain_id"
},
},
{
"name": "group_name_2",
"domain": {
"name": "domain_name"
}
}
]
:param group_names: list of group identified by name and its domain.
:type group_names: list
:param mapping_id: id of the mapping used for mapping assertion into
local credentials
:type mapping_id: str
:param identity_api: identity_api object
:param resource_api: resource manager object
:returns: generator object with group ids
    :raises: exception.MappedGroupNotFound: if a requested group doesn't
        exist in the backend.
"""
def resolve_domain(domain):
"""Return domain id.
Input is a dictionary with a domain identified either by a ``id`` or a
``name``. In the latter case system will attempt to fetch domain object
from the backend.
:returns: domain's id
:rtype: str
"""
domain_id = (domain.get('id') or
resource_api.get_domain_by_name(
domain.get('name')).get('id'))
return domain_id
for group in group_names:
try:
group_dict = identity_api.get_group_by_name(
group['name'], resolve_domain(group['domain']))
yield group_dict['id']
except exception.GroupNotFound:
LOG.debug('Skip mapping group %s; has no entry in the backend',
group['name'])
def get_assertion_params_from_env(context):
LOG.debug('Environment variables: %s', context['environment'])
prefix = CONF.federation.assertion_prefix
for k, v in list(context['environment'].items()):
if k.startswith(prefix):
yield (k, v)
class UserType(object):
"""User mapping type."""
EPHEMERAL = 'ephemeral'
LOCAL = 'local'
class RuleProcessor(object):
"""A class to process assertions and mapping rules."""
class _EvalType(object):
"""Mapping rule evaluation types."""
ANY_ONE_OF = 'any_one_of'
NOT_ANY_OF = 'not_any_of'
BLACKLIST = 'blacklist'
WHITELIST = 'whitelist'
def __init__(self, rules):
"""Initialize RuleProcessor.
Example rules can be found at:
:class:`keystone.tests.mapping_fixtures`
:param rules: rules from a mapping
:type rules: dict
"""
self.rules = rules
def process(self, assertion_data):
"""Transform assertion to a dictionary of user name and group ids
based on mapping rules.
This function will iterate through the mapping rules to find
assertions that are valid.
:param assertion_data: an assertion containing values from an IdP
:type assertion_data: dict
Example assertion_data::
{
'Email': '[email protected]',
'UserName': 'testacct',
'FirstName': 'Test',
'LastName': 'Account',
'orgPersonType': 'Tester'
}
:returns: dictionary with user and group_ids
The expected return structure is::
{
'name': 'foobar',
'group_ids': ['abc123', 'def456'],
'group_names': [
{
'name': 'group_name_1',
'domain': {
'name': 'domain1'
}
},
{
'name': 'group_name_1_1',
'domain': {
'name': 'domain1'
}
},
{
'name': 'group_name_2',
'domain': {
'id': 'xyz132'
}
}
]
}
"""
# Assertions will come in as string key-value pairs, and will use a
# semi-colon to indicate multiple values, i.e. groups.
# This will create a new dictionary where the values are arrays, and
# any multiple values are stored in the arrays.
LOG.debug('assertion data: %s', assertion_data)
assertion = {n: v.split(';') for n, v in assertion_data.items()
if isinstance(v, six.string_types)}
LOG.debug('assertion: %s', assertion)
identity_values = []
LOG.debug('rules: %s', self.rules)
for rule in self.rules:
direct_maps = self._verify_all_requirements(rule['remote'],
assertion)
# If the compare comes back as None, then the rule did not apply
# to the assertion data, go on to the next rule
if direct_maps is None:
continue
# If there are no direct mappings, then add the local mapping
# directly to the array of saved values. However, if there is
# a direct mapping, then perform variable replacement.
if not direct_maps:
identity_values += rule['local']
else:
for local in rule['local']:
new_local = self._update_local_mapping(local, direct_maps)
identity_values.append(new_local)
LOG.debug('identity_values: %s', identity_values)
mapped_properties = self._transform(identity_values)
LOG.debug('mapped_properties: %s', mapped_properties)
return mapped_properties
def _transform(self, identity_values):
"""Transform local mappings, to an easier to understand format.
Transform the incoming array to generate the return value for
the process function. Generating content for Keystone tokens will
be easier if some pre-processing is done at this level.
:param identity_values: local mapping from valid evaluations
:type identity_values: array of dict
Example identity_values::
[
{
'group': {'id': '0cd5e9'},
'user': {
'email': '[email protected]'
},
},
{
                    'groups': ['member', 'admin', 'tester'],
'domain': {
'name': 'default_domain'
}
}
]
:returns: dictionary with user name, group_ids and group_names.
:rtype: dict
"""
def extract_groups(groups_by_domain):
for groups in list(groups_by_domain.values()):
for group in list({g['name']: g for g in groups}.values()):
yield group
def normalize_user(user):
"""Parse and validate user mapping."""
user_type = user.get('type')
if user_type and user_type not in (UserType.EPHEMERAL,
UserType.LOCAL):
msg = _("User type %s not supported") % user_type
raise exception.ValidationError(msg)
if user_type is None:
user_type = user['type'] = UserType.EPHEMERAL
if user_type == UserType.EPHEMERAL:
user['domain'] = {
'id': CONF.federation.federated_domain_name
}
# initialize the group_ids as a set to eliminate duplicates
user = {}
group_ids = set()
group_names = list()
groups_by_domain = dict()
for identity_value in identity_values:
if 'user' in identity_value:
# if a mapping outputs more than one user name, log it
if user:
LOG.warning(_LW('Ignoring user name'))
else:
user = identity_value.get('user')
if 'group' in identity_value:
group = identity_value['group']
if 'id' in group:
group_ids.add(group['id'])
elif 'name' in group:
domain = (group['domain'].get('name') or
group['domain'].get('id'))
groups_by_domain.setdefault(domain, list()).append(group)
group_names.extend(extract_groups(groups_by_domain))
if 'groups' in identity_value:
if 'domain' not in identity_value:
msg = _("Invalid rule: %(identity_value)s. Both 'groups' "
"and 'domain' keywords must be specified.")
msg = msg % {'identity_value': identity_value}
raise exception.ValidationError(msg)
# In this case, identity_value['groups'] is a string
# representation of a list, and we want a real list. This is
# due to the way we do direct mapping substitutions today (see
# function _update_local_mapping() )
try:
group_names_list = ast.literal_eval(
identity_value['groups'])
except ValueError:
group_names_list = [identity_value['groups']]
domain = identity_value['domain']
group_dicts = [{'name': name, 'domain': domain} for name in
group_names_list]
group_names.extend(group_dicts)
normalize_user(user)
return {'user': user,
'group_ids': list(group_ids),
'group_names': group_names}
def _update_local_mapping(self, local, direct_maps):
"""Replace any {0}, {1} ... values with data from the assertion.
:param local: local mapping reference that needs to be updated
:type local: dict
:param direct_maps: identity values used to update local
:type direct_maps: keystone.contrib.federation.utils.DirectMaps
Example local::
{'user': {'name': '{0} {1}', 'email': '{2}'}}
Example direct_maps::
['Bob', 'Thompson', '[email protected]']
:returns: new local mapping reference with replaced values.
The expected return structure is::
{'user': {'name': 'Bob Thompson', 'email': '[email protected]'}}
"""
LOG.debug('direct_maps: %s', direct_maps)
LOG.debug('local: %s', local)
new = {}
for k, v in local.items():
if isinstance(v, dict):
new_value = self._update_local_mapping(v, direct_maps)
else:
new_value = v.format(*direct_maps)
new[k] = new_value
return new

    def _verify_all_requirements(self, requirements, assertion):
"""Go through the remote requirements of a rule, and compare against
the assertion.
If a value of ``None`` is returned, the rule with this assertion
doesn't apply.
If an array of zero length is returned, then there are no direct
mappings to be performed, but the rule is valid.
        Otherwise, the values are first filtered according to any blacklist
        or whitelist rules and then returned, in order, to be directly
        mapped.
:param requirements: list of remote requirements from rules
:type requirements: list
Example requirements::
[
{
"type": "UserName"
},
{
"type": "orgPersonType",
"any_one_of": [
"Customer"
]
},
{
"type": "ADFS_GROUPS",
"whitelist": [
"g1", "g2", "g3", "g4"
]
}
]
:param assertion: dict of attributes from an IdP
:type assertion: dict
Example assertion::
{
'UserName': ['testacct'],
'LastName': ['Account'],
'orgPersonType': ['Tester'],
'Email': ['[email protected]'],
'FirstName': ['Test'],
'ADFS_GROUPS': ['g1', 'g2']
}
:returns: identity values used to update local
:rtype: keystone.contrib.federation.utils.DirectMaps or None
"""
direct_maps = DirectMaps()
for requirement in requirements:
requirement_type = requirement['type']
direct_map_values = assertion.get(requirement_type)
regex = requirement.get('regex', False)
if not direct_map_values:
return None
any_one_values = requirement.get(self._EvalType.ANY_ONE_OF)
if any_one_values is not None:
if self._evaluate_requirement(any_one_values,
direct_map_values,
self._EvalType.ANY_ONE_OF,
regex):
continue
else:
return None
not_any_values = requirement.get(self._EvalType.NOT_ANY_OF)
if not_any_values is not None:
if self._evaluate_requirement(not_any_values,
direct_map_values,
self._EvalType.NOT_ANY_OF,
regex):
continue
else:
return None
# If 'any_one_of' or 'not_any_of' are not found, then values are
# within 'type'. Attempt to find that 'type' within the assertion,
# and filter these values if 'whitelist' or 'blacklist' is set.
blacklisted_values = requirement.get(self._EvalType.BLACKLIST)
whitelisted_values = requirement.get(self._EvalType.WHITELIST)
# If a blacklist or whitelist is used, we want to map to the
# whole list instead of just its values separately.
if blacklisted_values is not None:
direct_map_values = [v for v in direct_map_values
if v not in blacklisted_values]
elif whitelisted_values is not None:
direct_map_values = [v for v in direct_map_values
if v in whitelisted_values]
direct_maps.add(direct_map_values)
LOG.debug('updating a direct mapping: %s', direct_map_values)
return direct_maps

    def _evaluate_values_by_regex(self, values, assertion_values):
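        # Return True as soon as any pattern in `values` matches any assertion
        # value; e.g. (hypothetically) the pattern '^g[0-9]$' against
        # ['g1', 'staff'] matches 'g1'.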
for value in values:
for assertion_value in assertion_values:
if re.search(value, assertion_value):
return True
return False

    def _evaluate_requirement(self, values, assertion_values,
eval_type, regex):
"""Evaluate the incoming requirement and assertion.
If the requirement type does not exist in the assertion data, then
return False. If regex is specified, then compare the values and
assertion values. Otherwise, grab the intersection of the values
and use that to compare against the evaluation type.
:param values: list of allowed values, defined in the requirement
:type values: list
:param assertion_values: The values from the assertion to evaluate
:type assertion_values: list/string
:param eval_type: determine how to evaluate requirements
:type eval_type: string
:param regex: perform evaluation with regex
:type regex: boolean
:returns: boolean, whether requirement is valid or not.
"""
if regex:
any_match = self._evaluate_values_by_regex(values,
assertion_values)
else:
any_match = bool(set(values).intersection(set(assertion_values)))
if any_match and eval_type == self._EvalType.ANY_ONE_OF:
return True
if not any_match and eval_type == self._EvalType.NOT_ANY_OF:
return True
return False
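

# Descriptive note (added commentary, not part of the original module): the
# module-level helpers below are simple guards that raise Forbidden when a
# referenced identity provider or service provider exists but has been
# disabled by an operator.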
def assert_enabled_identity_provider(federation_api, idp_id):
identity_provider = federation_api.get_idp(idp_id)
if identity_provider.get('enabled') is not True:
msg = _('Identity Provider %(idp)s is disabled') % {'idp': idp_id}
LOG.debug(msg)
raise exception.Forbidden(msg)


def assert_enabled_service_provider_object(service_provider):
if service_provider.get('enabled') is not True:
sp_id = service_provider['id']
msg = _('Service Provider %(sp)s is disabled') % {'sp': sp_id}
LOG.debug(msg)
raise exception.Forbidden(msg)
| apache-2.0 |
don-github/edx-platform | lms/djangoapps/django_comment_client/management/commands/assign_role.py | 251 | 1144 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django_comment_common.models import Role
from django.contrib.auth.models import User


class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--remove',
action='store_true',
dest='remove',
default=False,
help='Remove the role instead of adding it'),
)
args = '<user|email> <role> <course_id>'
help = 'Assign a discussion forum role to a user '

    def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Usage is assign_role {0}'.format(self.args))
name_or_email, role, course_id = args
role = Role.objects.get(name=role, course_id=course_id)
if '@' in name_or_email:
user = User.objects.get(email=name_or_email)
else:
user = User.objects.get(username=name_or_email)
if options['remove']:
user.roles.remove(role)
else:
user.roles.add(role)
print 'Success!'
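

# Example invocation (hypothetical identifiers and settings): from the
# edx-platform root,
#   python manage.py lms assign_role staff_user Moderator MITx/6.002x/2015_Spring
# grants the 'Moderator' forum role to the user, and adding --remove strips
# it again.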
| agpl-3.0 |