code | repo_name | path | language | license | size
stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M
---|---|---|---|---|---|
'''tzinfo timezone information for America/Inuvik.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Inuvik(DstTzInfo):
'''America/Inuvik timezone definition. See datetime.tzinfo for details'''
zone = 'America/Inuvik'
# UTC instants at which this zone's offset changes; the leading d(1,1,1,0,0,0)
# entry anchors the era before the first recorded transition.
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,4,14,10,0,0),
d(1918,10,27,9,0,0),
d(1919,5,25,10,0,0),
d(1919,11,1,7,0,0),
d(1942,2,9,10,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,9,0,0),
d(1965,4,25,8,0,0),
d(1965,10,31,8,0,0),
d(1979,4,29,10,0,0),
d(1980,4,27,9,0,0),
d(1980,10,26,8,0,0),
d(1981,4,26,9,0,0),
d(1981,10,25,8,0,0),
d(1982,4,25,9,0,0),
d(1982,10,31,8,0,0),
d(1983,4,24,9,0,0),
d(1983,10,30,8,0,0),
d(1984,4,29,9,0,0),
d(1984,10,28,8,0,0),
d(1985,4,28,9,0,0),
d(1985,10,27,8,0,0),
d(1986,4,27,9,0,0),
d(1986,10,26,8,0,0),
d(1987,4,5,9,0,0),
d(1987,10,25,8,0,0),
d(1988,4,3,9,0,0),
d(1988,10,30,8,0,0),
d(1989,4,2,9,0,0),
d(1989,10,29,8,0,0),
d(1990,4,1,9,0,0),
d(1990,10,28,8,0,0),
d(1991,4,7,9,0,0),
d(1991,10,27,8,0,0),
d(1992,4,5,9,0,0),
d(1992,10,25,8,0,0),
d(1993,4,4,9,0,0),
d(1993,10,31,8,0,0),
d(1994,4,3,9,0,0),
d(1994,10,30,8,0,0),
d(1995,4,2,9,0,0),
d(1995,10,29,8,0,0),
d(1996,4,7,9,0,0),
d(1996,10,27,8,0,0),
d(1997,4,6,9,0,0),
d(1997,10,26,8,0,0),
d(1998,4,5,9,0,0),
d(1998,10,25,8,0,0),
d(1999,4,4,9,0,0),
d(1999,10,31,8,0,0),
d(2000,4,2,9,0,0),
d(2000,10,29,8,0,0),
d(2001,4,1,9,0,0),
d(2001,10,28,8,0,0),
d(2002,4,7,9,0,0),
d(2002,10,27,8,0,0),
d(2003,4,6,9,0,0),
d(2003,10,26,8,0,0),
d(2004,4,4,9,0,0),
d(2004,10,31,8,0,0),
d(2005,4,3,9,0,0),
d(2005,10,30,8,0,0),
d(2006,4,2,9,0,0),
d(2006,10,29,8,0,0),
d(2007,3,11,9,0,0),
d(2007,11,4,8,0,0),
d(2008,3,9,9,0,0),
d(2008,11,2,8,0,0),
d(2009,3,8,9,0,0),
d(2009,11,1,8,0,0),
d(2010,3,14,9,0,0),
d(2010,11,7,8,0,0),
d(2011,3,13,9,0,0),
d(2011,11,6,8,0,0),
d(2012,3,11,9,0,0),
d(2012,11,4,8,0,0),
d(2013,3,10,9,0,0),
d(2013,11,3,8,0,0),
d(2014,3,9,9,0,0),
d(2014,11,2,8,0,0),
d(2015,3,8,9,0,0),
d(2015,11,1,8,0,0),
d(2016,3,13,9,0,0),
d(2016,11,6,8,0,0),
d(2017,3,12,9,0,0),
d(2017,11,5,8,0,0),
d(2018,3,11,9,0,0),
d(2018,11,4,8,0,0),
d(2019,3,10,9,0,0),
d(2019,11,3,8,0,0),
d(2020,3,8,9,0,0),
d(2020,11,1,8,0,0),
d(2021,3,14,9,0,0),
d(2021,11,7,8,0,0),
d(2022,3,13,9,0,0),
d(2022,11,6,8,0,0),
d(2023,3,12,9,0,0),
d(2023,11,5,8,0,0),
d(2024,3,10,9,0,0),
d(2024,11,3,8,0,0),
d(2025,3,9,9,0,0),
d(2025,11,2,8,0,0),
d(2026,3,8,9,0,0),
d(2026,11,1,8,0,0),
d(2027,3,14,9,0,0),
d(2027,11,7,8,0,0),
d(2028,3,12,9,0,0),
d(2028,11,5,8,0,0),
d(2029,3,11,9,0,0),
d(2029,11,4,8,0,0),
d(2030,3,10,9,0,0),
d(2030,11,3,8,0,0),
d(2031,3,9,9,0,0),
d(2031,11,2,8,0,0),
d(2032,3,14,9,0,0),
d(2032,11,7,8,0,0),
d(2033,3,13,9,0,0),
d(2033,11,6,8,0,0),
d(2034,3,12,9,0,0),
d(2034,11,5,8,0,0),
d(2035,3,11,9,0,0),
d(2035,11,4,8,0,0),
d(2036,3,9,9,0,0),
d(2036,11,2,8,0,0),
d(2037,3,8,9,0,0),
d(2037,11,1,8,0,0),
]
# One (utcoffset seconds, DST offset seconds, tzname) tuple per transition above.
_transition_info = [
i(-28800,0,'PST'),
i(-25200,3600,'PDT'),
i(-28800,0,'PST'),
i(-25200,3600,'PDT'),
i(-28800,0,'PST'),
i(-25200,3600,'PWT'),
i(-25200,3600,'PPT'),
i(-28800,0,'PST'),
i(-21600,7200,'PDDT'),
i(-28800,0,'PST'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
]
Inuvik = Inuvik()
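# Usage sketch (illustrative comment only, not part of the generated module):
# applications normally obtain this zone via the public pytz API rather than
# importing this class directly.
#
#   import pytz
#   from datetime import datetime
#   inuvik = pytz.timezone('America/Inuvik')
#   aware = inuvik.localize(datetime(2020, 6, 1, 12, 0))  # MDT, UTC-06:00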
| newvem/pytz | pytz/zoneinfo/America/Inuvik.py | Python | mit | 5,554 |
# (c) 2019 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.cloudengine import ce_is_is_instance
from units.modules.network.cloudengine.ce_module import TestCloudEngineModule, load_fixture
from units.modules.utils import set_module_args
class TestCloudEngineIsIsInstanceModule(TestCloudEngineModule):
module = ce_is_is_instance
def setUp(self):
super(TestCloudEngineIsIsInstanceModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.cloudengine.ce_is_is_instance.get_nc_config')
self.get_nc_config = self.mock_get_config.start()
self.mock_set_config = patch('ansible.modules.network.cloudengine.ce_is_is_instance.set_nc_config')
self.set_nc_config = self.mock_set_config.start()
self.set_nc_config.return_value = None
def tearDown(self):
super(TestCloudEngineIsIsInstanceModule, self).tearDown()
self.mock_set_config.stop()
self.mock_get_config.stop()
def test_isis_instance_present(self):
xml_existing = load_fixture('ce_is_is_instance', 'before.txt')
xml_end_state = load_fixture('ce_is_is_instance', 'after.txt')
update = ['isis 100', 'vpn-instance __public__']
self.get_nc_config.side_effect = (xml_existing, xml_end_state)
config = dict(
instance_id=100,
vpn_name='__public__',
state='present')
set_module_args(config)
result = self.execute_module(changed=True)
self.assertEqual(sorted(result['updates']), sorted(update))
def test_isis_instance_absent(self):
xml_existing = load_fixture('ce_is_is_instance', 'after.txt')
xml_end_state = load_fixture('ce_is_is_instance', 'before.txt')
update = ['undo isis 100']
self.get_nc_config.side_effect = (xml_existing, xml_end_state)
config = dict(
instance_id=100,
vpn_name='__public__',
state='absent')
set_module_args(config)
result = self.execute_module(changed=True)
self.assertEqual(sorted(result['updates']), sorted(update))
| roadmapper/ansible | test/units/modules/network/cloudengine/test_ce_is_is_instance.py | Python | gpl-3.0 | 2,883 |
# -*- coding: utf-8 -*-
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import ir_model
from . import trgm_index
| ddico/server-tools | base_search_fuzzy/models/__init__.py | Python | agpl-3.0 | 136 |
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class project_issue(osv.osv):
_inherit = 'project.issue'
_columns = {
'project_issue_solution_id': fields.many2one('project.issue.solution', 'Linked Solution'),
'issue_description': fields.html('Issue Description'),
'solution_description': fields.html('Solution Description'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jorsea/odoo-addons | project_issue_solutions/project_issue.py | Python | agpl-3.0 | 445 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unique element dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import contrib_op_loader # pylint: disable=unused-import
from tensorflow.contrib.data.python.ops import gen_dataset_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
def unique():
"""Creates a `Dataset` from another `Dataset`, discarding duplicates.
Use this transformation to produce a dataset that contains one instance of
each unique element in the input. For example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])
# Using `unique()` will drop the duplicate elements.
dataset = dataset.apply(tf.contrib.data.unique()) # ==> { 1, 37, 2 }
```
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
"""
def _apply_fn(dataset):
return _UniqueDataset(dataset)
return _apply_fn
class _UniqueDataset(dataset_ops.Dataset):
"""A `Dataset` contains the unique elements from its input."""
def __init__(self, input_dataset):
"""See `unique()` for details."""
super(_UniqueDataset, self).__init__()
self._input_dataset = input_dataset
if input_dataset.output_types not in (dtypes.int32, dtypes.int64,
dtypes.string):
raise TypeError(
"`tf.contrib.data.unique()` only supports inputs with a single "
"`tf.int32`, `tf.int64`, or `tf.string` component.")
def _as_variant_tensor(self):
return gen_dataset_ops.unique_dataset(
self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
**dataset_ops.flat_structure(self))
@property
def output_classes(self):
return self._input_dataset.output_classes
@property
def output_shapes(self):
return self._input_dataset.output_shapes
@property
def output_types(self):
return self._input_dataset.output_types
| drpngx/tensorflow | tensorflow/contrib/data/python/ops/unique.py | Python | apache-2.0 | 2,748 |
# -*- coding: utf-8 -*-
#
# Copyright 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Float
from sqlalchemy import MetaData
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
meter = Table('meter', meta, autoload=True)
meter.c.counter_volume.alter(type=Float(53))  # FLOAT(53): 53-bit mantissa, i.e. double precision
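# No downgrade is defined in this migration; a minimal sketch (assuming the
# column previously used the default float width) would be:
#
#   def downgrade(migrate_engine):
#       meta = MetaData(bind=migrate_engine)
#       meter = Table('meter', meta, autoload=True)
#       meter.c.counter_volume.alter(type=Float())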
| ityaptin/ceilometer | ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py | Python | apache-2.0 | 888 |
from ..broker import Broker
class DiscoverySettingBroker(Broker):
controller = "discovery_settings"
def index(self, **kwargs):
"""Lists the available discovery settings. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery setting.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery setting.
:type id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting.
:type UnitID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting.
:type UnitID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD).
:type range_type: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD).
:type range_type: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DiscoverySetting. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_settings: An array of the DiscoverySetting objects that match the specified input criteria.
:rtype discovery_settings: Array of DiscoverySetting
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
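# Usage sketch for index(): hypothetical host and credentials; assumes the
# infoblox-netmri client exposes InfobloxNetMRI and get_broker (neither is
# shown in this file).
#
#   from infoblox_netmri.client import InfobloxNetMRI
#   client = InfobloxNetMRI("netmri.example.com", "admin", "secret")
#   broker = client.get_broker("DiscoverySetting")
#   cidrs = broker.index(range_type=["CIDR"], sort=["id"], limit=50)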
def search(self, **kwargs):
"""Lists the available discovery settings matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD).
:type range_type: Array of String
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting.
:type UnitID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DiscoverySetting. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against discovery settings, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: range_value, range_type, ping_sweep_ind (1, 0), discovery_status (INCLUDE, EXCLUDE, IGNORE), VirtualNetworkName.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Bear in mind that this kind of filter may be costly and inefficient if not associated with database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_settings: An array of the DiscoverySetting objects that match the specified input criteria.
:rtype discovery_settings: Array of DiscoverySetting
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
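# Usage sketch for search(): the free-text :query parameter is matched against
# range_value, range_type, etc., as documented above (hypothetical broker
# instance from the index() sketch).
#
#   hits = broker.search(query="10.0.", range_type=["CIDR"], limit=25)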
def find(self, **kwargs):
"""Lists the available discovery settings matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: UnitID, blackout_duration, cidr_count, created_at, created_by, discovery_status, id, ping_sweep_ind, port_control_blackout_duration, range_end, range_end_numeric, range_mask, range_start, range_start_numeric, range_type, range_value, smart_ping_sweep_ind, start_blackout_schedule, start_port_control_blackout_schedule, updated_at, updated_by, virtual_network_id.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_UnitID: The operator to apply to the field UnitID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_UnitID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_UnitID: If op_UnitID is specified, the field named in this input will be compared to the value in UnitID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_UnitID must be specified if op_UnitID is specified.
:type val_f_UnitID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_UnitID: If op_UnitID is specified, this value will be compared to the value in UnitID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_UnitID must be specified if op_UnitID is specified.
:type val_c_UnitID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_blackout_duration: The operator to apply to the field blackout_duration. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. blackout_duration: The blackout duration in minutes. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_blackout_duration: If op_blackout_duration is specified, the field named in this input will be compared to the value in blackout_duration using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_blackout_duration must be specified if op_blackout_duration is specified.
:type val_f_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_blackout_duration: If op_blackout_duration is specified, this value will be compared to the value in blackout_duration using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_blackout_duration must be specified if op_blackout_duration is specified.
:type val_c_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cidr_count: The operator to apply to the field cidr_count. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cidr_count: Number of CIDRs in discovery range. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cidr_count: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cidr_count: If op_cidr_count is specified, the field named in this input will be compared to the value in cidr_count using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cidr_count must be specified if op_cidr_count is specified.
:type val_f_cidr_count: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cidr_count: If op_cidr_count is specified, this value will be compared to the value in cidr_count using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cidr_count must be specified if op_cidr_count is specified.
:type val_c_cidr_count: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the discovery setting was created. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_by: The operator to apply to the field created_by. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_by: The user that created the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_by: If op_created_by is specified, the field named in this input will be compared to the value in created_by using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_by must be specified if op_created_by is specified.
:type val_f_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_by: If op_created_by is specified, this value will be compared to the value in created_by using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_by must be specified if op_created_by is specified.
:type val_c_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_discovery_status: The operator to apply to the field discovery_status. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. discovery_status: The discovery mode of the discovery setting (INCLUDE, EXCLUDE, IGNORE). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_discovery_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_discovery_status: If op_discovery_status is specified, the field named in this input will be compared to the value in discovery_status using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_discovery_status must be specified if op_discovery_status is specified.
:type val_f_discovery_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_discovery_status: If op_discovery_status is specified, this value will be compared to the value in discovery_status using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_discovery_status must be specified if op_discovery_status is specified.
:type val_c_discovery_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ping_sweep_ind: The operator to apply to the field ping_sweep_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ping_sweep_ind: A flag indicating if ping sweeps are used on the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ping_sweep_ind: If op_ping_sweep_ind is specified, the field named in this input will be compared to the value in ping_sweep_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ping_sweep_ind must be specified if op_ping_sweep_ind is specified.
:type val_f_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ping_sweep_ind: If op_ping_sweep_ind is specified, this value will be compared to the value in ping_sweep_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ping_sweep_ind must be specified if op_ping_sweep_ind is specified.
:type val_c_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_port_control_blackout_duration: The operator to apply to the field port_control_blackout_duration. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. port_control_blackout_duration: Port Control Blackout duration in minutes For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_port_control_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_port_control_blackout_duration: If op_port_control_blackout_duration is specified, the field named in this input will be compared to the value in port_control_blackout_duration using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_port_control_blackout_duration must be specified if op_port_control_blackout_duration is specified.
:type val_f_port_control_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_port_control_blackout_duration: If op_port_control_blackout_duration is specified, this value will be compared to the value in port_control_blackout_duration using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_port_control_blackout_duration must be specified if op_port_control_blackout_duration is specified.
:type val_c_port_control_blackout_duration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_end: The operator to apply to the field range_end. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_end: The ending IP address for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_end: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_end: If op_range_end is specified, the field named in this input will be compared to the value in range_end using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_end must be specified if op_range_end is specified.
:type val_f_range_end: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_end: If op_range_end is specified, this value will be compared to the value in range_end using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_end must be specified if op_range_end is specified.
:type val_c_range_end: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_end_numeric: The operator to apply to the field range_end_numeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_end_numeric: The ending IP address numeric value for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_end_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_end_numeric: If op_range_end_numeric is specified, the field named in this input will be compared to the value in range_end_numeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_end_numeric must be specified if op_range_end_numeric is specified.
:type val_f_range_end_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_end_numeric: If op_range_end_numeric is specified, this value will be compared to the value in range_end_numeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_end_numeric must be specified if op_range_end_numeric is specified.
:type val_c_range_end_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_mask: The operator to apply to the field range_mask. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_mask: The CIDR mask for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_mask: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_mask: If op_range_mask is specified, the field named in this input will be compared to the value in range_mask using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_mask must be specified if op_range_mask is specified.
:type val_f_range_mask: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_mask: If op_range_mask is specified, this value will be compared to the value in range_mask using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_mask must be specified if op_range_mask is specified.
:type val_c_range_mask: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_start: The operator to apply to the field range_start. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_start: The starting IP address for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_start: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_start: If op_range_start is specified, the field named in this input will be compared to the value in range_start using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_start must be specified if op_range_start is specified.
:type val_f_range_start: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_start: If op_range_start is specified, this value will be compared to the value in range_start using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_start must be specified if op_range_start is specified.
:type val_c_range_start: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_start_numeric: The operator to apply to the field range_start_numeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_start_numeric: The starting IP address numeric value for the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_start_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_start_numeric: If op_range_start_numeric is specified, the field named in this input will be compared to the value in range_start_numeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_start_numeric must be specified if op_range_start_numeric is specified.
:type val_f_range_start_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_start_numeric: If op_range_start_numeric is specified, this value will be compared to the value in range_start_numeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_start_numeric must be specified if op_range_start_numeric is specified.
:type val_c_range_start_numeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_type: The operator to apply to the field range_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_type: If op_range_type is specified, the field named in this input will be compared to the value in range_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_type must be specified if op_range_type is specified.
:type val_f_range_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_type: If op_range_type is specified, this value will be compared to the value in range_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_type must be specified if op_range_type is specified.
:type val_c_range_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_range_value: The operator to apply to the field range_value. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. range_value: The discovery setting value. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_range_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_range_value: If op_range_value is specified, the field named in this input will be compared to the value in range_value using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_range_value must be specified if op_range_value is specified.
:type val_f_range_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_range_value: If op_range_value is specified, this value will be compared to the value in range_value using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_range_value must be specified if op_range_value is specified.
:type val_c_range_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_smart_ping_sweep_ind: The operator to apply to the field smart_ping_sweep_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. smart_ping_sweep_ind: A flag indicating if smart ping sweep should be used on the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_smart_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_smart_ping_sweep_ind: If op_smart_ping_sweep_ind is specified, the field named in this input will be compared to the value in smart_ping_sweep_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_smart_ping_sweep_ind must be specified if op_smart_ping_sweep_ind is specified.
:type val_f_smart_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_smart_ping_sweep_ind: If op_smart_ping_sweep_ind is specified, this value will be compared to the value in smart_ping_sweep_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_smart_ping_sweep_ind must be specified if op_smart_ping_sweep_ind is specified.
:type val_c_smart_ping_sweep_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_start_blackout_schedule: The operator to apply to the field start_blackout_schedule. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. start_blackout_schedule: The blackout start time in cron format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_start_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_start_blackout_schedule: If op_start_blackout_schedule is specified, the field named in this input will be compared to the value in start_blackout_schedule using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_start_blackout_schedule must be specified if op_start_blackout_schedule is specified.
:type val_f_start_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_start_blackout_schedule: If op_start_blackout_schedule is specified, this value will be compared to the value in start_blackout_schedule using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_start_blackout_schedule must be specified if op_start_blackout_schedule is specified.
:type val_c_start_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_start_port_control_blackout_schedule: The operator to apply to the field start_port_control_blackout_schedule. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. start_port_control_blackout_schedule: Port Control Blackout schedule in CRON format For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_start_port_control_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_start_port_control_blackout_schedule: If op_start_port_control_blackout_schedule is specified, the field named in this input will be compared to the value in start_port_control_blackout_schedule using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_start_port_control_blackout_schedule must be specified if op_start_port_control_blackout_schedule is specified.
:type val_f_start_port_control_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_start_port_control_blackout_schedule: If op_start_port_control_blackout_schedule is specified, this value will be compared to the value in start_port_control_blackout_schedule using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_start_port_control_blackout_schedule must be specified if op_start_port_control_blackout_schedule is specified.
:type val_c_start_port_control_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the discovery setting was updated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_by: The operator to apply to the field updated_by. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_by: The user that last updated the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_by: If op_updated_by is specified, the field named in this input will be compared to the value in updated_by using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_by must be specified if op_updated_by is specified.
:type val_f_updated_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_by: If op_updated_by is specified, this value will be compared to the value in updated_by using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_by must be specified if op_updated_by is specified.
:type val_c_updated_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_virtual_network_id: The operator to apply to the field virtual_network_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. virtual_network_id: A Virtual Network identifier assigned to the discovery setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_virtual_network_id: If op_virtual_network_id is specified, the field named in this input will be compared to the value in virtual_network_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_virtual_network_id must be specified if op_virtual_network_id is specified.
:type val_f_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_virtual_network_id: If op_virtual_network_id is specified, this value will be compared to the value in virtual_network_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_virtual_network_id must be specified if op_virtual_network_id is specified.
:type val_c_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DiscoverySetting. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name used by NIOS GOTO to locate a row position within the records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field used by NIOS GOTO to locate a row position within the records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_settings: An array of the DiscoverySetting objects that match the specified input criteria.
:rtype discovery_settings: Array of DiscoverySetting
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
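# Illustrative usage sketch (not part of the original source): the
# op_*/val_f_*/val_c_* triplets documented above pair an operator with either
# another field name (val_f_*) or a constant (val_c_*). Assuming a connected
# InfobloxNetMRI client named `client` (a hypothetical variable), a filtered
# find call might look like:
#
#   broker = client.get_broker('DiscoverySetting')
#   settings = broker.find(op_virtual_network_id='=',
#                          val_c_virtual_network_id=3,
#                          sort=['range_start_numeric'], dir=['asc'],
#                          limit=100)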
def show(self, **kwargs):
"""Shows the details for the specified discovery setting.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery setting.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_setting: The discovery setting identified by the specified id.
:rtype discovery_setting: DiscoverySetting
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def create(self, **kwargs):
"""Creates a new discovery setting.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param range_value: The discovery setting value.
:type range_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD).
:type range_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param discovery_status: The discovery mode of the discovery setting (INCLUDE, EXCLUDE, IGNORE).
:type discovery_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting.
:type UnitID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param ping_sweep_ind: A flag indicating if ping sweeps are used on the discovery setting.
:type ping_sweep_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param virtual_network_id: A Virtual Network identifier assigned to the discovery setting.
:type virtual_network_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:``
:param start_blackout_schedule: The blackout start time in cron format.
:type start_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param blackout_duration: The blackout duration in minutes.
:type blackout_duration: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:``
:param start_port_control_blackout_schedule: The Port Control Blackout schedule in cron format.
:type start_port_control_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param port_control_blackout_duration: The Port Control Blackout duration in minutes.
:type port_control_blackout_duration: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the newly created discovery setting.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the newly created discovery setting.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the newly created discovery setting.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_setting: The newly created discovery setting.
:rtype discovery_setting: DiscoverySetting
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
def update(self, **kwargs):
"""Updates an existing discovery setting.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery setting.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param range_value: The discovery setting value. If omitted, this field will not be updated.
:type range_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD). If omitted, this field will not be updated.
:type range_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param discovery_status: The discovery mode of the discovery setting (INCLUDE, EXCLUDE, IGNORE). If omitted, this field will not be updated.
:type discovery_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery setting. If omitted, this field will not be updated.
:type UnitID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ping_sweep_ind: A flag indicating if ping sweeps are used on the discovery setting. If omitted, this field will not be updated.
:type ping_sweep_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param virtual_network_id: A Virtual Network identifier assigned to the discovery setting. If omitted, this field will not be updated.
:type virtual_network_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param start_blackout_schedule: The blackout start time in cron format. If omitted, this field will not be updated.
:type start_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param blackout_duration: The blackout duration in minutes. If omitted, this field will not be updated.
:type blackout_duration: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param start_port_control_blackout_schedule: The Port Control Blackout schedule in cron format. If omitted, this field will not be updated.
:type start_port_control_blackout_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param port_control_blackout_duration: The Port Control Blackout duration in minutes. If omitted, this field will not be updated.
:type port_control_blackout_duration: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the updated discovery setting.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the updated discovery setting.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the updated discovery setting.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_setting: The updated discovery setting.
:rtype discovery_setting: DiscoverySetting
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
def destroy(self, **kwargs):
"""Deletes the specified discovery setting from NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery setting.
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy"), kwargs)
def destroy_many(self, **kwargs):
"""Remove several discovery settings
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param ids: The IDs array of the discovery settings to delete. When sending form encoded use ids[].
:type ids: Array
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy_many"), kwargs)
def import_settings(self, **kwargs):
"""Imports a list of discovery settings into the database
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param file: The contents of the CSV file with the list of discovery settings to be imported.
:type file: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param import_type: The type of discovery settings to import. Valid values are: range, static, seed.
:type import_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The UnitID of the collector
:type UnitID: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("import_settings"), kwargs)
def seed_information(self, **kwargs):
"""Returns the following information: if at least one seed exists, if at least one seed has been discovered, if any IPv6 range is missing a seed
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("seed_information"), kwargs)
def seed_status(self, **kwargs):
"""List of all Device Seeds and the entire Discovery Status for each one.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` range_start_numeric
:param sort: The data field to use for sorting the output. Default is range_start_numeric.
:type sort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The UnitID of the collector
:type UnitID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against the attributes "range_value" and "VirtualNetworkName". Any DiscoverySetting objects with the passed value contained within one or more of those attributes will be returned.
:type query: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param start: The record number to return in the selected page of data. That record will always appear in the returned page, although it may not be the first record. See :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19.
:type limit: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("seed_status"), kwargs)
| infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_8_0/discovery_setting_broker.py | Python | apache-2.0 | 69,668 |
#!/usr/bin/env python
import os
from optparse import OptionParser
from jinja2 import Template
HEADER = '!!AUTO-GENERATED!! Edit bin/crontab/crontab.tpl instead.'
TEMPLATE = open(os.path.join(os.path.dirname(__file__), 'crontab.tpl')).read()
def main():
parser = OptionParser()
parser.add_option('-w', '--webapp',
help='Location of web app (required)')
parser.add_option('-u', '--user',
help=('Prefix cron with this user. '
'Only define for cron.d style crontabs.'))
parser.add_option('-p', '--python', default='/usr/bin/python2.7',
help='Python interpreter to use.')
(opts, args) = parser.parse_args()
if not opts.webapp:
parser.error('-w must be defined')
ctx = {'django': 'cd %s; %s manage.py' % (opts.webapp, opts.python)}
ctx['cron'] = '%s cron' % ctx['django']
if opts.user:
for k, v in ctx.iteritems():
ctx[k] = '%s %s' % (opts.user, v)
# Needs to stay below the opts.user injection.
ctx['python'] = opts.python
ctx['header'] = HEADER
print Template(TEMPLATE).render(**ctx)
if __name__ == '__main__':
main()
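# Illustrative invocation sketch (paths and user are hypothetical): a cron.d
# style crontab would be generated with
#
#   python2.7 gen-crons.py -w /srv/airmozilla -u apache > /etc/cron.d/airmozilla
#
# Omitting -u yields output suitable for a per-user crontab instead.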
| zofuthan/airmozilla | bin/crontab/gen-crons.py | Python | bsd-3-clause | 1,205 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Script to visualize the model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "[email protected]"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import SEPARATION_PLANE
from pymatgen.analysis.chemenv.utils.scripts_utils import visualize
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import Plane
import numpy as np
if __name__ == '__main__':
print('+-------------------------------------------------------+\n'
'| Development script of the ChemEnv utility of pymatgen |\n'
'| Visualization of the model coordination environments |\n'
'+-------------------------------------------------------+\n')
allcg = AllCoordinationGeometries()
vis = None
while True:
cg_symbol = raw_input('Enter symbol of the geometry you want to see, "l" to see the list '
'of existing geometries or "q" to quit : ')
if cg_symbol == 'q':
break
if cg_symbol == 'l':
print(allcg.pretty_print(maxcn=13, additional_info={'nb_hints': True}))
continue
try:
cg = allcg[cg_symbol]
except LookupError:
print('Wrong geometry, try again ...')
continue
print(cg.name)
for ipoint, point in enumerate(cg.points):
print('Point #{:d} : {} {} {}'.format(ipoint, repr(point[0]), repr(point[1]), repr(point[2])))
print('Algorithms used :')
for ialgo, algo in enumerate(cg.algorithms):
print('Algorithm #{:d} :'.format(ialgo))
print(algo)
print('')
# Visualize the separation plane of a given algorithm
sepplane = False
if any([algo.algorithm_type == SEPARATION_PLANE for algo in cg.algorithms]):
test = raw_input('Enter index of the algorithm for which you want to visualize the plane : ')
if test != '':
try:
ialgo = int(test)
algo = cg.algorithms[ialgo]
sepplane = True
except:
print('Unable to determine the algorithm/separation_plane you want '
'to visualize for this geometry. Continuing without it ...')
myfactor = 3.0
if vis is None:
vis = visualize(cg=cg, zoom=1.0, myfactor=myfactor)
else:
vis = visualize(cg=cg, vis=vis, myfactor=myfactor)
cg_points = [myfactor*np.array(pp) for pp in cg.points]
cg_central_site = myfactor*np.array(cg.central_site)
if sepplane:
pts = [cg_points[ii] for ii in algo.plane_points]
if algo.minimum_number_of_points == 2:
pts.append(cg_central_site)
centre = cg_central_site
else:
centre = np.sum(pts, axis=0) / len(pts)
factor = 1.5
target_dist = max([np.dot(pp-centre, pp-centre) for pp in cg_points])
current_dist = np.dot(pts[0] - centre, pts[0] - centre)
factor = factor * target_dist / current_dist
plane = Plane.from_npoints(points=pts)
p1 = centre + factor * (pts[0] - centre)
perp = factor * np.cross(pts[0] - centre, plane.normal_vector)
p2 = centre + perp
p3 = centre - factor * (pts[0] - centre)
p4 = centre - perp
vis.add_faces([[p1, p2, p3, p4]], [1.0, 0.0, 0.0], opacity=0.5)
target_radius = 0.25
radius = 1.5 * target_radius
if algo.minimum_number_of_points == 2:
vis.add_partial_sphere(coords=cg_central_site, radius=radius,
color=[1.0, 0.0, 0.0], start=0, end=360,
opacity=0.5)
for pp in pts:
vis.add_partial_sphere(coords=pp, radius=radius,
color=[1.0, 0.0, 0.0], start=0, end=360,
opacity=0.5)
ps1 = [cg_points[ii] for ii in algo.point_groups[0]]
ps2 = [cg_points[ii] for ii in algo.point_groups[1]]
for pp in ps1:
vis.add_partial_sphere(coords=pp, radius=radius,
color=[0.0, 1.0, 0.0], start=0, end=360,
opacity=0.5)
for pp in ps2:
vis.add_partial_sphere(coords=pp, radius=radius,
color=[0.0, 0.0, 1.0], start=0, end=360,
opacity=0.5)
vis.show()
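# Illustrative non-interactive sketch (assumes pymatgen's chemenv data is
# available; 'T:4' is the tetrahedron symbol used by chemenv):
#
#   allcg = AllCoordinationGeometries()
#   tet = allcg['T:4']
#   print(tet.name, len(tet.points))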
| xhqu1981/pymatgen | dev_scripts/chemenv/view_environment.py | Python | mit | 5,095 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
import sqlite3
except ImportError:
pass
import logging
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapMissingDependence
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
"""
Homepage: http://pysqlite.googlecode.com/ and http://packages.ubuntu.com/quantal/python-sqlite
User guide: http://docs.python.org/release/2.5/lib/module-sqlite3.html
API: http://docs.python.org/library/sqlite3.html
Debian package: python-sqlite (SQLite 2), python-pysqlite3 (SQLite 3)
License: MIT
Possible connectors: http://wiki.python.org/moin/SQLite
"""
def __init__(self):
GenericConnector.__init__(self)
self.__sqlite = sqlite3
def connect(self):
self.initConnection()
self.checkFileDb()
try:
self.connector = self.__sqlite.connect(database=self.db, check_same_thread=False, timeout=conf.timeout)
cursor = self.connector.cursor()
cursor.execute("SELECT * FROM sqlite_master")
cursor.close()
except (self.__sqlite.DatabaseError, self.__sqlite.OperationalError), msg:
warnMsg = "unable to connect using SQLite 3 library, trying with SQLite 2"
logger.warn(warnMsg)
try:
try:
import sqlite
except ImportError:
errMsg = "sqlmap requires 'python-sqlite' third-party library "
errMsg += "in order to directly connect to the database '%s'" % self.db
raise SqlmapMissingDependence(errMsg)
self.__sqlite = sqlite
self.connector = self.__sqlite.connect(database=self.db, check_same_thread=False, timeout=conf.timeout)
except (self.__sqlite.DatabaseError, self.__sqlite.OperationalError), msg:
raise SqlmapConnectionException(msg[0])
self.initCursor()
self.printConnected()
def fetchall(self):
try:
return self.cursor.fetchall()
except self.__sqlite.OperationalError, msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[0])
return None
def execute(self, query):
try:
self.cursor.execute(utf8encode(query))
except self.__sqlite.OperationalError, msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[0])
except self.__sqlite.DatabaseError, msg:
raise SqlmapConnectionException(msg[0])
self.connector.commit()
def select(self, query):
self.execute(query)
return self.fetchall()
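# Illustrative sketch (sqlmap normally drives this connector internally; the
# direct-connection configuration that populates self.db is assumed here):
#
#   connector = Connector()
#   connector.connect()
#   rows = connector.select("SELECT name FROM sqlite_master "
#                           "WHERE type='table'")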
| V11/volcano | server/sqlmap/plugins/dbms/sqlite/connector.py | Python | mit | 3,003 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, André Paramés <[email protected]>
# Based on the Git module by Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = u'''
---
module: bzr
author: "André Paramés (@andreparames)"
version_added: "1.1"
short_description: Deploy software (or files) from bzr branches
description:
- Manage I(bzr) branches to deploy files or software.
options:
name:
required: true
aliases: [ 'parent' ]
description:
- SSH or HTTP protocol address of the parent branch.
dest:
required: true
description:
- Absolute path of where the branch should be cloned to.
version:
required: false
default: "head"
description:
- What version of the branch to clone. This can be the
bzr revno or revid.
force:
required: false
default: "no"
choices: [ 'yes', 'no' ]
description:
- If C(yes), any modified files in the working
tree will be discarded. Before 1.9 the default
value was "yes".
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to bzr executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
'''
EXAMPLES = '''
# Example bzr checkout from Ansible Playbooks
- bzr:
name: 'bzr+ssh://foosball.example.org/path/to/branch'
dest: /srv/checkout
version: 22
'''
import re
class Bzr(object):
def __init__(self, module, parent, dest, version, bzr_path):
self.module = module
self.parent = parent
self.dest = dest
self.version = version
self.bzr_path = bzr_path
def _command(self, args_list, cwd=None, **kwargs):
(rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
return (rc, out, err)
def get_version(self):
'''samples the version of the bzr branch'''
cmd = "%s revno" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
revno = stdout.strip()
return revno
def clone(self):
'''makes a new bzr branch if it does not already exist'''
dest_dirname = os.path.dirname(self.dest)
try:
os.makedirs(dest_dirname)
except:
pass
if self.version.lower() != 'head':
args_list = ["branch", "-r", self.version, self.parent, self.dest]
else:
args_list = ["branch", self.parent, self.dest]
return self._command(args_list, check_rc=True, cwd=dest_dirname)
def has_local_mods(self):
cmd = "%s status -S" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
lines = stdout.splitlines()
lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines)
return len(lines) > 0
def reset(self, force):
'''
Resets the index and working tree to head.
Discards any changes to tracked files in the working
tree since that commit.
'''
if not force and self.has_local_mods():
self.module.fail_json(msg="Local modifications exist in branch (force=no).")
return self._command(["revert"], check_rc=True, cwd=self.dest)
def fetch(self):
'''updates branch from remote sources'''
if self.version.lower() != 'head':
(rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
else:
(rc, out, err) = self._command(["pull"], cwd=self.dest)
if rc != 0:
self.module.fail_json(msg="Failed to pull")
return (rc, out, err)
def switch_version(self):
'''once pulled, switch to a particular revno or revid'''
if self.version.lower() != 'head':
args_list = ["revert", "-r", self.version]
else:
args_list = ["revert"]
return self._command(args_list, check_rc=True, cwd=self.dest)
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
dest=dict(required=True, type='path'),
name=dict(required=True, aliases=['parent']),
version=dict(default='head'),
force=dict(default='no', type='bool'),
executable=dict(default=None),
)
)
dest = module.params['dest']
parent = module.params['name']
version = module.params['version']
force = module.params['force']
bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
rc, out, err, status = (0, None, None, None)
bzr = Bzr(module, parent, dest, version, bzr_path)
# if there is no bzr configuration, do a branch operation
# else pull and switch the version
before = None
local_mods = False
if not os.path.exists(bzrconfig):
(rc, out, err) = bzr.clone()
else:
# else do a pull
local_mods = bzr.has_local_mods()
before = bzr.get_version()
(rc, out, err) = bzr.reset(force)
if rc != 0:
module.fail_json(msg=err)
(rc, out, err) = bzr.fetch()
if rc != 0:
module.fail_json(msg=err)
# switch to version specified regardless of whether
# we cloned or pulled
(rc, out, err) = bzr.switch_version()
# determine if we changed anything
after = bzr.get_version()
changed = False
if before != after or local_mods:
changed = True
module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| Rajeshkumar90/ansible-modules-extras | source_control/bzr.py | Python | gpl-3.0 | 6,658 |
#Copyright 2013 Paul Barton
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import Quartz
from AppKit import NSEvent, NSScreen
from .base import PyMouseMeta, PyMouseEventMeta
pressID = [None, Quartz.kCGEventLeftMouseDown,
Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown]
releaseID = [None, Quartz.kCGEventLeftMouseUp,
Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp]
class PyMouse(PyMouseMeta):
def press(self, x, y, button=1):
event = Quartz.CGEventCreateMouseEvent(None,
pressID[button],
(x, y),
button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def release(self, x, y, button=1):
event = Quartz.CGEventCreateMouseEvent(None,
releaseID[button],
(x, y),
button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def move(self, x, y):
move = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventMouseMoved, (x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, move)
def drag(self, x, y):
drag = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventLeftMouseDragged, (x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, drag)
def position(self):
loc = NSEvent.mouseLocation()
return loc.x, Quartz.CGDisplayPixelsHigh(0) - loc.y
def screen_size(self):
return NSScreen.mainScreen().frame().size.width, NSScreen.mainScreen().frame().size.height
def scroll(self, vertical=None, horizontal=None, depth=None):
#Local helper function for generating Mac scroll events in one axis at a time
def scroll_event(y_move=0, x_move=0, z_move=0, n=1):
for _ in range(abs(n)):
scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent(
None, # No source
Quartz.kCGScrollEventUnitLine, # Unit of measurement is lines
3, # Number of wheels(dimensions)
y_move,
x_move,
z_move)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
#Execute vertical then horizontal then depth scrolling events
if vertical is not None:
vertical = int(vertical)
if vertical == 0: # Do nothing with 0 distance
pass
elif vertical > 0: # Scroll up if positive
scroll_event(y_move=1, n=vertical)
else: # Scroll down if negative
scroll_event(y_move=-1, n=abs(vertical))
if horizontal is not None:
horizontal = int(horizontal)
if horizontal == 0: # Do nothing with 0 distance
pass
elif horizontal > 0: # Scroll right if positive
scroll_event(x_move=1, n=horizontal)
else: # Scroll left if negative
scroll_event(x_move=-1, n=abs(horizontal))
if depth is not None:
depth = int(depth)
if depth == 0: # Do nothing with 0 distance
pass
elif vertical > 0: # Scroll "out" if positive
scroll_event(z_move=1, n=depth)
else: # Scroll "in" if negative
scroll_event(z_move=-1, n=abs(depth))
class PyMouseEvent(PyMouseEventMeta):
def run(self):
tap = Quartz.CGEventTapCreate(
Quartz.kCGSessionEventTap,
Quartz.kCGHeadInsertEventTap,
Quartz.kCGEventTapOptionDefault,
Quartz.CGEventMaskBit(Quartz.kCGEventMouseMoved) |
Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseUp) |
Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseUp) |
Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseUp),
self.handler,
None)
loopsource = Quartz.CFMachPortCreateRunLoopSource(None, tap, 0)
loop = Quartz.CFRunLoopGetCurrent()
Quartz.CFRunLoopAddSource(loop, loopsource, Quartz.kCFRunLoopDefaultMode)
Quartz.CGEventTapEnable(tap, True)
while self.state:
Quartz.CFRunLoopRunInMode(Quartz.kCFRunLoopDefaultMode, 5, False)
def handler(self, proxy, type, event, refcon):
(x, y) = Quartz.CGEventGetLocation(event)
if type in pressID:
self.click(x, y, pressID.index(type), True)
elif type in releaseID:
self.click(x, y, releaseID.index(type), False)
else:
self.move(x, y)
if self.capture:
Quartz.CGEventSetType(event, Quartz.kCGEventNull)
return event
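# Illustrative usage sketch (coordinates are arbitrary; click() is inherited
# from the PyMouseMeta base class):
#
#   m = PyMouse()
#   w, h = m.screen_size()
#   m.move(w // 2, h // 2)
#   m.click(w // 2, h // 2, 1)
#   m.scroll(vertical=-3)  # three wheel "lines" down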
| Sebelino/PyUserInput | pymouse/mac.py | Python | lgpl-3.0 | 5,547 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in linalg_ops.py.
Useful reference for derivative formulas is
An extended collection of matrix derivative results for forward and reverse
mode algorithmic differentiation by Mike Giles:
http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf
A detailed derivation of formulas for backpropagating through spectral layers
(SVD and Eig) by Ionescu, Vantzos & Sminchisescu:
https://arxiv.org/pdf/1509.07838v4.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as _linalg
@ops.RegisterGradient("MatrixInverse")
def _MatrixInverseGrad(op, grad):
"""Gradient for MatrixInverse."""
ainv = op.outputs[0]
return -math_ops.matmul(
ainv, math_ops.matmul(grad, ainv, adjoint_b=True), adjoint_a=True)
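# Illustrative check sketch (TF 1.x graph API, matching this file): the
# formula above encodes d(A^-1) = -A^-1 dA A^-1 and can be exercised with
# tf.gradients, e.g.:
#
#   a = tf.constant([[2., 0.], [0., 4.]])
#   g = tf.gradients(tf.matrix_inverse(a), a)  # uses _MatrixInverseGrad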
@ops.RegisterGradient("MatrixDeterminant")
def _MatrixDeterminantGrad(op, grad):
"""Gradient for MatrixDeterminant."""
a = op.inputs[0]
c = op.outputs[0]
a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)
multipliers = array_ops.reshape(grad * c,
array_ops.concat([array_ops.shape(c), [1, 1]],
0))
return multipliers * a_adj_inv
@ops.RegisterGradient("Cholesky")
def _CholeskyGrad(op, grad):
"""Gradient for Cholesky."""
# Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
l = op.outputs[0]
num_rows = array_ops.shape(l)[-1]
batch_shape = array_ops.shape(l)[:-2]
l_inverse = linalg_ops.matrix_triangular_solve(l,
linalg_ops.eye(
num_rows,
batch_shape=batch_shape,
dtype=l.dtype))
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
grad_a = math_ops.matmul(
math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a += _linalg.adjoint(grad_a)
return grad_a * 0.5
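# Illustrative check sketch (same TF 1.x assumptions): the Cholesky backprop
# above can be sanity-checked numerically in a session, e.g.:
#
#   a = tf.constant([[4., 2.], [2., 3.]])
#   err = tf.test.compute_gradient_error(a, [2, 2], tf.cholesky(a), [2, 2])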
@ops.RegisterGradient("Qr")
def _QrGrad(op, dq, dr):
"""Gradient for Qr."""
q, r = op.outputs
if q.dtype.is_complex:
raise NotImplementedError("QrGrad not implemented for dtype: %s" % q.dtype)
if (r.shape.ndims is None or r.shape.as_list()[-2] is None or
r.shape.as_list()[-1] is None):
raise NotImplementedError("QrGrad not implemented with dynamic shapes.")
if r.shape[-2].value != r.shape[-1].value:
raise NotImplementedError("QrGrad not implemented when ncols > nrows "
"or full_matrices is true and ncols != nrows.")
qdq = math_ops.matmul(q, dq, adjoint_a=True)
qdq_ = qdq - _linalg.adjoint(qdq)
rdr = math_ops.matmul(r, dr, adjoint_b=True)
rdr_ = rdr - _linalg.adjoint(rdr)
tril = array_ops.matrix_band_part(qdq_ + rdr_, -1, 0)
def _TriangularSolve(x, r):
"""Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri."""
return _linalg.adjoint(
linalg_ops.matrix_triangular_solve(
r, _linalg.adjoint(x), lower=False, adjoint=False))
grad_a = math_ops.matmul(q, dr + _TriangularSolve(tril, r))
grad_b = _TriangularSolve(dq - math_ops.matmul(q, qdq), r)
return grad_a + grad_b
@ops.RegisterGradient("MatrixSolve")
def _MatrixSolveGrad(op, grad):
"""Gradient for MatrixSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
c = op.outputs[0]
grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
return (grad_a, grad_b)
@ops.RegisterGradient("MatrixSolveLs")
def _MatrixSolveLsGrad(op, grad):
"""Gradients for MatrixSolveLs."""
# TODO(rmlarsen): The implementation could be more efficient:
# a) Output the Cholesky factorization from forward op instead of
# recomputing it here.
# b) Implement a symmetric rank-k update op instead of computing
# x*z + transpose(x*z). This pattern occurs other places in TensorFlow.
def _Overdetermined(op, grad):
"""Gradients for the overdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the first
kind:
X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B
which solve the least squares problem
min ||A * X - B||_F^2 + lambda ||X||_F^2.
"""
a = op.inputs[0]
b = op.inputs[1]
x = op.outputs[0]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
# pylint: disable=protected-access
chol = linalg_ops._RegularizedGramianCholesky(
a, l2_regularizer=l2_regularizer, first_kind=True)
# pylint: enable=protected-access
# Temporary z = (A^T * A + lambda * I)^{-1} * grad.
z = linalg_ops.cholesky_solve(chol, grad)
xzt = math_ops.matmul(x, z, adjoint_b=True)
zx_sym = xzt + array_ops.matrix_transpose(xzt)
grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True)
grad_b = math_ops.matmul(a, z)
return (grad_a, grad_b, None)
def _Underdetermined(op, grad):
"""Gradients for the underdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the second
kind:
X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B
that (for lambda=0) solve the least squares problem
min ||X||_F subject to A*X = B.
"""
a = op.inputs[0]
b = op.inputs[1]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
# pylint: disable=protected-access
chol = linalg_ops._RegularizedGramianCholesky(
a, l2_regularizer=l2_regularizer, first_kind=False)
# pylint: enable=protected-access
grad_b = linalg_ops.cholesky_solve(chol, math_ops.matmul(a, grad))
# Temporary tmp = (A * A^T + lambda * I)^{-1} * B.
tmp = linalg_ops.cholesky_solve(chol, b)
a1 = math_ops.matmul(tmp, a, adjoint_a=True)
a1 = -math_ops.matmul(grad_b, a1)
a2 = grad - math_ops.matmul(a, grad_b, adjoint_a=True)
a2 = math_ops.matmul(tmp, a2, adjoint_b=True)
grad_a = a1 + a2
return (grad_a, grad_b, None)
fast = op.get_attr("fast")
if fast is False:
raise ValueError("Gradient not defined for fast=False")
matrix_shape = op.inputs[0].get_shape()[-2:]
if matrix_shape.is_fully_defined():
if matrix_shape[-2] >= matrix_shape[-1]:
return _Overdetermined(op, grad)
else:
return _Underdetermined(op, grad)
else:
# We have to defer determining the shape to runtime and use
# conditional execution of the appropriate graph.
matrix_shape = array_ops.shape(op.inputs[0])[-2:]
return control_flow_ops.cond(matrix_shape[-2] >= matrix_shape[-1],
lambda: _Overdetermined(op, grad),
lambda: _Underdetermined(op, grad))
@ops.RegisterGradient("MatrixTriangularSolve")
def _MatrixTriangularSolveGrad(op, grad):
"""Gradient for MatrixTriangularSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
lower_a = op.get_attr("lower")
c = op.outputs[0]
grad_b = linalg_ops.matrix_triangular_solve(
a, grad, lower=lower_a, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
if lower_a:
grad_a = array_ops.matrix_band_part(grad_a, -1, 0)
else:
grad_a = array_ops.matrix_band_part(grad_a, 0, -1)
return (grad_a, grad_b)
@ops.RegisterGradient("SelfAdjointEigV2")
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
"""Gradient for SelfAdjointEigV2."""
e = op.outputs[0]
compute_v = op.get_attr("compute_v")
# a = op.inputs[0], which satisfies
# a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
with ops.control_dependencies([grad_e, grad_v]):
if compute_v:
v = op.outputs[1]
# Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
# Notice that because of the term involving f, the gradient becomes
# infinite (or NaN in practice) when eigenvalues are not unique.
# Mathematically this should not be surprising, since for (k-fold)
# degenerate eigenvalues, the corresponding eigenvectors are only defined
# up to arbitrary rotation in a (k-dimensional) subspace.
f = array_ops.matrix_set_diag(
math_ops.reciprocal(
array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
array_ops.zeros_like(e))
grad_a = math_ops.matmul(
v,
math_ops.matmul(
array_ops.matrix_diag(grad_e) +
f * math_ops.matmul(v, grad_v, adjoint_a=True),
v,
adjoint_b=True))
else:
_, v = linalg_ops.self_adjoint_eig(op.inputs[0])
grad_a = math_ops.matmul(v,
math_ops.matmul(
array_ops.matrix_diag(grad_e),
v,
adjoint_b=True))
# The forward op only depends on the lower triangular part of a, so here we
# symmetrize and take the lower triangle
grad_a = array_ops.matrix_band_part(grad_a + _linalg.adjoint(grad_a), -1, 0)
grad_a = array_ops.matrix_set_diag(grad_a,
0.5 * array_ops.matrix_diag_part(grad_a))
return grad_a
@ops.RegisterGradient("Svd")
def _SvdGrad(op, grad_s, grad_u, grad_v):
"""Gradient for the singular value decomposition."""
# The derivation for the compute_uv=False case, and most of
# the derivation for the full_matrices=True case, are in
# Giles' paper (see reference at top of file). A derivation for
# the full_matrices=False case is available at
# https://j-towns.github.io/papers/svd-derivative.pdf
a = op.inputs[0]
a_shape = a.get_shape().with_rank_at_least(2)
grad_s_mat = array_ops.matrix_diag(grad_s)
if not op.get_attr("compute_uv"):
s, u, v = linalg_ops.svd(a, compute_uv=True)
grad_a = math_ops.matmul(u, math_ops.matmul(grad_s_mat, v, adjoint_b=True))
grad_a.set_shape(a_shape)
return grad_a
full_matrices = op.get_attr("full_matrices")
# TODO(rmlarsen): Make this work with complex types.
if a.dtype.is_complex:
raise NotImplementedError(
"SVD gradient is not implemented for complex types and "
"compute_uv=True.")
grad_u_shape = grad_u.get_shape().with_rank_at_least(2)
grad_v_shape = grad_v.get_shape().with_rank_at_least(2)
m = a_shape[-2].merge_with(grad_u_shape[-2])
n = a_shape[-1].merge_with(grad_v_shape[-2])
batch_shape = a_shape[:-2].merge_with(grad_u_shape[:-2]).merge_with(
grad_v_shape[:-2])
a_shape = batch_shape.concatenate([m, n])
m = a_shape[-2].value
n = a_shape[-1].value
# TODO(rmlarsen): Make this work with placeholders.
if m is None or n is None:
raise NotImplementedError(
"SVD gradient has not been implemented for input with unknown "
"inner matrix shape.")
s = op.outputs[0]
u = op.outputs[1]
v = op.outputs[2]
use_adjoint = False
if m > n:
# Compute the gradient for A^H = V * S^T * U^H, and (implicitly) take the
# Hermitian transpose of the gradient at the end.
use_adjoint = True
m, n = n, m
u, v = v, u
grad_u, grad_v = grad_v, grad_u
with ops.control_dependencies([grad_s, grad_u, grad_v]):
if full_matrices and abs(m - n) > 1:
raise NotImplementedError(
"svd gradient is not implemented for abs(m - n) > 1 "
"when full_matrices is True")
s_mat = array_ops.matrix_diag(s)
s2 = math_ops.square(s)
# NOTICE: Because of the term involving f, the gradient becomes
# infinite (or NaN in practice) when singular values are not unique.
# Mathematically this should not be surprising, since for (k-fold)
# degenerate singular values, the corresponding singular vectors are
# only defined up a (k-dimensional) subspace. In practice, this can
# lead to numerical instability when singular values are close but not
# exactly equal.
f = array_ops.matrix_set_diag(
math_ops.reciprocal(
array_ops.expand_dims(s2, -2) - array_ops.expand_dims(s2, -1)),
array_ops.zeros_like(s))
s_inv_mat = array_ops.matrix_diag(math_ops.reciprocal(s))
v1 = v[..., :, :m]
grad_v1 = grad_v[..., :, :m]
u_gu = math_ops.matmul(u, grad_u, adjoint_a=True)
v_gv = math_ops.matmul(v1, grad_v1, adjoint_a=True)
f_u = f * u_gu
f_v = f * v_gv
term1_nouv = (
grad_s_mat + math_ops.matmul(f_u + _linalg.adjoint(f_u), s_mat) +
math_ops.matmul(s_mat, f_v + _linalg.adjoint(f_v)))
term1 = math_ops.matmul(u, math_ops.matmul(term1_nouv, v1, adjoint_b=True))
if m == n:
grad_a_before_transpose = term1
else:
gv1t = array_ops.matrix_transpose(grad_v1)
gv1t_v1 = math_ops.matmul(gv1t, v1)
term2_nous = gv1t - math_ops.matmul(gv1t_v1, v1, adjoint_b=True)
if full_matrices:
v2 = v[..., :, m:n]
grad_v2 = grad_v[..., :, m:n]
v1t_gv2 = math_ops.matmul(v1, grad_v2, adjoint_a=True)
term2_nous -= math_ops.matmul(v1t_gv2, v2, adjoint_b=True)
u_s_inv = math_ops.matmul(u, s_inv_mat)
term2 = math_ops.matmul(u_s_inv, term2_nous)
grad_a_before_transpose = term1 + term2
if use_adjoint:
grad_a = array_ops.matrix_transpose(grad_a_before_transpose)
else:
grad_a = grad_a_before_transpose
grad_a.set_shape(a_shape)
return grad_a
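# Illustrative sketch (same TF 1.x assumptions): for compute_uv=False the
# gradient above reduces to u * diag(grad_s) * v^H, exercised as, e.g.:
#
#   s = tf.svd(a, compute_uv=False)
#   g = tf.gradients(s, a)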
| nburn42/tensorflow | tensorflow/python/ops/linalg_grad.py | Python | apache-2.0 | 14,666 |
"""Constants for the Aftership integration."""
from __future__ import annotations
from datetime import timedelta
from typing import Final
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
DOMAIN: Final = "aftership"
ATTRIBUTION: Final = "Information provided by AfterShip"
ATTR_TRACKINGS: Final = "trackings"
BASE: Final = "https://track.aftership.com/"
CONF_SLUG: Final = "slug"
CONF_TITLE: Final = "title"
CONF_TRACKING_NUMBER: Final = "tracking_number"
DEFAULT_NAME: Final = "aftership"
UPDATE_TOPIC: Final = f"{DOMAIN}_update"
ICON: Final = "mdi:package-variant-closed"
MIN_TIME_BETWEEN_UPDATES: Final = timedelta(minutes=15)
SERVICE_ADD_TRACKING: Final = "add_tracking"
SERVICE_REMOVE_TRACKING: Final = "remove_tracking"
ADD_TRACKING_SERVICE_SCHEMA: Final = vol.Schema(
{
vol.Required(CONF_TRACKING_NUMBER): cv.string,
vol.Optional(CONF_TITLE): cv.string,
vol.Optional(CONF_SLUG): cv.string,
}
)
REMOVE_TRACKING_SERVICE_SCHEMA: Final = vol.Schema(
{vol.Required(CONF_SLUG): cv.string, vol.Required(CONF_TRACKING_NUMBER): cv.string}
)
| jawilson/home-assistant | homeassistant/components/aftership/const.py | Python | apache-2.0 | 1,116 |
"""Describe group states."""
from homeassistant.components.group import GroupIntegrationRegistry
from homeassistant.const import STATE_OK, STATE_PROBLEM
from homeassistant.core import HomeAssistant, callback
@callback
def async_describe_on_off_states(
hass: HomeAssistant, registry: GroupIntegrationRegistry
) -> None:
"""Describe group on off states."""
registry.on_off_states({STATE_PROBLEM}, STATE_OK)
| jawilson/home-assistant | homeassistant/components/plant/group.py | Python | apache-2.0 | 421 |
import operator
import unittest
import numpy
import six
from cupy import testing
@testing.gpu
class TestArrayElementwiseOp(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_scalar_op(self, op, xp, dtype, swap=False):
a = testing.shaped_arange((2, 3), xp, dtype)
if swap:
return op(dtype(2), a)
else:
return op(a, dtype(2))
def test_add_scalar(self):
self.check_array_scalar_op(operator.add)
def test_radd_scalar(self):
self.check_array_scalar_op(operator.add, swap=True)
def test_iadd_scalar(self):
self.check_array_scalar_op(operator.iadd)
def test_sub_scalar(self):
self.check_array_scalar_op(operator.sub)
def test_rsub_scalar(self):
self.check_array_scalar_op(operator.sub, swap=True)
def test_isub_scalar(self):
self.check_array_scalar_op(operator.isub)
def test_mul_scalar(self):
self.check_array_scalar_op(operator.mul)
def test_rmul_scalar(self):
self.check_array_scalar_op(operator.mul, swap=True)
def test_imul_scalar(self):
self.check_array_scalar_op(operator.imul)
def test_truediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.truediv)
def test_rtruediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.truediv, swap=True)
def test_itruediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.itruediv)
def test_div_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.div)
def test_rdiv_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.div, swap=True)
def test_idiv_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.idiv)
def test_floordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.floordiv)
def test_rfloordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.floordiv, swap=True)
def test_ifloordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.ifloordiv)
def test_pow_scalar(self):
self.check_array_scalar_op(operator.pow)
def test_rpow_scalar(self):
self.check_array_scalar_op(operator.pow, swap=True)
def test_ipow_scalar(self):
self.check_array_scalar_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_array_op(self, op, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
b = testing.shaped_reverse_arange((2, 3), xp, dtype)
return op(a, b)
def test_add_array(self):
self.check_array_array_op(operator.add)
def test_iadd_array(self):
self.check_array_array_op(operator.iadd)
def test_sub_array(self):
self.check_array_array_op(operator.sub)
def test_isub_array(self):
self.check_array_array_op(operator.isub)
def test_mul_array(self):
self.check_array_array_op(operator.mul)
def test_imul_array(self):
self.check_array_array_op(operator.imul)
def test_truediv_array(self):
numpy.seterr(divide='ignore')
self.check_array_array_op(operator.truediv)
def test_itruediv_array(self):
numpy.seterr(divide='ignore')
self.check_array_array_op(operator.itruediv)
def test_div_array(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_array_op(operator.div)
def test_idiv_array(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_array_op(operator.idiv)
def test_floordiv_array(self):
numpy.seterr(divide='ignore')
self.check_array_array_op(operator.floordiv)
def test_ifloordiv_array(self):
numpy.seterr(divide='ignore')
self.check_array_array_op(operator.ifloordiv)
def test_pow_array(self):
self.check_array_array_op(operator.pow)
def test_ipow_array(self):
self.check_array_array_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_broadcasted_op(self, op, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
b = testing.shaped_arange((2, 1), xp, dtype)
return op(a, b)
def test_broadcasted_add(self):
self.check_array_broadcasted_op(operator.add)
def test_broadcasted_iadd(self):
self.check_array_broadcasted_op(operator.iadd)
def test_broadcasted_sub(self):
self.check_array_broadcasted_op(operator.sub)
def test_broadcasted_isub(self):
self.check_array_broadcasted_op(operator.isub)
def test_broadcasted_mul(self):
self.check_array_broadcasted_op(operator.mul)
def test_broadcasted_imul(self):
self.check_array_broadcasted_op(operator.imul)
def test_broadcasted_truediv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.truediv)
def test_broadcasted_itruediv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.itruediv)
def test_broadcasted_div(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.div)
def test_broadcasted_idiv(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.idiv)
def test_broadcasted_floordiv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.floordiv)
def test_broadcasted_ifloordiv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.ifloordiv)
def test_broadcasted_pow(self):
self.check_array_broadcasted_op(operator.pow)
def test_broadcasted_ipow(self):
self.check_array_broadcasted_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_doubly_broadcasted_op(self, op, xp, dtype):
a = testing.shaped_arange((2, 1, 3), xp, dtype)
b = testing.shaped_arange((3, 1), xp, dtype)
return op(a, b)
def test_doubly_broadcasted_add(self):
self.check_array_doubly_broadcasted_op(operator.add)
def test_doubly_broadcasted_sub(self):
self.check_array_doubly_broadcasted_op(operator.sub)
def test_doubly_broadcasted_mul(self):
self.check_array_doubly_broadcasted_op(operator.mul)
def test_doubly_broadcasted_truediv(self):
numpy.seterr(divide='ignore', invalid='ignore')
self.check_array_doubly_broadcasted_op(operator.truediv)
def test_doubly_broadcasted_floordiv(self):
numpy.seterr(divide='ignore')
self.check_array_doubly_broadcasted_op(operator.floordiv)
def test_doubly_broadcasted_div(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_doubly_broadcasted_op(operator.div)
def test_doubly_broadcasted_pow(self):
self.check_array_doubly_broadcasted_op(operator.pow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_reversed_op(self, op, xp, dtype):
a = testing.shaped_arange((5,), xp, dtype)
return op(a, a[::-1])
def test_array_reversed_add(self):
self.check_array_reversed_op(operator.add)
def test_array_reversed_sub(self):
self.check_array_reversed_op(operator.sub)
def test_array_reversed_mul(self):
self.check_array_reversed_op(operator.mul)
| ikasumi/chainer | tests/cupy_tests/test_ndarray_elementwise_op.py | Python | mit | 8,059 |
# -*- coding: utf-8 -*-
from itertools import chain
from django.contrib.sites.models import Site
from django.core.urlresolvers import NoReverseMatch, reverse_lazy
from django.forms.widgets import Select, MultiWidget, TextInput
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from cms.forms.utils import get_site_choices, get_page_choices
from cms.models import Page, PageUser
from cms.templatetags.cms_admin import CMS_ADMIN_ICON_BASE
from cms.utils.compat.dj import force_unicode
class PageSelectWidget(MultiWidget):
"""A widget that allows selecting a page by first selecting a site and then
a page on that site in a two-step process.
"""
def __init__(self, site_choices=None, page_choices=None, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
self.choices = []
super(PageSelectWidget, self).__init__((Select, Select, Select), attrs)
def decompress(self, value):
"""
receives a page_id in value and returns the site_id and page_id
of that page or the current site_id and None if no page_id is given.
"""
if value:
page = Page.objects.get(pk=value)
site = page.site
return [site.pk, page.pk, page.pk]
site = Site.objects.get_current()
return [site.pk,None,None]
def _has_changed(self, initial, data):
# THIS IS A COPY OF django.forms.widgets.Widget._has_changed()
# (except for the first if statement)
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
# the same as an empty string; if the data or initial value we get
# is None, replace it with u''.
if data is None or (len(data)>=2 and data[1] in [None,'']):
data_value = u''
else:
data_value = data
if initial is None:
initial_value = u''
else:
initial_value = initial
if force_unicode(initial_value) != force_unicode(data_value):
return True
return False
def render(self, name, value, attrs=None):
# THIS IS A COPY OF django.forms.widgets.MultiWidget.render()
# (except for the last line)
# value is a list of values, each corresponding to a widget
# in self.widgets.
site_choices = get_site_choices()
page_choices = get_page_choices()
self.site_choices = site_choices
self.choices = page_choices
self.widgets = (Select(choices=site_choices ),
Select(choices=[('', '----')]),
Select(choices=self.choices, attrs={'style': "display:none;"} ),
)
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
output.append(r'''<script type="text/javascript">
(function($) {
var handleSiteChange = function(site_name, selected_id) {
$("#id_%(name)s_1 optgroup").remove();
var myOptions = $("#id_%(name)s_2 optgroup[label='" + site_name + "']").clone();
$("#id_%(name)s_1").append(myOptions);
$("#id_%(name)s_1").change();
};
var handlePageChange = function(page_id) {
if (page_id) {
$("#id_%(name)s_2 option").removeAttr('selected');
$("#id_%(name)s_2 option[value=" + page_id + "]").attr('selected','selected');
} else {
$("#id_%(name)s_2 option[value=]").attr('selected','selected');
};
};
$("#id_%(name)s_0").change(function(){
var site_label = $("#id_%(name)s_0").children(":selected").text();
handleSiteChange( site_label );
});
$("#id_%(name)s_1").change(function(){
var page_id = $(this).find('option:selected').val();
handlePageChange( page_id );
});
$(function(){
handleSiteChange( $("#id_%(name)s_0").children(":selected").text() );
$("#add_id_%(name)s").hide();
});
})(django.jQuery);
</script>''' % {'name': name})
return mark_safe(self.format_output(output))
def format_output(self, rendered_widgets):
return u' '.join(rendered_widgets)
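# Illustrative sketch (hypothetical form; django CMS pairs this widget with a
# matching form field such as PageSelectFormField): decompress() splits a
# stored page id into [site_id, page_id, page_id] for the three cooperating
# selects rendered above.
#
#   from cms.forms.fields import PageSelectFormField
#   class MyForm(forms.Form):
#       page = PageSelectFormField()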
class PageSmartLinkWidget(TextInput):
def __init__(self, attrs=None, ajax_view=None):
super(PageSmartLinkWidget, self).__init__(attrs)
self.ajax_url = self.get_ajax_url(ajax_view=ajax_view)
def get_ajax_url(self, ajax_view):
try:
return reverse_lazy(ajax_view)
except NoReverseMatch:
raise Exception(
'You should provide an ajax_view argument that can be reversed to the PageSmartLinkWidget'
)
def render(self, name=None, value=None, attrs=None):
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
output = [r'''<script type="text/javascript">
(function($){
$(function(){
$("#%(element_id)s").select2({
placeholder: "%(placeholder_text)s",
allowClear: true,
minimumInputLength: 3,
ajax: {
url: "%(ajax_url)s",
dataType: 'json',
data: function (term, page) {
return {
q: term, // search term
language_code: '%(language_code)s'
};
},
results: function (data, page) {
return {
more: false,
results: $.map(data, function(item, i){
return {
'id':item.redirect_url,
'text': item.title + ' (/' + item.path + ')'}
}
)
};
}
},
            // Allow creation of new entries
            createSearchChoice: function(term, data) {
                if ($(data).filter(function() {
                        return this.text.localeCompare(term) === 0;
                    }).length === 0) {
                    return {id: term, text: term};
                }
            },
multiple: false,
initSelection : function (element, callback) {
var initialValue = element.val()
callback({id:initialValue, text: initialValue});
}
});
})
})(django.jQuery);
</script>''' % {
'element_id': id_,
'placeholder_text': final_attrs.get('placeholder_text', ''),
'language_code': self.language,
'ajax_url': force_unicode(self.ajax_url)
}]
output.append(super(PageSmartLinkWidget, self).render(name, value, attrs))
return mark_safe(u''.join(output))
class Media:
css = {
'all': ('cms/js/select2/select2.css',
'cms/js/select2/select2-bootstrap.css',)
}
js = (#'cms/js/libs/jquery.min.js',
'cms/js/select2/select2.js',)
class UserSelectAdminWidget(Select):
"""Special widget used in page permission inlines, because we have to render
an add user (plus) icon, but point it somewhere else - to special user creation
view, which is accessible only if user haves "add user" permissions.
Current user should be assigned to widget in form constructor as an user
attribute.
"""
def render(self, name, value, attrs=None, choices=()):
output = [super(UserSelectAdminWidget, self).render(name, value, attrs, choices)]
if hasattr(self, 'user') and (self.user.is_superuser or \
self.user.has_perm(PageUser._meta.app_label + '.' + PageUser._meta.get_add_permission())):
# append + icon
add_url = '../../../cms/pageuser/add/'
output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
(add_url, name))
output.append(u'<img src="%sicon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (CMS_ADMIN_ICON_BASE, _('Add Another')))
return mark_safe(u''.join(output))
class AppHookSelect(Select):
"""Special widget used for the App Hook selector in the Advanced Settings
of the Page Admin. It adds support for a data attribute per option and
includes supporting JS into the page.
"""
class Media:
js = ('cms/js/modules/cms.base.js', 'cms/js/modules/cms.app_hook_select.js', )
def __init__(self, attrs=None, choices=(), app_namespaces={}):
self.app_namespaces = app_namespaces
super(AppHookSelect, self).__init__(attrs, choices)
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
if option_value in self.app_namespaces:
data_html = mark_safe(' data-namespace="%s"' % self.app_namespaces[option_value])
else:
data_html = ''
return '<option value="%s"%s%s>%s</option>' % (
option_value,
selected_html,
data_html,
force_text(option_label),
)
def render_options(self, choices, selected_choices):
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
| amaozhao/basecms | cms/forms/widgets.py | Python | mit | 10,371 |
"""
Serializer for user API
"""
from rest_framework import serializers
from rest_framework.reverse import reverse
from django.template import defaultfilters
from courseware.access import has_access
from student.models import CourseEnrollment, User
from certificates.models import certificate_status_for_student, CertificateStatuses
from xmodule.course_module import DEFAULT_START_DATE
class CourseOverviewField(serializers.RelatedField):
"""Custom field to wrap a CourseDescriptor object. Read-only."""
def to_representation(self, course_overview):
course_id = unicode(course_overview.id)
request = self.context.get('request', None)
if request:
video_outline_url = reverse(
'video-summary-list',
kwargs={'course_id': course_id},
request=request
)
course_updates_url = reverse(
'course-updates-list',
kwargs={'course_id': course_id},
request=request
)
course_handouts_url = reverse(
'course-handouts-list',
kwargs={'course_id': course_id},
request=request
)
discussion_url = reverse(
'discussion_course',
kwargs={'course_id': course_id},
request=request
) if course_overview.is_discussion_tab_enabled() else None
else:
video_outline_url = None
course_updates_url = None
course_handouts_url = None
discussion_url = None
if course_overview.advertised_start is not None:
start_type = "string"
start_display = course_overview.advertised_start
elif course_overview.start != DEFAULT_START_DATE:
start_type = "timestamp"
start_display = defaultfilters.date(course_overview.start, "DATE_FORMAT")
else:
start_type = "empty"
start_display = None
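        # Illustrative: advertised_start "Spring 2016" yields ("string", "Spring 2016");
        # a concrete non-default start yields ("timestamp", <formatted date>);
        # otherwise ("empty", None).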
return {
"id": course_id,
"name": course_overview.display_name,
"number": course_overview.display_number_with_default,
"org": course_overview.display_org_with_default,
"start": course_overview.start,
"start_display": start_display,
"start_type": start_type,
"end": course_overview.end,
"course_image": course_overview.course_image_url,
"social_urls": {
"facebook": course_overview.facebook_url,
},
"latest_updates": {
"video": None
},
"video_outline": video_outline_url,
"course_updates": course_updates_url,
"course_handouts": course_handouts_url,
"discussion_url": discussion_url,
"subscription_id": course_overview.clean_id(padding_char='_'),
"courseware_access": has_access(request.user, 'load_mobile', course_overview).to_json() if request else None
}
class CourseEnrollmentSerializer(serializers.ModelSerializer):
"""
Serializes CourseEnrollment models
"""
course = CourseOverviewField(source="course_overview", read_only=True)
certificate = serializers.SerializerMethodField()
def get_certificate(self, model):
"""Returns the information about the user's certificate in the course."""
certificate_info = certificate_status_for_student(model.user, model.course_id)
if certificate_info['status'] == CertificateStatuses.downloadable:
return {
"url": certificate_info['download_url'],
}
else:
return {}
class Meta(object):
model = CourseEnrollment
fields = ('created', 'mode', 'is_active', 'course', 'certificate')
lookup_field = 'username'
class UserSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializes User models
"""
name = serializers.ReadOnlyField(source='profile.name')
course_enrollments = serializers.HyperlinkedIdentityField(
view_name='courseenrollment-detail',
lookup_field='username'
)
class Meta(object):
model = User
fields = ('id', 'username', 'email', 'name', 'course_enrollments')
lookup_field = 'username'
| xingyepei/edx-platform | lms/djangoapps/mobile_api/users/serializers.py | Python | agpl-3.0 | 4,342 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Downloads the necessary NLTK corpora for TextBlob.
Usage: ::
$ python -m textblob.download_corpora
If you only intend to use TextBlob's default models, you can use the "lite"
option: ::
$ python -m textblob.download_corpora lite
"""
import sys
import nltk
MIN_CORPORA = [
'brown', # Required for FastNPExtractor
'punkt', # Required for WordTokenizer
'wordnet' # Required for lemmatization
]
ADDITIONAL_CORPORA = [
'conll2000', # Required for ConllExtractor
'maxent_treebank_pos_tagger', # Required for NLTKTagger
'movie_reviews', # Required for NaiveBayesAnalyzer
]
ALL_CORPORA = MIN_CORPORA + ADDITIONAL_CORPORA
def download_lite():
for each in MIN_CORPORA:
nltk.download(each)
def download_all():
for each in ALL_CORPORA:
nltk.download(each)
def main():
if 'lite' in sys.argv:
download_lite()
else:
download_all()
print("Finished.")
if __name__ == '__main__':
main()
| beni55/TextBlob | textblob/download_corpora.py | Python | mit | 1,025 |
from django.db import transaction
from denorm.db import base
class RandomBigInt(base.RandomBigInt):
def sql(self):
return '(9223372036854775806::INT8 * ((RANDOM()-0.5)*2.0) )::INT8'
class TriggerNestedSelect(base.TriggerNestedSelect):
def sql(self):
columns = self.columns
table = self.table
where = ",".join(["%s = %s" % (k, v) for k, v in self.kwargs.iteritems()])
return 'SELECT DISTINCT %(columns)s FROM %(table)s WHERE %(where)s' % locals(), tuple()
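# Illustrative (assuming the base class signature TriggerNestedSelect(table, columns, **kwargs),
# with columns already a string): TriggerNestedSelect('app_item', 'item_id', object_id='NEW.id')
# would render roughly: SELECT DISTINCT item_id FROM app_item WHERE object_id = NEW.id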
class TriggerActionInsert(base.TriggerActionInsert):
def sql(self):
table = self.model._meta.db_table
columns = "(" + ",".join(self.columns) + ")"
params = []
if isinstance(self.values, TriggerNestedSelect):
sql, nested_params = self.values.sql()
values = "(" + sql + ")"
params.extend(nested_params)
else:
values = "VALUES(" + ",".join(self.values) + ")"
sql = (
'BEGIN\n'
'INSERT INTO %(table)s %(columns)s %(values)s;\n'
'EXCEPTION WHEN unique_violation THEN -- do nothing\n'
'END\n'
) % locals()
return sql, params
class TriggerActionUpdate(base.TriggerActionUpdate):
def sql(self):
table = self.model._meta.db_table
params = []
updates = ','.join(["%s=%s" % (k, v) for k, v in zip(self.columns, self.values)])
if isinstance(self.where, tuple):
where, where_params = self.where
else:
where, where_params = self.where, []
params.extend(where_params)
return 'UPDATE %(table)s SET %(updates)s WHERE %(where)s' % locals(), params
class Trigger(base.Trigger):
def name(self):
name = base.Trigger.name(self)
if self.content_type_field:
name += "_%s" % self.content_type
return name
def sql(self):
name = self.name()
params = []
action_set = set()
for a in self.actions:
sql, action_params = a.sql()
if sql:
action_set.add(sql)
params.extend(action_params)
actions = ";\n ".join(action_set) + ';'
table = self.db_table
time = self.time.upper()
event = self.event.upper()
content_type = self.content_type
ct_field = self.content_type_field
conditions = []
if event == "UPDATE":
for field, native_type in self.fields:
if native_type is None:
# If Django didn't know what this field type should be
# then compare it as text - Fixes a problem of trying to
# compare PostGIS geometry fields.
conditions.append("(OLD.%(f)s::%(t)s IS DISTINCT FROM NEW.%(f)s::%(t)s)" % {'f': field, 't': 'text'})
else:
conditions.append("( OLD.%(f)s IS DISTINCT FROM NEW.%(f)s )" % {'f': field})
conditions = ["(%s)" % "OR".join(conditions)]
if ct_field:
if event == "UPDATE":
conditions.append("(OLD.%(ctf)s=%(ct)s)OR(NEW.%(ctf)s=%(ct)s)" % {'ctf': ct_field, 'ct': content_type})
elif event == "INSERT":
conditions.append("(NEW.%s=%s)" % (ct_field, content_type))
elif event == "DELETE":
conditions.append("(OLD.%s=%s)" % (ct_field, content_type))
if not conditions:
cond = "TRUE"
else:
cond = "AND".join(conditions)
sql = """
CREATE OR REPLACE FUNCTION func_%(name)s()
RETURNS TRIGGER AS $$
BEGIN
IF %(cond)s THEN
%(actions)s
END IF;
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER %(name)s
%(time)s %(event)s ON %(table)s
FOR EACH ROW EXECUTE PROCEDURE func_%(name)s();
""" % locals()
return sql, params
class TriggerSet(base.TriggerSet):
def drop(self):
cursor = self.cursor()
cursor.execute("SELECT pg_class.relname, pg_trigger.tgname FROM pg_trigger LEFT JOIN pg_class ON (pg_trigger.tgrelid = pg_class.oid) WHERE pg_trigger.tgname LIKE 'denorm_%%';")
for table_name, trigger_name in cursor.fetchall():
cursor.execute('DROP TRIGGER %s ON %s;' % (trigger_name, table_name))
transaction.commit_unless_managed(using=self.using)
def install(self):
cursor = self.cursor()
cursor.execute("SELECT lanname FROM pg_catalog.pg_language WHERE lanname ='plpgsql'")
if not cursor.fetchall():
cursor.execute('CREATE LANGUAGE plpgsql')
for name, trigger in self.triggers.iteritems():
sql, args = trigger.sql()
cursor.execute(sql, args)
transaction.commit_unless_managed(using=self.using)
| indexofire/gork | src/gork/contrib/denorm/db/postgresql/triggers.py | Python | mit | 4,826 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints brief HTML picture and links to resources
"""
__revision__ = "$Id$"
def format_element(bfo):
"""
Prints html image and link to photo resources.
"""
from invenio.config import CFG_SITE_URL, CFG_SITE_RECORD
resources = bfo.fields("8564_")
out = ""
for resource in resources:
if resource.get("x", "") == "icon":
out += '<a href="'+CFG_SITE_URL+'/'+ CFG_SITE_RECORD +'/'+bfo.control_field("001")+ \
'?ln='+ bfo.lang + '"><img src="' + resource.get("u", "").replace(" ","") \
+ '" alt="" border="0"/></a>'
return out
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
| nkalodimas/invenio | modules/bibformat/lib/elements/bfe_photo_resources_brief.py | Python | gpl-2.0 | 1,594 |
# (c) 2019 Telstra Corporation Limited
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.modules.cloud.amazon.aws_acm import pem_chain_split, chain_compare
from ansible.module_utils._text import to_bytes, to_text
from pprint import pprint
def test_chain_compare():
# The functions we're testing take module as an argument
# Just so they can call module.fail_json
# Let's just use None for the unit tests,
# Because they shouldn't fail
# And if they do, fail_json is not applicable
module = None
fixture_suffix = 'test/units/modules/cloud/amazon/fixtures/certs'
# Test chain split function on super simple (invalid) certs
expected = ['aaa', 'bbb', 'ccc']
for fname in ['simple-chain-a.cert', 'simple-chain-b.cert']:
path = fixture_suffix + '/' + fname
with open(path, 'r') as f:
pem = to_text(f.read())
actual = pem_chain_split(module, pem)
actual = [a.strip() for a in actual]
if actual != expected:
print("Expected:")
pprint(expected)
print("Actual:")
pprint(actual)
raise AssertionError("Failed to properly split %s" % fname)
# Now test real chains
    # chains with the same 'same_as' value should be considered equal
test_chains = [
{ # Original Cert chain
'path': fixture_suffix + '/chain-1.0.cert',
'same_as': 1,
'length': 3
},
{ # Same as 1.0, but longer PEM lines
'path': fixture_suffix + '/chain-1.1.cert',
'same_as': 1,
'length': 3
},
{ # Same as 1.0, but without the stuff before each --------
'path': fixture_suffix + '/chain-1.2.cert',
'same_as': 1,
'length': 3
},
{ # Same as 1.0, but in a different order, so should be considered different
'path': fixture_suffix + '/chain-1.3.cert',
'same_as': 2,
'length': 3
},
{ # Same as 1.0, but with last link missing
'path': fixture_suffix + '/chain-1.4.cert',
'same_as': 3,
'length': 2
},
{ # Completely different cert chain to all the others
'path': fixture_suffix + '/chain-4.cert',
'same_as': 4,
'length': 3
},
{ # Single cert
'path': fixture_suffix + '/a.pem',
'same_as': 5,
'length': 1
},
{ # a different, single cert
'path': fixture_suffix + '/b.pem',
'same_as': 6,
'length': 1
}
]
for chain in test_chains:
with open(chain['path'], 'r') as f:
chain['pem_text'] = to_text(f.read())
# Test to make sure our regex isn't too greedy
chain['split'] = pem_chain_split(module, chain['pem_text'])
if len(chain['split']) != chain['length']:
print("Cert before split")
print(chain['pem_text'])
print("Cert after split")
pprint(chain['split'])
print("path: %s" % chain['path'])
print("Expected chain length: %d" % chain['length'])
print("Actual chain length: %d" % len(chain['split']))
raise AssertionError("Chain %s was not split properly" % chain['path'])
for chain_a in test_chains:
for chain_b in test_chains:
expected = (chain_a['same_as'] == chain_b['same_as'])
# Now test the comparison function
actual = chain_compare(module, chain_a['pem_text'], chain_b['pem_text'])
if expected != actual:
print("Error, unexpected comparison result between \n%s\nand\n%s" % (chain_a['path'], chain_b['path']))
print("Expected %s got %s" % (str(expected), str(actual)))
assert(expected == actual)
| roadmapper/ansible | test/units/modules/cloud/amazon/test_aws_acm.py | Python | gpl-3.0 | 4,581 |
#!/usr/bin/env python2
'''
Simple monitoring script to collect per process cpu percentage
and mem usage in bytes (vms or virt and rss)
usage:
cron-send-cpu-mem-stats process_name openshift.whatever.zabbix.key
or
cron-send-cpu-mem-stats 'something parameter more params' openshift.something.parameter.more.params
The script will attach .cpu and .mem.{vms|rss} to the end of the zabbix key name for the values
A future enhancement could be to support multiple instances; that would add the pid to the key,
but those would have to be dynamic items in zabbix.
'''
# vim: expandtab:tabstop=4:shiftwidth=4
# Disabling invalid-name because pylint doesn't like the naming conention we have.
# pylint: disable=invalid-name
import argparse
import psutil
# Reason: disable pylint import-error because our libs aren't loaded on jenkins.
# Status: temporary until we start testing in a container where our stuff is installed.
# pylint: disable=import-error
from openshift_tools.monitoring.metric_sender import MetricSender
def parse_args():
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='CPU and Memory per process stats collector')
parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
parser.add_argument('process_str', help='The process command line string to match')
parser.add_argument('zabbix_key_prefix', help='Prefix for the key that will be sent \
to zabbix with this data, will get a .cpu and .mem suffix')
return parser.parse_args()
def main():
""" Main function to run the check """
argz = parse_args()
proc_parts = argz.process_str.split()
zagg_data = {}
for proc in psutil.process_iter():
try:
if proc_parts[0] == proc.name():
proc.dict = proc.as_dict(['cmdline', 'memory_info'])
cmdline = proc.dict['cmdline']
if len(proc_parts) > 1 and len(cmdline) > 1:
part_count = len(proc_parts[1:])
                    # This call might be confusing (I know I will be in 2 weeks), so here's a quick explanation:
                    # if the process name matched above, this checks the remaining strings
                    # against the /proc/<pid>/cmdline contents; order shouldn't matter since all have to match.
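                    # Illustrative: process_str 'java -jar app.jar' matches cmdline
                    # ['java', '-jar', 'app.jar', '--port', '80'] because
                    # {'-jar', 'app.jar'} equals the set of the next two cmdline arguments.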
if len(set(proc_parts[1:]).intersection(set(cmdline[1:1+part_count]))) != part_count:
continue
if argz.debug:
print cmdline
cpu_percent = '{0:.2f}'.format(proc.cpu_percent(interval=0.5))
mem_vms = '{0}'.format(getattr(proc.dict['memory_info'], 'vms'))
mem_rss = '{0}'.format(getattr(proc.dict['memory_info'], 'rss'))
zagg_data = {'{0}.cpu'.format(argz.zabbix_key_prefix) : cpu_percent,
'{0}.mem.vms'.format(argz.zabbix_key_prefix) : mem_vms,
'{0}.mem.rss'.format(argz.zabbix_key_prefix) : mem_rss}
except psutil.NoSuchProcess:
pass
if argz.debug:
try:
print 'Process ({0}) is using {1} CPU and {2} {3} memory'.format(argz.process_str,
cpu_percent,
mem_vms,
mem_rss)
print 'Zagg will receive: {0}'.format(zagg_data)
except NameError as ex:
print 'No values: {0}'.format(ex)
if zagg_data:
ms = MetricSender(debug=argz.debug)
ms.add_metric(zagg_data)
ms.send_metrics()
if __name__ == '__main__':
main()
| ivanhorvath/openshift-tools | scripts/monitoring/cron-send-cpu-mem-stats.py | Python | apache-2.0 | 3,773 |
def f(s):
s = s[::-1]
return s.swapcase()
result = f(f(f(f(f('abcdef'))))) # breakpoint
| allotria/intellij-community | python/testData/debug/stepping/test_smart_step_into_native_function_in_return.py | Python | apache-2.0 | 99 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Secrets framework provides means of getting connection objects from various sources, e.g. the following:
* Environment variables
* Metastore database
* AWS SSM Parameter store
"""
__all__ = ['BaseSecretsBackend', 'DEFAULT_SECRETS_SEARCH_PATH']
from airflow.secrets.base_secrets import BaseSecretsBackend
DEFAULT_SECRETS_SEARCH_PATH = [
"airflow.secrets.environment_variables.EnvironmentVariablesBackend",
"airflow.secrets.metastore.MetastoreBackend",
]
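# Minimal usage sketch (illustrative, commented out; assumes backends expose
# get_conn_uri(conn_id) as defined on BaseSecretsBackend):
#
#     from airflow.utils.module_loading import import_string
#
#     def resolve_conn_uri(conn_id):
#         for backend_path in DEFAULT_SECRETS_SEARCH_PATH:
#             uri = import_string(backend_path)().get_conn_uri(conn_id)
#             if uri is not None:
#                 return uri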
| airbnb/airflow | airflow/secrets/__init__.py | Python | apache-2.0 | 1,267 |
from buck import format_watchman_query_params, glob_internal, LazyBuildEnvPartial
from buck import subdir_glob, BuildFileContext
from pathlib import Path, PurePosixPath, PureWindowsPath
import os
import shutil
import tempfile
import unittest
class FakePathMixin(object):
def glob(self, pattern):
return self.glob_results.get(pattern)
def is_file(self):
return True
class FakePosixPath(FakePathMixin, PurePosixPath):
pass
class FakeWindowsPath(FakePathMixin, PureWindowsPath):
pass
def fake_path(fake_path_class, path, glob_results={}):
# Path does magic in __new__ with its args; it's hard to add more without
# changing that class. So we use a wrapper function to diddle with
# FakePath's members.
result = fake_path_class(path)
result.glob_results = {}
for pattern, paths in glob_results.iteritems():
result.glob_results[pattern] = [result / fake_path_class(p) for p in paths]
return result
class TestBuckPlatformBase(object):
def test_glob_includes_simple(self):
search_base = self.fake_path(
'foo',
glob_results={'*.java': ['A.java', 'B.java']})
self.assertGlobMatches(
['A.java', 'B.java'],
glob_internal(
includes=['*.java'],
excludes=[],
include_dotfiles=False,
search_base=search_base))
def test_glob_includes_sort(self):
search_base = self.fake_path(
'foo',
glob_results={'*.java': ['A.java', 'E.java', 'D.java', 'C.java', 'B.java']})
self.assertGlobMatches(
['A.java', 'B.java', 'C.java', 'D.java', 'E.java'],
glob_internal(
includes=['*.java'],
excludes=[],
include_dotfiles=False,
search_base=search_base))
def test_glob_includes_multi(self):
search_base = self.fake_path(
'foo',
glob_results={
'bar/*.java': ['bar/A.java', 'bar/B.java'],
'baz/*.java': ['baz/C.java', 'baz/D.java'],
})
self.assertGlobMatches(
['bar/A.java', 'bar/B.java', 'baz/C.java', 'baz/D.java'],
glob_internal(
includes=['bar/*.java', 'baz/*.java'],
excludes=[],
include_dotfiles=False,
search_base=search_base))
def test_glob_excludes_double_star(self):
search_base = self.fake_path(
'foo',
glob_results={
'**/*.java': ['A.java', 'B.java', 'Test.java'],
})
self.assertGlobMatches(
['A.java', 'B.java'],
glob_internal(
includes=['**/*.java'],
excludes=['**/*Test.java'],
include_dotfiles=False,
search_base=search_base))
def test_glob_excludes_multi(self):
search_base = self.fake_path(
'foo',
glob_results={
'bar/*.java': ['bar/A.java', 'bar/B.java'],
'baz/*.java': ['baz/C.java', 'baz/D.java'],
})
self.assertGlobMatches(
['bar/B.java', 'baz/D.java'],
glob_internal(
includes=['bar/*.java', 'baz/*.java'],
excludes=['*/[AC].java'],
include_dotfiles=False,
search_base=search_base))
def test_subdir_glob(self):
build_env = BuildFileContext(None, None, None, None, None, None, None, None)
search_base = self.fake_path(
'foo',
glob_results={
'lib/bar/*.h': ['lib/bar/A.h', 'lib/bar/B.h'],
'lib/baz/*.h': ['lib/baz/C.h', 'lib/baz/D.h'],
})
self.assertGlobMatches(
{
'bar/B.h': 'lib/bar/B.h',
'bar/A.h': 'lib/bar/A.h',
'baz/D.h': 'lib/baz/D.h',
'baz/C.h': 'lib/baz/C.h',
},
subdir_glob([
('lib', 'bar/*.h'),
('lib', 'baz/*.h')],
build_env=build_env,
search_base=search_base))
def test_subdir_glob_with_prefix(self):
build_env = BuildFileContext(None, None, None, None, None, None, None, None)
search_base = self.fake_path(
'foo',
glob_results={
'lib/bar/*.h': ['lib/bar/A.h', 'lib/bar/B.h'],
})
self.assertGlobMatches(
{
'Prefix/bar/B.h': 'lib/bar/B.h',
'Prefix/bar/A.h': 'lib/bar/A.h',
},
subdir_glob([('lib', 'bar/*.h')],
prefix='Prefix',
build_env=build_env,
search_base=search_base))
def test_glob_excludes_relative(self):
search_base = self.fake_path(
'foo',
glob_results={
'**/*.java': ['foo/A.java', 'foo/bar/B.java', 'bar/C.java'],
})
self.assertGlobMatches(
['foo/A.java', 'foo/bar/B.java'],
glob_internal(
includes=['**/*.java'],
excludes=['bar/*.java'],
include_dotfiles=False,
search_base=search_base))
def test_glob_includes_skips_dotfiles(self):
search_base = self.fake_path(
'foo',
glob_results={'*.java': ['A.java', '.B.java']})
self.assertGlobMatches(
['A.java'],
glob_internal(
includes=['*.java'],
excludes=[],
include_dotfiles=False,
search_base=search_base))
def test_glob_includes_does_not_skip_dotfiles_if_include_dotfiles(self):
search_base = self.fake_path(
'foo',
glob_results={'*.java': ['A.java', '.B.java']})
self.assertGlobMatches(
['.B.java', 'A.java'],
glob_internal(
includes=['*.java'],
excludes=[],
include_dotfiles=True,
search_base=search_base))
def test_lazy_build_env_partial(self):
def cobol_binary(
name,
deps=[],
build_env=None):
return (name, deps, build_env)
testLazy = LazyBuildEnvPartial(cobol_binary)
testLazy.build_env = {}
self.assertEqual(
('HAL', [1, 2, 3], {}),
testLazy.invoke(name='HAL', deps=[1, 2, 3]))
testLazy.build_env = {'abc': 789}
self.assertEqual(
('HAL', [1, 2, 3], {'abc': 789}),
testLazy.invoke(name='HAL', deps=[1, 2, 3]))
def test_explicit_exclude_with_file_separator_excludes(self):
search_base = self.fake_path(
'foo',
glob_results={'java/**/*.java': ['java/Include.java', 'java/Exclude.java']})
self.assertGlobMatches(
['java/Include.java'],
glob_internal(
includes=['java/**/*.java'],
excludes=['java/Exclude.java'],
include_dotfiles=False,
search_base=search_base))
class TestBuckPosix(TestBuckPlatformBase, unittest.TestCase):
@staticmethod
def fake_path(*args, **kwargs):
return fake_path(FakePosixPath, *args, **kwargs)
def assertGlobMatches(self, expected, actual):
self.assertEqual(expected, actual)
class TestBuckWindows(TestBuckPlatformBase, unittest.TestCase):
@staticmethod
def fake_path(*args, **kwargs):
return fake_path(FakeWindowsPath, *args, **kwargs)
def assertGlobMatches(self, expected, actual):
# Fix the path separator to make test writing easier
fixed_expected = None
if isinstance(expected, list):
fixed_expected = []
for path in expected:
fixed_expected.append(path.replace('/', '\\'))
else:
fixed_expected = {}
for key, value in expected.items():
fixed_expected.update({key.replace('/', '\\'): value.replace('/', '\\')})
self.assertEqual(fixed_expected, actual)
class TestBuck(unittest.TestCase):
def test_glob_double_star_integration(self):
d = tempfile.mkdtemp()
try:
subdir = os.path.join(d, 'b', 'a', 'c', 'a')
os.makedirs(subdir)
f = open(os.path.join(subdir, 'A.java'), 'w')
f.close()
f = open(os.path.join(subdir, 'B.java'), 'w')
f.close()
f = open(os.path.join(subdir, 'Test.java'), 'w')
f.close()
f = open(os.path.join(subdir, '.tmp.java'), 'w')
f.close()
os.makedirs(os.path.join(subdir, 'NotAFile.java'))
self.assertEquals(
[
os.path.join('b', 'a', 'c', 'a', 'A.java'),
os.path.join('b', 'a', 'c', 'a', 'B.java'),
],
glob_internal(
includes=['b/a/**/*.java'],
excludes=['**/*Test.java'],
include_dotfiles=False,
search_base=Path(d)))
finally:
shutil.rmtree(d)
def test_case_preserved(self):
d = tempfile.mkdtemp()
try:
subdir = os.path.join(d, 'java')
os.makedirs(subdir)
open(os.path.join(subdir, 'Main.java'), 'w').close()
self.assertEquals(
[
os.path.join('java', 'Main.java'),
],
glob_internal(
includes=['java/Main.java'],
excludes=[],
include_dotfiles=False,
search_base=Path(d)))
finally:
shutil.rmtree(d)
def test_watchman_query_params_includes(self):
query_params = format_watchman_query_params(
['**/*.java'],
[],
False,
'/path/to/glob')
self.assertEquals(
{
'relative_root': '/path/to/glob',
'path': [''],
'fields': ['name'],
'expression': [
'allof',
'exists',
['anyof', ['type', 'f'], ['type', 'l']],
['anyof', ['match', '**/*.java', 'wholename', {}]],
]
},
query_params)
def test_watchman_query_params_includes_and_excludes(self):
query_params = format_watchman_query_params(
['**/*.java'],
['**/*Test.java'],
False,
'/path/to/glob')
self.assertEquals(
{
'relative_root': '/path/to/glob',
'path': [''],
'fields': ['name'],
'expression': [
'allof',
'exists',
['anyof', ['type', 'f'], ['type', 'l']],
['anyof', ['match', '**/*.java', 'wholename', {}]],
['not', ['anyof', ['match', '**/*Test.java', 'wholename', {}]]],
]
},
query_params)
if __name__ == '__main__':
unittest.main()
| Learn-Android-app/buck | src/com/facebook/buck/json/buck_test.py | Python | apache-2.0 | 11,264 |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
from contextlib import contextmanager
from StringIO import StringIO
import mock
from nose.tools import raises
from . import prompt as _
class TestValueToStr():
def test_none(self):
# pass none to value_to_str
assert _.value_to_str(None) == '', 'passing None should return an empty string'
def test_nonstring(self):
# pass a non-string value to value_to_str
assert _.value_to_str(1) == '1', 'passing 1 should return the string "1"'
class TestSuggestion():
@raises(ValueError)
def test_new_bad_char_type(self):
# pass a non-string type as char to suggestion
_.Suggestion(None, 1)
@raises(ValueError)
def test_new_bad_multichar(self):
# pass multiple chars where one is expected
_.Suggestion(None, 'badvalue')
def test_str_method(self):
# test __str__ method of Suggestion
suggestion = _.Suggestion('alpha', 'a', 'test', True)
strval = str(suggestion)
expect = '<Suggestion char="a" desc="test" value="alpha" default>'
assert strval == expect, 'Suggestion is not producing the correct string value %s' % expect
@contextmanager
def mockInput(fn):
original = __builtins__['raw_input']
__builtins__['raw_input'] = fn
yield
__builtins__['raw_input'] = original
class TestGetInput():
def setUp(self):
self.suggestions = [_.Suggestion('alpha', 'a', 'test', False)]
@raises(SystemExit)
def test_get_input_sys_exit(self):
# bad input from user
def temp(_):
raise KeyboardInterrupt
with mockInput(temp):
_.get_input('Test', lambda _: True, self.suggestions)
def test_get_input_empty_then_full(self):
# test both major paths of get_input
# Python 2 does not have the 'nonlocal' keyword, so we fudge the closure with an object.
class Temp:
def __init__(self):
self.flag = False
def __call__(self, _):
if not self.flag:
self.flag = True
return ''
else:
return 'a'
with mockInput(Temp()):
assert _.get_input('Test', lambda x: x, self.suggestions) == 'alpha', 'get_input should return "alpha" for input "a"'
def test_get_input_empty_default(self):
# empty input should choose the default
self.suggestions[0].default = True
with mockInput(lambda _: ''):
assert _.get_input('Test', lambda x: x+'_validated', self.suggestions) == 'alpha_validated', 'get_input should return the default value "alpha"'
def test_get_input_empty_default_no_validator(self):
# empty input should choose the default and not validate
self.suggestions[0].default = True
with mockInput(lambda _: ''):
assert _.get_input('Test', suggestions=self.suggestions) == 'alpha', 'get_input should return the default value "alpha"'
@mock.patch('os.path.expanduser')
def test_get_input_path(self, mock_expanduser):
# should correctly validate path
mock_expanduser.side_effect = lambda x: '/path'+x
with mockInput(lambda _: '/test'):
assert _.get_input(validator=lambda x: x, is_path=True) == '/path/test', 'get_input should return the default value "alpha"'
| delectable/DIGITS | digits/config/test_prompt.py | Python | bsd-3-clause | 3,424 |
# Copyright (C) 2011-2016 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This file is part of the GDB testsuite. It tests python Finish
# Breakpoints.
class ExceptionFinishBreakpoint(gdb.FinishBreakpoint):
def __init__(self, frame):
gdb.FinishBreakpoint.__init__ (self, frame, internal=1)
self.silent = True
print ("init ExceptionFinishBreakpoint")
def stop(self):
print ("stopped at ExceptionFinishBreakpoint")
return True
def out_of_scope(self):
print ("exception did not finish ...")
print ("Python script imported")
| freak97/binutils | gdb/testsuite/gdb.python/py-finish-breakpoint2.py | Python | gpl-2.0 | 1,221 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_disks
short_description: "Module to manage Virtual Machine and floating disks in oVirt"
version_added: "2.2"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage Virtual Machine and floating disks in oVirt."
options:
id:
description:
- "ID of the disk to manage. Either C(id) or C(name) is required."
name:
description:
- "Name of the disk to manage. Either C(id) or C(name)/C(alias) is required."
aliases: ['alias']
vm_name:
description:
- "Name of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
vm_id:
description:
- "ID of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
state:
description:
- "Should the Virtual Machine disk be present/absent/attached/detached."
choices: ['present', 'absent', 'attached', 'detached']
default: 'present'
image_path:
description:
- "Path to disk image, which should be uploaded."
- "Note that currently we support only compability version 0.10 of the qcow disk."
- "Note that you must have an valid oVirt engine CA in your system trust store
or you must provide it in C(ca_file) parameter."
- "Note that there is no reliable way to achieve idempotency, so
if you want to upload the disk even if the disk with C(id) or C(name) exists,
           then please use C(force) I(true). If you use C(force) I(false), which
           is the default, then the disk image won't be uploaded."
version_added: "2.3"
size:
description:
- "Size of the disk. Size should be specified using IEC standard units.
For example 10GiB, 1024MiB, etc."
- "Size can be only increased, not decreased."
interface:
description:
- "Driver of the storage interface."
choices: ['virtio', 'ide', 'virtio_scsi']
default: 'virtio'
format:
description:
- Specify format of the disk.
        - If (cow) format is used, disk will be created as sparse, so space will be allocated for the volume as needed, also known as I(thin provision).
- If (raw) format is used, disk storage will be allocated right away, also known as I(preallocated).
- Note that this option isn't idempotent as it's not currently possible to change format of the disk via API.
choices: ['raw', 'cow']
storage_domain:
description:
- "Storage domain name where disk should be created. By default storage is chosen by oVirt engine."
storage_domains:
description:
- "Storage domain names where disk should be copied."
- "C(**IMPORTANT**)"
- "There is no reliable way to achieve idempotency, so every time
you specify this parameter the disks are copied, so please handle
your playbook accordingly to not copy the disks all the time. This
           is valid only for VM and floating disks; template disks work
as expected."
version_added: "2.3"
force:
description:
- "Please take a look at C(image_path) documentation to see the correct
usage of this parameter."
version_added: "2.3"
profile:
description:
- "Disk profile name to be attached to disk. By default profile is chosen by oVirt engine."
bootable:
description:
- "I(True) if the disk should be bootable. By default when disk is created it isn't bootable."
shareable:
description:
- "I(True) if the disk should be shareable. By default when disk is created it isn't shareable."
logical_unit:
description:
- "Dictionary which describes LUN to be directly attached to VM:"
- "C(address) - Address of the storage server. Used by iSCSI."
- "C(port) - Port of the storage server. Used by iSCSI."
- "C(target) - iSCSI target."
- "C(lun_id) - LUN id."
- "C(username) - CHAP Username to be used to access storage server. Used by iSCSI."
- "C(password) - CHAP Password of the user to be used to access storage server. Used by iSCSI."
- "C(storage_type) - Storage type either I(fcp) or I(iscsi)."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create and attach new disk to VM
- ovirt_disks:
name: myvm_disk
vm_name: rhel7
size: 10GiB
format: cow
interface: virtio
# Attach logical unit to VM rhel7
- ovirt_disks:
vm_name: rhel7
logical_unit:
target: iqn.2016-08-09.brq.str-01:omachace
id: 1IET_000d0001
address: 10.34.63.204
interface: virtio
# Detach disk from VM
- ovirt_disks:
state: detached
name: myvm_disk
vm_name: rhel7
size: 10GiB
format: cow
interface: virtio
# Upload local image to disk and attach it to vm:
# Since Ansible 2.3
- ovirt_disks:
name: mydisk
vm_name: myvm
interface: virtio
size: 10GiB
format: cow
image_path: /path/to/mydisk.qcow2
storage_domain: data
'''
RETURN = '''
id:
description: "ID of the managed disk"
returned: "On success if disk is found."
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
disk:
description: "Dictionary of all the disk attributes. Disk attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/disk."
returned: "On success if disk is found and C(vm_id) or C(vm_name) wasn't passed."
disk_attachment:
description: "Dictionary of all the disk attachment attributes. Disk attachment attributes can be found
on your oVirt instance at following url:
https://ovirt.example.com/ovirt-engine/api/model#types/disk_attachment."
returned: "On success if disk is found and C(vm_id) or C(vm_name) was passed and VM was found."
'''
import os
import time
import traceback
import ssl
from httplib import HTTPSConnection
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
convert_to_bytes,
equal,
follow_link,
ovirt_full_argument_spec,
search_by_name,
wait,
)
def _search_by_lun(disks_service, lun_id):
"""
Find disk by LUN ID.
"""
res = [
disk for disk in disks_service.list(search='disk_type=lun') if (
disk.lun_storage.id == lun_id
)
]
return res[0] if res else None
def upload_disk_image(connection, module):
size = os.path.getsize(module.params['image_path'])
transfers_service = connection.system_service().image_transfers_service()
transfer = transfers_service.add(
otypes.ImageTransfer(
image=otypes.Image(
id=module.params['id'],
)
)
)
transfer_service = transfers_service.image_transfer_service(transfer.id)
try:
# After adding a new transfer for the disk, the transfer's status will be INITIALIZING.
# Wait until the init phase is over. The actual transfer can start when its status is "Transferring".
while transfer.phase == otypes.ImageTransferPhase.INITIALIZING:
time.sleep(module.params['poll_interval'])
transfer = transfer_service.get()
# Set needed headers for uploading:
upload_headers = {
'Authorization': transfer.signed_ticket,
}
proxy_url = urlparse(transfer.proxy_url)
context = ssl.create_default_context()
auth = module.params['auth']
if auth.get('insecure'):
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
elif auth.get('ca_file'):
context.load_verify_locations(cafile=auth.get('ca_file'))
proxy_connection = HTTPSConnection(
proxy_url.hostname,
proxy_url.port,
context=context,
)
with open(module.params['image_path'], "rb") as disk:
chunk_size = 1024 * 1024 * 8
pos = 0
while pos < size:
transfer_service.extend()
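                # Illustrative: for a 20 MiB image the first chunk sends
                # "Content-Range: bytes 0-8388607/20971520" (8 MiB chunks, inclusive end offset).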
upload_headers['Content-Range'] = "bytes %d-%d/%d" % (pos, min(pos + chunk_size, size) - 1, size)
proxy_connection.request(
'PUT',
proxy_url.path,
disk.read(chunk_size),
headers=upload_headers,
)
r = proxy_connection.getresponse()
if r.status >= 400:
raise Exception("Failed to upload disk image.")
pos += chunk_size
finally:
transfer_service.finalize()
while transfer.phase in [
otypes.ImageTransferPhase.TRANSFERRING,
otypes.ImageTransferPhase.FINALIZING_SUCCESS,
]:
time.sleep(module.params['poll_interval'])
transfer = transfer_service.get()
if transfer.phase in [
otypes.ImageTransferPhase.UNKNOWN,
otypes.ImageTransferPhase.FINISHED_FAILURE,
otypes.ImageTransferPhase.FINALIZING_FAILURE,
otypes.ImageTransferPhase.CANCELLED,
]:
raise Exception(
"Error occured while uploading image. The transfer is in %s" % transfer.phase
)
if module.params.get('logical_unit'):
disks_service = connection.system_service().disks_service()
wait(
service=disks_service.service(module.params['id']),
condition=lambda d: d.status == otypes.DiskStatus.OK,
wait=module.params['wait'],
timeout=module.params['timeout'],
)
return True
class DisksModule(BaseModule):
def build_entity(self):
logical_unit = self._module.params.get('logical_unit')
return otypes.Disk(
id=self._module.params.get('id'),
name=self._module.params.get('name'),
description=self._module.params.get('description'),
format=otypes.DiskFormat(
self._module.params.get('format')
) if self._module.params.get('format') else None,
sparse=self._module.params.get('format') != 'raw',
provisioned_size=convert_to_bytes(
self._module.params.get('size')
),
storage_domains=[
otypes.StorageDomain(
name=self._module.params.get('storage_domain'),
),
],
shareable=self._module.params.get('shareable'),
lun_storage=otypes.HostStorage(
type=otypes.StorageType(
logical_unit.get('storage_type', 'iscsi')
),
logical_units=[
otypes.LogicalUnit(
address=logical_unit.get('address'),
port=logical_unit.get('port', 3260),
target=logical_unit.get('target'),
id=logical_unit.get('id'),
username=logical_unit.get('username'),
password=logical_unit.get('password'),
)
],
) if logical_unit else None,
)
def update_storage_domains(self, disk_id):
changed = False
disk_service = self._service.service(disk_id)
disk = disk_service.get()
sds_service = self._connection.system_service().storage_domains_service()
        # We don't support move & copy for non-file-based storages:
if disk.storage_type != otypes.DiskStorageType.IMAGE:
return changed
# Initiate move:
if self._module.params['storage_domain']:
new_disk_storage = search_by_name(sds_service, self._module.params['storage_domain'])
changed = self.action(
action='move',
entity=disk,
action_condition=lambda d: new_disk_storage.id != d.storage_domains[0].id,
wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
storage_domain=otypes.StorageDomain(
id=new_disk_storage.id,
),
post_action=lambda _: time.sleep(self._module.params['poll_interval']),
)['changed']
if self._module.params['storage_domains']:
for sd in self._module.params['storage_domains']:
new_disk_storage = search_by_name(sds_service, sd)
changed = changed or self.action(
action='copy',
entity=disk,
action_condition=(
lambda disk: new_disk_storage.id not in [sd.id for sd in disk.storage_domains]
),
wait_condition=lambda disk: disk.status == otypes.DiskStatus.OK,
storage_domain=otypes.StorageDomain(
id=new_disk_storage.id,
),
)['changed']
return changed
def _update_check(self, entity):
return (
equal(self._module.params.get('description'), entity.description) and
equal(convert_to_bytes(self._module.params.get('size')), entity.provisioned_size) and
equal(self._module.params.get('shareable'), entity.shareable)
)
class DiskAttachmentsModule(DisksModule):
def build_entity(self):
return otypes.DiskAttachment(
disk=super(DiskAttachmentsModule, self).build_entity(),
interface=otypes.DiskInterface(
self._module.params.get('interface')
) if self._module.params.get('interface') else None,
bootable=self._module.params.get('bootable'),
active=True,
)
def update_check(self, entity):
return (
super(DiskAttachmentsModule, self)._update_check(follow_link(self._connection, entity.disk)) and
equal(self._module.params.get('interface'), str(entity.interface)) and
equal(self._module.params.get('bootable'), entity.bootable)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent', 'attached', 'detached'],
default='present'
),
id=dict(default=None),
name=dict(default=None, aliases=['alias']),
vm_name=dict(default=None),
vm_id=dict(default=None),
size=dict(default=None),
interface=dict(default=None,),
storage_domain=dict(default=None),
storage_domains=dict(default=None, type='list'),
profile=dict(default=None),
format=dict(default='cow', choices=['raw', 'cow']),
bootable=dict(default=None, type='bool'),
shareable=dict(default=None, type='bool'),
logical_unit=dict(default=None, type='dict'),
image_path=dict(default=None),
force=dict(default=False, type='bool'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
disk = None
state = module.params['state']
connection = create_connection(module.params.get('auth'))
disks_service = connection.system_service().disks_service()
disks_module = DisksModule(
connection=connection,
module=module,
service=disks_service,
)
lun = module.params.get('logical_unit')
if lun:
disk = _search_by_lun(disks_service, lun.get('id'))
ret = None
# First take care of creating the VM, if needed:
if state == 'present' or state == 'detached' or state == 'attached':
ret = disks_module.create(
entity=disk,
result_state=otypes.DiskStatus.OK if lun is None else None,
)
is_new_disk = ret['changed']
ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
# We need to pass ID to the module, so in case we want detach/attach disk
# we have this ID specified to attach/detach method:
module.params['id'] = ret['id'] if disk is None else disk.id
# Upload disk image in case it's new disk or force parameter is passed:
if module.params['image_path'] and (is_new_disk or module.params['force']):
uploaded = upload_disk_image(connection, module)
ret['changed'] = ret['changed'] or uploaded
elif state == 'absent':
ret = disks_module.remove()
# If VM was passed attach/detach disks to/from the VM:
        if (module.params.get('vm_id') is not None or module.params.get('vm_name') is not None) and state != 'absent':
vms_service = connection.system_service().vms_service()
# If `vm_id` isn't specified, find VM by name:
vm_id = module.params['vm_id']
if vm_id is None:
vm_id = getattr(search_by_name(vms_service, module.params['vm_name']), 'id', None)
if vm_id is None:
module.fail_json(
msg="VM don't exists, please create it first."
)
disk_attachments_service = vms_service.vm_service(vm_id).disk_attachments_service()
disk_attachments_module = DiskAttachmentsModule(
connection=connection,
module=module,
service=disk_attachments_service,
changed=ret['changed'] if ret else False,
)
if state == 'present' or state == 'attached':
ret = disk_attachments_module.create()
if lun is None:
wait(
service=disk_attachments_service.service(ret['id']),
condition=lambda d:follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
wait=module.params['wait'],
timeout=module.params['timeout'],
)
elif state == 'detached':
ret = disk_attachments_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == "__main__":
main()
| civisanalytics/ansible | lib/ansible/modules/cloud/ovirt/ovirt_disks.py | Python | gpl-3.0 | 20,012 |
# This import depends on the automake rule protoc_middleman, please make sure
# protoc_middleman has been built before run this file.
import json
import re
import os.path
# BEGIN OPENSOURCE
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
# END OPENSOURCE
import tmp.benchmarks_pb2 as benchmarks_pb2
__file_size_map = {}
def __get_data_size(filename):
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + "/../" + filename
if filename in __file_size_map:
return __file_size_map[filename]
benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
benchmark_dataset.ParseFromString(
open(filename, "rb").read())
size = 0
count = 0
for payload in benchmark_dataset.payload:
size += len(payload)
count += 1
__file_size_map[filename] = (size, 1.0 * size / count)
return size, 1.0 * size / count
def __extract_file_name(file_name):
name_list = re.split(r"[/\.]", file_name)
short_file_name = ""
for name in name_list:
if name[:14] == "google_message":
short_file_name = name
return short_file_name
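# Illustrative: "datasets/google_message1/proto3/dataset.google_message1_proto3.pb"
# reduces to "google_message1_proto3" (the last "/"- or "."-delimited component
# starting with "google_message" wins).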
__results = []
# CPP results example:
# [
# "benchmarks": [
# {
# "bytes_per_second": int,
# "cpu_time_ns": double,
# "iterations": int,
# "name: string,
# "real_time_ns: double,
# ...
# },
# ...
# ],
# ...
# ]
def __parse_cpp_result(filename):
if filename == "":
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
results = json.loads(f.read())
for benchmark in results["benchmarks"]:
data_filename = "".join(
re.split("(_parse_|_serialize)", benchmark["name"])[0])
behavior = benchmark["name"][len(data_filename) + 1:]
if data_filename[:2] == "BM":
data_filename = data_filename[3:]
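    # bytes_per_second comes straight from the C++ benchmark JSON; dividing by 2**20 reports MiB/s.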
__results.append({
"language": "cpp",
"dataFilename": data_filename,
"behavior": behavior,
"throughput": benchmark["bytes_per_second"] / 2.0 ** 20
})
# Synthetic benchmark results example:
# [
# "benchmarks": [
# {
# "cpu_time_ns": double,
# "iterations": int,
# "name: string,
# "real_time_ns: double,
# ...
# },
# ...
# ],
# ...
# ]
def __parse_synthetic_result(filename):
if filename == "":
return
if filename[0] != "/":
filename = os.path.dirname(os.path.abspath(__file__)) + "/" + filename
with open(filename, "rb") as f:
results = json.loads(f.read())
for benchmark in results["benchmarks"]:
__results.append({
"language": "cpp",
"dataFilename": "",
"behavior": "synthetic",
"throughput": 10.0**9 / benchmark["cpu_time_ns"]
})
# Python results example:
# [
# [
# {
# "filename": string,
# "benchmarks": {
# behavior: results,
# ...
# },
# },
# ...
# ], #pure-python
# ...
# ]
def __parse_python_result(filename):
if filename == "":
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
results_list = json.loads(f.read())
for results in results_list:
for result in results:
_, avg_size = __get_data_size(result["filename"])
for behavior in result["benchmarks"]:
__results.append({
"language": "python",
"dataFilename": __extract_file_name(result["filename"]),
"behavior": behavior,
"throughput": result["benchmarks"][behavior]
})
# Java results example:
# [
# {
# "id": string,
# "instrumentSpec": {...},
# "measurements": [
# {
# "weight": float,
# "value": {
# "magnitude": float,
# "unit": string
# },
# ...
# },
# ...
# ],
# "run": {...},
# "scenario": {
# "benchmarkSpec": {
# "methodName": string,
# "parameters": {
# defined parameters in the benchmark: parameters value
# },
# ...
# },
# ...
# }
#
# },
# ...
# ]
def __parse_java_result(filename):
if filename == "":
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
results = json.loads(f.read())
for result in results:
total_weight = 0
total_value = 0
for measurement in result["measurements"]:
total_weight += measurement["weight"]
total_value += measurement["value"]["magnitude"]
avg_time = total_value * 1.0 / total_weight
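    # Illustrative (assuming Caliper magnitudes are nanoseconds): throughput =
    # total_size / avg_time * 1e9 = bytes/s, then / 2**20 to report MiB/s.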
total_size, _ = __get_data_size(
result["scenario"]["benchmarkSpec"]["parameters"]["dataFile"])
__results.append({
"language": "java",
"throughput": total_size / avg_time * 1e9 / 2 ** 20,
"behavior": result["scenario"]["benchmarkSpec"]["methodName"],
"dataFilename": __extract_file_name(
result["scenario"]["benchmarkSpec"]["parameters"]["dataFile"])
})
# Go benchmark results:
#
# goos: linux
# goarch: amd64
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Unmarshal-12 3000 705784 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Marshal-12 2000 634648 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Size-12 5000 244174 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Clone-12 300 4120954 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Merge-12 300 4108632 ns/op
# PASS
# ok _/usr/local/google/home/yilunchong/mygit/protobuf/benchmarks 124.173s
def __parse_go_result(filename):
if filename == "":
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
for line in f:
result_list = re.split(r"[\ \t]+", line)
if result_list[0][:9] != "Benchmark":
continue
first_slash_index = result_list[0].find('/')
last_slash_index = result_list[0].rfind('/')
full_filename = result_list[0][first_slash_index+1:last_slash_index]
total_bytes, _ = __get_data_size(full_filename)
behavior_with_suffix = result_list[0][last_slash_index+1:]
last_dash = behavior_with_suffix.rfind("-")
if last_dash == -1:
behavior = behavior_with_suffix
else:
behavior = behavior_with_suffix[:last_dash]
__results.append({
"dataFilename": __extract_file_name(full_filename),
"throughput": total_bytes / float(result_list[2]) * 1e9 / 2 ** 20,
"behavior": behavior,
"language": "go"
})
# Self built json results example:
#
# [
# {
# "filename": string,
# "benchmarks": {
# behavior: results,
# ...
# },
# },
# ...
# ]
def __parse_custom_result(filename, language):
if filename == "":
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
results = json.loads(f.read())
for result in results:
_, avg_size = __get_data_size(result["filename"])
for behavior in result["benchmarks"]:
__results.append({
"language": language,
"dataFilename": __extract_file_name(result["filename"]),
"behavior": behavior,
"throughput": result["benchmarks"][behavior]
})
def __parse_js_result(filename, language):
return __parse_custom_result(filename, language)
def __parse_php_result(filename, language):
return __parse_custom_result(filename, language)
def get_result_from_file(cpp_file="",
java_file="",
python_file="",
go_file="",
synthetic_file="",
node_file="",
php_c_file="",
php_file=""):
results = {}
if cpp_file != "":
__parse_cpp_result(cpp_file)
if java_file != "":
__parse_java_result(java_file)
if python_file != "":
__parse_python_result(python_file)
if go_file != "":
__parse_go_result(go_file)
if synthetic_file != "":
__parse_synthetic_result(synthetic_file)
if node_file != "":
__parse_js_result(node_file, "node")
if php_file != "":
__parse_php_result(php_file, "php")
if php_c_file != "":
__parse_php_result(php_c_file, "php")
return __results
| scheib/chromium | third_party/protobuf/benchmarks/util/result_parser.py | Python | bsd-3-clause | 8,710 |
#!/usr/bin/env python
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
from argparse import ArgumentParser
import django
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TestCase, TransactionTestCase
from django.test.utils import get_runner
from django.utils import six
from django.utils._os import upath
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.log import DEFAULT_LOGGING
warnings.simplefilter("error", RemovedInDjango110Warning)
warnings.simplefilter("error", RemovedInDjango20Warning)
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_discovery_sample',
'test_discovery_sample2',
'test_runner_deprecation_app',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and either isn't in an application in INSTALLED_APPS or else was imported
# before its application was loaded."
CONTRIB_TESTS_TO_APPS = {
'flatpages_tests': 'django.contrib.flatpages',
'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
modules = []
discovery_paths = [
(None, RUNTESTS_DIR),
# GIS tests are in nested apps
('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')),
]
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
modules.append((modpath, f))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels):
if verbosity >= 1:
print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
# Remove the following line in Django 1.10.
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
# Remove the following line in Django 1.10.
settings.TEMPLATE_DIRS = [TEMPLATE_DIR]
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE_CLASSES = ALWAYS_MIDDLEWARE_CLASSES
settings.MIGRATION_MODULES = {
# these 'tests.migrations' modules don't actually exist, but this lets
# us skip creating migrations for the test models.
'auth': 'django.contrib.auth.tests.migrations',
'contenttypes': 'contenttypes_tests.migrations',
}
log_config = DEFAULT_LOGGING
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
if verbosity > 0:
# Ensure any warnings captured to logging are piped through a verbose
# logging handler. If any -W options were passed explicitly on command
# line, warnings are not captured, and this has no effect.
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
warnings.filterwarnings(
'ignore',
'django.contrib.webdesign will be removed in Django 1.10.',
RemovedInDjango110Warning
)
warnings.filterwarnings(
'ignore',
'The GeoManager class is deprecated.',
RemovedInDjango20Warning
)
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# Load all the test model apps.
test_modules = get_test_modules()
# Reduce given test labels to just the app module path
test_labels_set = set()
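    # e.g. "i18n.tests.TranslationTests.test_lazy_objects" reduces to "i18n"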
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = '.'.join([modpath, module_name])
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
    # @override_settings(INSTALLED_APPS=...) on all test cases).
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
try:
        # Remove the temporary TMPDIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(six.text_type(TMPDIR))
except OSError:
print('Failed to remove temp directory: %s' % TMPDIR)
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, keepdb, reverse, test_labels, debug_sql):
state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
)
failures = test_runner.run_tests(
test_labels or get_installed(),
extra_tests=extra_tests,
)
teardown(state)
return failures
def bisect_tests(bisection_label, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
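# Example (illustrative): if the "aggregation" tests fail only when run
# alongside some other test, `./runtests.py --bisect=aggregation` repeatedly
# halves the remaining labels (always re-adding "aggregation") until the
# offending companion test is isolated.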
def paired_tests(paired_test, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
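# Example (illustrative): `./runtests.py --pair=aggregation` runs every other
# label together with "aggregation", one subprocess per pairing, and reports
# the first pairing that fails.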
if __name__ == "__main__":
parser = ArgumentParser(description="Run the Django test suite.")
parser.add_argument('modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".')
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb', default=False,
help='Tells Django to preserve the test database between runs.')
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.')
parser.add_argument('--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_argument('--pair',
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_argument('--reverse', action='store_true', default=False,
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.')
parser.add_argument('--liveserver',
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_argument(
'--selenium', action='store_true', dest='selenium', default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
parser.add_argument(
'--debug-sql', action='store_true', dest='debug_sql', default=False,
help='Turn on the SQL query logger within tests')
options = parser.parse_args()
# mock is a required dependency
try:
from django.test import mock # NOQA
except ImportError:
print(
"Please install test dependencies first: \n"
"$ pip install -r requirements/py%s.txt" % sys.version_info.major
)
sys.exit(1)
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, options.modules)
elif options.pair:
paired_tests(options.pair, options, options.modules)
else:
failures = django_tests(options.verbosity, options.interactive,
options.failfast, options.keepdb,
options.reverse, options.modules,
options.debug_sql)
if failures:
sys.exit(bool(failures))
| tragiclifestories/django | tests/runtests.py | Python | bsd-3-clause | 16,531 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
# Based on pacman (Afterburn <http://github.com/afterburn>, Aaron Bull Schaefer <[email protected]>)
# and apt (Matthew Williams <[email protected]>) modules.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apk
short_description: Manages apk packages
description:
- Manages I(apk) packages for Alpine Linux.
author: "Kevin Brebanov (@kbrebanov)"
version_added: "2.0"
options:
available:
description:
- During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
if the currently installed package is no longer available from any repository.
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.4"
name:
description:
- A package name, like C(foo), or multiple packages, like C(foo, bar).
required: false
default: null
repository:
description:
- A package repository or multiple repositories.
Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
required: false
default: null
version_added: "2.4"
state:
description:
- Indicates the desired package(s) state.
- C(present) ensures the package(s) is/are present.
- C(absent) ensures the package(s) is/are absent.
- C(latest) ensures the package(s) is/are present and the latest version(s).
required: false
default: present
choices: [ "present", "absent", "latest" ]
update_cache:
description:
      - Update repository indexes. Can be run with other steps or on its own.
required: false
default: no
choices: [ "yes", "no" ]
upgrade:
description:
- Upgrade all installed packages to their latest version.
required: false
default: no
choices: [ "yes", "no" ]
notes:
- '"name" and "upgrade" are mutually exclusive.'
  - When used with a `loop:`, each package is processed individually; it is much more efficient to pass the list directly to the `name` option.
'''
EXAMPLES = '''
# Update repositories and install "foo" package
- apk:
name: foo
update_cache: yes
# Update repositories and install "foo" and "bar" packages
- apk:
name: foo,bar
update_cache: yes
# Remove "foo" package
- apk:
name: foo
state: absent
# Remove "foo" and "bar" packages
- apk:
name: foo,bar
state: absent
# Install the package "foo"
- apk:
name: foo
state: present
# Install the packages "foo" and "bar"
- apk:
name: foo,bar
state: present
# Update repositories and update package "foo" to latest version
- apk:
name: foo
state: latest
update_cache: yes
# Update repositories and update packages "foo" and "bar" to latest versions
- apk:
name: foo,bar
state: latest
update_cache: yes
# Update all installed packages to the latest versions
- apk:
upgrade: yes
# Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available
- apk:
available: yes
upgrade: yes
# Update repositories as a separate step
- apk:
update_cache: yes
# Install package from a specific repository
- apk:
name: foo
state: latest
update_cache: yes
repository: http://dl-3.alpinelinux.org/alpine/edge/main
'''
RETURN = '''
packages:
description: a list of packages that have been changed
returned: when packages have changed
type: list
sample: ['package', 'other-package']
'''
import re
# Import module snippets.
from ansible.module_utils.basic import AnsibleModule
def parse_for_packages(stdout):
packages = []
data = stdout.split('\n')
regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
for l in data:
p = regex.search(l)
if p:
packages.append(p.group(1))
return packages
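# Example (illustrative): given apk output such as
#   (1/2) Installing libffi (3.2.1-r3)
# parse_for_packages() returns ['libffi'] -- the third whitespace-separated
# field, captured by the regex above.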
def update_package_db(module, exit):
cmd = "%s update" % (APK_PATH)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
elif exit:
module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
else:
return True
def query_toplevel(module, name):
# /etc/apk/world contains a list of top-level packages separated by ' ' or \n
# packages may contain repository (@) or version (=<>~) separator characters or start with negation !
regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$')
with open('/etc/apk/world') as f:
content = f.read().split()
for p in content:
if regex.search(p):
return True
return False
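# Example (illustrative): with /etc/apk/world containing
#   busybox foo=1.2.3 bar@edge
# query_toplevel() is True for 'foo' and 'bar' (version/repository suffixes
# are allowed by the regex) but False for 'foobar'.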
def query_package(module, name):
cmd = "%s -v info --installed %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
return False
def query_latest(module, name):
cmd = "%s version %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
match = re.search(search_pattern, stdout)
if match and match.group(2) == "<":
return False
return True
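# Example (illustrative): `apk version foo` prints a comparison line roughly
# like "foo-1.2.3-r0 < 1.2.4-r0"; match.group(2) == "<" means a newer version
# is available, so query_latest() returns False.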
def query_virtual(module, name):
cmd = "%s -v info --description %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = r"^%s: virtual meta package" % (re.escape(name))
if re.search(search_pattern, stdout):
return True
return False
def get_dependencies(module, name):
cmd = "%s -v info --depends %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
dependencies = stdout.split()
if len(dependencies) > 1:
return dependencies[1:]
else:
return []
def upgrade_packages(module, available):
if module.check_mode:
cmd = "%s upgrade --simulate" % (APK_PATH)
else:
cmd = "%s upgrade" % (APK_PATH)
if available:
cmd = "%s --available" % cmd
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
if re.search(r'^OK', stdout):
module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
def install_packages(module, names, state):
upgrade = False
to_install = []
to_upgrade = []
for name in names:
# Check if virtual package
if query_virtual(module, name):
# Get virtual package dependencies
dependencies = get_dependencies(module, name)
for dependency in dependencies:
if state == 'latest' and not query_latest(module, dependency):
to_upgrade.append(dependency)
else:
if not query_toplevel(module, name):
to_install.append(name)
elif state == 'latest' and not query_latest(module, name):
to_upgrade.append(name)
if to_upgrade:
upgrade = True
if not to_install and not upgrade:
module.exit_json(changed=False, msg="package(s) already installed")
packages = " ".join(to_install + to_upgrade)
if upgrade:
if module.check_mode:
cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add --upgrade %s" % (APK_PATH, packages)
else:
if module.check_mode:
cmd = "%s add --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add %s" % (APK_PATH, packages)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
def remove_packages(module, names):
installed = []
for name in names:
if query_package(module, name):
installed.append(name)
if not installed:
module.exit_json(changed=False, msg="package(s) already removed")
names = " ".join(installed)
if module.check_mode:
cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
else:
cmd = "%s del --purge %s" % (APK_PATH, names)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
# Check to see if packages are still present because of dependencies
for name in installed:
if query_package(module, name):
rc = 1
break
if rc != 0:
module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
# ==========================================
# Main control flow.
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
name=dict(type='list'),
repository=dict(type='list'),
update_cache=dict(default='no', type='bool'),
upgrade=dict(default='no', type='bool'),
available=dict(default='no', type='bool'),
),
required_one_of=[['name', 'update_cache', 'upgrade']],
mutually_exclusive=[['name', 'upgrade']],
supports_check_mode=True
)
# Set LANG env since we parse stdout
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
global APK_PATH
APK_PATH = module.get_bin_path('apk', required=True)
p = module.params
# add repositories to the APK_PATH
if p['repository']:
for r in p['repository']:
APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
if p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p['update_cache']:
update_package_db(module, not p['name'] and not p['upgrade'])
if p['upgrade']:
upgrade_packages(module, p['available'])
if p['state'] in ['present', 'latest']:
install_packages(module, p['name'], p['state'])
elif p['state'] == 'absent':
remove_packages(module, p['name'])
if __name__ == '__main__':
main()
| ravibhure/ansible | lib/ansible/modules/packaging/os/apk.py | Python | gpl-3.0 | 11,310 |
"""The tests for the Rfxtrx component."""
# pylint: disable=protected-access
import unittest
import pytest
from homeassistant.core import callback
from homeassistant.bootstrap import setup_component
from homeassistant.components import rfxtrx as rfxtrx
from tests.common import get_test_home_assistant
@pytest.mark.skipif("os.environ.get('RFXTRX') != 'RUN'")
class TestRFXTRX(unittest.TestCase):
"""Test the Rfxtrx component."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
rfxtrx.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx.RFX_DEVICES = {}
if rfxtrx.RFXOBJECT:
rfxtrx.RFXOBJECT.close_connection()
self.hass.stop()
def test_default_config(self):
"""Test configuration."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': True,
'devices': {}}}))
self.assertEqual(len(rfxtrx.RFXOBJECT.sensors()), 2)
def test_valid_config(self):
"""Test configuration."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}}))
self.hass.config.components.remove('rfxtrx')
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True,
'debug': True}}))
def test_invalid_config(self):
"""Test configuration."""
self.assertFalse(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {}
}))
self.assertFalse(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'invalid_key': True}}))
def test_fire_event(self):
"""Test fire event."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(setup_component(self.hass, 'switch', {
'switch': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'0b1100cd0213c7f210010f51': {
'name': 'Test',
rfxtrx.ATTR_FIREEVENT: True}
}}}))
calls = []
@callback
def record_event(event):
"""Add recorded event to set."""
calls.append(event)
self.hass.bus.listen(rfxtrx.EVENT_BUTTON_PRESSED, record_event)
self.hass.block_till_done()
entity = rfxtrx.RFX_DEVICES['213c7f216']
self.assertEqual('Test', entity.name)
self.assertEqual('off', entity.state)
self.assertTrue(entity.should_fire_event)
event = rfxtrx.get_rfx_object('0b1100cd0213c7f210010f51')
event.data = bytearray([0x0b, 0x11, 0x00, 0x10, 0x01, 0x18,
0xcd, 0xea, 0x01, 0x01, 0x0f, 0x70])
rfxtrx.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.hass.block_till_done()
self.assertEqual(event.values['Command'], "On")
self.assertEqual('on', entity.state)
self.assertEqual(self.hass.states.get('switch.test').state, 'on')
self.assertEqual(1, len(calls))
self.assertEqual(calls[0].data,
{'entity_id': 'switch.test', 'state': 'on'})
def test_fire_event_sensor(self):
"""Test fire event."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'0a520802060100ff0e0269': {
'name': 'Test',
rfxtrx.ATTR_FIREEVENT: True}
}}}))
calls = []
@callback
def record_event(event):
"""Add recorded event to set."""
calls.append(event)
self.hass.bus.listen("signal_received", record_event)
self.hass.block_till_done()
event = rfxtrx.get_rfx_object('0a520802060101ff0f0269')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
rfxtrx.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.hass.block_till_done()
self.assertEqual(1, len(calls))
self.assertEqual(calls[0].data,
{'entity_id': 'sensor.test'})
| xifle/home-assistant | tests/components/test_rfxtrx.py | Python | mit | 5,523 |
import jinja2
from jingo import register
from tower import ugettext_lazy as _lazy
from mkt.site.helpers import page_title
@register.function
@jinja2.contextfunction
def operators_page_title(context, title=None):
section = _lazy('Operator Dashboard')
title = u'%s | %s' % (title, section) if title else section
return page_title(context, title)
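# Example (illustrative): in a Jinja2 template,
#   {{ operators_page_title(title='Stats') }}
# composes u'Stats | Operator Dashboard' and delegates to the site-wide
# page_title() helper.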
| jamesthechamp/zamboni | mkt/operators/helpers.py | Python | bsd-3-clause | 360 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
class CashFlowMapper(Document):
pass
| ovresko/erpnext | erpnext/accounts/doctype/cash_flow_mapper/cash_flow_mapper.py | Python | gpl-3.0 | 267 |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
class Context:
"""Represents the context in which a command is being invoked under.
This class contains a lot of meta data to help you understand more about
the invocation context. This class is not created manually and is instead
passed around to commands by passing in :attr:`Command.pass_context`.
Attributes
-----------
message : :class:`discord.Message`
The message that triggered the command being executed.
bot : :class:`Bot`
The bot that contains the command being executed.
args : list
The list of transformed arguments that were passed into the command.
If this is accessed during the :func:`on_command_error` event
then this list could be incomplete.
kwargs : dict
A dictionary of transformed arguments that were passed into the command.
Similar to :attr:`args`\, if this is accessed in the
:func:`on_command_error` event then this dict could be incomplete.
prefix : str
The prefix that was used to invoke the command.
command
The command (i.e. :class:`Command` or its superclasses) that is being
invoked currently.
invoked_with : str
The command name that triggered this invocation. Useful for finding out
which alias called the command.
invoked_subcommand
The subcommand (i.e. :class:`Command` or its superclasses) that was
invoked. If no valid subcommand was invoked then this is equal to
`None`.
subcommand_passed : Optional[str]
The string that was attempted to call a subcommand. This does not have
to point to a valid registered subcommand and could just point to a
nonsense string. If nothing was passed to attempt a call to a
subcommand then this is set to `None`.
"""
def __init__(self, **attrs):
self.message = attrs.pop('message', None)
self.bot = attrs.pop('bot', None)
self.args = attrs.pop('args', [])
self.kwargs = attrs.pop('kwargs', {})
self.prefix = attrs.pop('prefix')
self.command = attrs.pop('command', None)
self.view = attrs.pop('view', None)
self.invoked_with = attrs.pop('invoked_with', None)
self.invoked_subcommand = attrs.pop('invoked_subcommand', None)
self.subcommand_passed = attrs.pop('subcommand_passed', None)
@asyncio.coroutine
def invoke(self, command, *args, **kwargs):
"""|coro|
Calls a command with the arguments given.
This is useful if you want to just call the callback that a
:class:`Command` holds internally.
Note
------
You do not pass in the context as it is done for you.
Parameters
-----------
command : :class:`Command`
A command or superclass of a command that is going to be called.
\*args
            The arguments to use.
\*\*kwargs
The keyword arguments to use.
"""
arguments = []
if command.instance is not None:
arguments.append(command.instance)
if command.pass_context:
arguments.append(self)
arguments.extend(args)
ret = yield from command.callback(*arguments, **kwargs)
return ret
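    # Example (illustrative; assumes an existing command object named `echo`):
    #
    #     @bot.command(pass_context=True)
    #     @asyncio.coroutine
    #     def shout(ctx, text):
    #         yield from ctx.invoke(echo, text.upper())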
@property
def cog(self):
"""Returns the cog associated with this context's command. None if it does not exist."""
if self.command is None:
return None
return self.command.instance
| LordDamionDevil/Lony | lib/discord/ext/commands/context.py | Python | gpl-3.0 | 4,638 |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
import numpy as np
from six import integer_types
from unittest import TestCase
import zipline.utils.factory as factory
from zipline.sources import (DataFrameSource,
DataPanelSource,
RandomWalkSource)
from zipline.utils import tradingcalendar as calendar_nyse
from zipline.assets import AssetFinder
class TestDataFrameSource(TestCase):
def test_df_source(self):
source, df = factory.create_test_df_source()
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for expected_dt, expected_price in df.iterrows():
sid0 = next(source)
assert expected_dt == sid0.dt
assert expected_price[0] == sid0.price
def test_df_sid_filtering(self):
_, df = factory.create_test_df_source()
source = DataFrameSource(df)
assert 1 not in [event.sid for event in source], \
"DataFrameSource should only stream selected sid 0, not sid 1."
def test_panel_source(self):
source, panel = factory.create_test_panel_source(source_type=5)
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertTrue('sid' in event)
self.assertTrue('arbitrary' in event)
self.assertTrue('type' in event)
self.assertTrue(hasattr(event, 'volume'))
self.assertTrue(hasattr(event, 'price'))
self.assertEquals(event['type'], 5)
self.assertEquals(event['arbitrary'], 1.)
self.assertEquals(event['sid'], 0)
self.assertTrue(isinstance(event['volume'], int))
self.assertTrue(isinstance(event['arbitrary'], float))
def test_yahoo_bars_to_panel_source(self):
finder = AssetFinder()
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = factory.load_bars_from_yahoo(stocks=stocks,
indexes={},
start=start,
end=end)
check_fields = ['sid', 'open', 'high', 'low', 'close',
'volume', 'price']
copy_panel = data.copy()
sids = finder.map_identifier_index_to_sids(
data.items, data.major_axis[0]
)
copy_panel.items = sids
source = DataPanelSource(copy_panel)
for event in source:
for check_field in check_fields:
self.assertIn(check_field, event)
self.assertTrue(isinstance(event['volume'], (integer_types)))
self.assertTrue(event['sid'] in sids)
def test_nan_filter_dataframe(self):
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.DataFrame(np.random.randn(2, 2),
index=dates,
columns=[4, 5])
# should be filtered
df.loc[dates[0], 4] = np.nan
# should not be filtered, should have been ffilled
df.loc[dates[1], 5] = np.nan
source = DataFrameSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
def test_nan_filter_panel(self):
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.Panel(np.random.randn(2, 2, 2),
major_axis=dates,
items=[4, 5],
minor_axis=['price', 'volume'])
# should be filtered
df.loc[4, dates[0], 'price'] = np.nan
# should not be filtered, should have been ffilled
df.loc[5, dates[1], 'price'] = np.nan
source = DataPanelSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
class TestRandomWalkSource(TestCase):
def test_minute(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1991-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end)
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertTrue(13 <= event.dt.hour <= 21,
"event.dt.hour == %i, not during market \
hours." % event.dt.hour)
def test_day(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1992-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end, freq='daily')
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertEqual(event.dt.hour, 0)
| cmorgan/zipline | tests/test_sources.py | Python | apache-2.0 | 7,041 |
import agents as ag
import envgui as gui
# change this line ONLY to refer to your project
import submissions.Porter.vacuum2 as v2
# ______________________________________________________________________________
# Vacuum environmenty
class Dirt(ag.Thing):
pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt,
# ReflexVacuumAgent, RandomVacuumAgent,
# TableDrivenVacuumAgent, ModelBasedVacuumAgent
]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
bump = ('Bump' if agent.bump else'None')
return (bump, status)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
# # Launch a Text-Based Environment
# print('Two Cells, Agent on Left:')
# v = VacuumEnvironment(4, 3)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (2, 1))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Right:')
# v = VacuumEnvironment(4, 3)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (2, 1))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (2, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Top:')
# v = VacuumEnvironment(3, 4)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (1, 2))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Bottom:')
# v = VacuumEnvironment(3, 4)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (1, 2))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 2))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
def testVacuum(label, w=4, h=3,
dloc=[(1,1),(2,1)],
vloc=(1,1),
limit=6):
print(label)
v = VacuumEnvironment(w, h)
for loc in dloc:
v.add_thing(Dirt(), loc)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
v.add_thing(a, vloc)
t = gui.EnvTUI(v)
t.mapImageNames({
ag.Wall: '#',
Dirt: '@',
ag.Agent: 'V',
})
t.step(0)
t.list_things(Dirt)
t.step(limit)
if len(t.env.get_things(Dirt)) > 0:
t.list_things(Dirt)
else:
print('All clean!')
# Check to continue
if input('Do you want to continue [Y/n]? ') == 'n':
exit(0)
else:
print('----------------------------------------')
testVacuum('Two Cells, Agent on Left:')
testVacuum('Two Cells, Agent on Right:', vloc=(2,1))
testVacuum('Two Cells, Agent on Top:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,1) )
testVacuum('Two Cells, Agent on Bottom:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,2) )
testVacuum('Five Cells, Agent on Left:', w=7, h=3,
dloc=[(2,1), (4,1)], vloc=(1,1), limit=12)
testVacuum('Five Cells, Agent near Right:', w=7, h=3,
dloc=[(2,1), (3,1)], vloc=(4,1), limit=12)
testVacuum('Five Cells, Agent on Top:', w=3, h=7,
dloc=[(1,2), (1,4)], vloc=(1,1), limit=12 )
testVacuum('Five Cells, Agent Near Bottom:', w=3, h=7,
dloc=[(1,2), (1,3)], vloc=(1,4), limit=12 )
testVacuum('5x4 Grid, Agent in Top Left:', w=7, h=6,
dloc=[(1,4), (2,2), (3, 3), (4,1), (5,2)],
vloc=(1,1), limit=46 )
testVacuum('5x4 Grid, Agent near Bottom Right:', w=7, h=6,
dloc=[(1,3), (2,2), (3, 4), (4,1), (5,2)],
vloc=(4, 3), limit=46 )
v = VacuumEnvironment(6, 3)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vacuum')
c = g.getCanvas()
c.mapImageNames({
ag.Wall: 'images/wall.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop() | austinban/aima-python | submissions/Porter/vacuum2Runner.py | Python | mit | 6,343 |
#!/usr/bin/env python
"""
Implement common functions for tests
"""
from __future__ import print_function
from __future__ import unicode_literals
import io
import sys
def parse_yaml(yaml_file):
"""
Parses a yaml file, returning its contents as a dict.
"""
try:
import yaml
except ImportError:
sys.exit("Unable to import yaml module.")
try:
with io.open(yaml_file, encoding='utf-8') as fname:
            return yaml.safe_load(fname)
except IOError:
sys.exit("Unable to open YAML file: {0}".format(yaml_file))
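# Example (illustrative):
#   devices = parse_yaml('test_devices.yml')   # -> dict parsed from the YAML file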
| ivandgreat/netmiko | tests/test_utils.py | Python | mit | 567 |
#
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
##################################################
# Imports
##################################################
import waterfall_window
import common
from gnuradio import gr, blks2
from pubsub import pubsub
from constants import *
##################################################
# Waterfall sink block (wrapper for old wxgui)
##################################################
class _waterfall_sink_base(gr.hier_block2, common.wxgui_hb):
"""
    A waterfall sink block with real/complex inputs and a gui window.
"""
def __init__(
self,
parent,
baseband_freq=0,
ref_level=50,
sample_rate=1,
fft_size=512,
fft_rate=waterfall_window.DEFAULT_FRAME_RATE,
average=False,
avg_alpha=None,
title='',
size=waterfall_window.DEFAULT_WIN_SIZE,
ref_scale=2.0,
dynamic_range=80,
num_lines=256,
win=None,
**kwargs #do not end with a comma
):
#ensure avg alpha
if avg_alpha is None: avg_alpha = 2.0/fft_rate
#init
gr.hier_block2.__init__(
self,
"waterfall_sink",
gr.io_signature(1, 1, self._item_size),
gr.io_signature(0, 0, 0),
)
#blocks
fft = self._fft_chain(
sample_rate=sample_rate,
fft_size=fft_size,
frame_rate=fft_rate,
ref_scale=ref_scale,
avg_alpha=avg_alpha,
average=average,
win=win,
)
msgq = gr.msg_queue(2)
sink = gr.message_sink(gr.sizeof_float*fft_size, msgq, True)
#controller
self.controller = pubsub()
self.controller.subscribe(AVERAGE_KEY, fft.set_average)
self.controller.publish(AVERAGE_KEY, fft.average)
self.controller.subscribe(AVG_ALPHA_KEY, fft.set_avg_alpha)
self.controller.publish(AVG_ALPHA_KEY, fft.avg_alpha)
self.controller.subscribe(SAMPLE_RATE_KEY, fft.set_sample_rate)
self.controller.publish(SAMPLE_RATE_KEY, fft.sample_rate)
self.controller.subscribe(DECIMATION_KEY, fft.set_decimation)
self.controller.publish(DECIMATION_KEY, fft.decimation)
self.controller.subscribe(FRAME_RATE_KEY, fft.set_vec_rate)
self.controller.publish(FRAME_RATE_KEY, fft.frame_rate)
#start input watcher
common.input_watcher(msgq, self.controller, MSG_KEY)
#create window
self.win = waterfall_window.waterfall_window(
parent=parent,
controller=self.controller,
size=size,
title=title,
real=self._real,
fft_size=fft_size,
num_lines=num_lines,
baseband_freq=baseband_freq,
decimation_key=DECIMATION_KEY,
sample_rate_key=SAMPLE_RATE_KEY,
frame_rate_key=FRAME_RATE_KEY,
dynamic_range=dynamic_range,
ref_level=ref_level,
average_key=AVERAGE_KEY,
avg_alpha_key=AVG_ALPHA_KEY,
msg_key=MSG_KEY,
)
common.register_access_methods(self, self.win)
setattr(self.win, 'set_baseband_freq', getattr(self, 'set_baseband_freq')) #BACKWARDS
#connect
self.wxgui_connect(self, fft, sink)
class waterfall_sink_f(_waterfall_sink_base):
_fft_chain = blks2.logpwrfft_f
_item_size = gr.sizeof_float
_real = True
class waterfall_sink_c(_waterfall_sink_base):
_fft_chain = blks2.logpwrfft_c
_item_size = gr.sizeof_gr_complex
_real = False
# ----------------------------------------------------------------
# Standalone test app
# ----------------------------------------------------------------
import wx
from gnuradio.wxgui import stdgui2
class test_top_block (stdgui2.std_top_block):
def __init__(self, frame, panel, vbox, argv):
stdgui2.std_top_block.__init__ (self, frame, panel, vbox, argv)
fft_size = 512
# build our flow graph
input_rate = 20.000e3
# Generate a complex sinusoid
self.src1 = gr.sig_source_c (input_rate, gr.GR_SIN_WAVE, 5.75e3, 1000)
#src1 = gr.sig_source_c (input_rate, gr.GR_CONST_WAVE, 5.75e3, 1000)
# We add these throttle blocks so that this demo doesn't
# suck down all the CPU available. Normally you wouldn't use these.
self.thr1 = gr.throttle(gr.sizeof_gr_complex, input_rate)
sink1 = waterfall_sink_c (panel, title="Complex Data", fft_size=fft_size,
sample_rate=input_rate, baseband_freq=100e3)
self.connect(self.src1, self.thr1, sink1)
vbox.Add (sink1.win, 1, wx.EXPAND)
# generate a real sinusoid
self.src2 = gr.sig_source_f (input_rate, gr.GR_SIN_WAVE, 5.75e3, 1000)
self.thr2 = gr.throttle(gr.sizeof_float, input_rate)
sink2 = waterfall_sink_f (panel, title="Real Data", fft_size=fft_size,
sample_rate=input_rate, baseband_freq=100e3)
self.connect(self.src2, self.thr2, sink2)
vbox.Add (sink2.win, 1, wx.EXPAND)
def main ():
app = stdgui2.stdapp (test_top_block, "Waterfall Sink Test App")
app.MainLoop ()
if __name__ == '__main__':
main ()
| pgoeser/gnuradio | gr-wxgui/src/python/waterfallsink_gl.py | Python | gpl-3.0 | 5,449 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.util.dirutil import safe_mkdir_for
class ReproMixin(object):
""" Additional helper methods for use in Repro tests"""
def add_file(self, root, path, content):
"""Add a file with specified contents
:param str root: Root directory for path.
:param str path: Path relative to root.
:param str content: Content to write to file.
"""
fullpath = os.path.join(root, path)
safe_mkdir_for(fullpath)
with open(fullpath, 'w') as outfile:
outfile.write(content)
def assert_not_exists(self, root, path):
"""Assert a file at relpath doesn't exist
:param str root: Root directory of path.
:param str path: Path relative to tar.gz.
:return: bool
"""
fullpath = os.path.join(root, path)
self.assertFalse(os.path.exists(fullpath))
def assert_file(self, root, path, expected_content=None):
""" Assert that a file exists with the content specified
:param str root: Root directory of path.
:param str path: Path relative to tar.gz.
:param str expected_content: file contents.
:return: bool
"""
fullpath = os.path.join(root, path)
self.assertTrue(os.path.isfile(fullpath))
if expected_content:
with open(fullpath, 'r') as infile:
content = infile.read()
self.assertEqual(expected_content, content)
| baroquebobcat/pants | tests/python/pants_test/init/repro_mixin.py | Python | apache-2.0 | 1,635 |
#!/usr/bin/python
#
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import unittest
import zipfile
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
try:
unicode
except NameError:
unicode = str
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
class TestFirefoxProfile:
def setup_method(self, method):
self.driver = webdriver.Firefox()
self.webserver = SimpleWebServer()
self.webserver.start()
def test_that_we_can_accept_a_profile(self):
profile1 = webdriver.FirefoxProfile()
profile1.set_preference("startup.homepage_welcome_url",
self.webserver.where_is('simpleTest.html'))
profile1.update_preferences()
profile2 = webdriver.FirefoxProfile(profile1.path)
driver = webdriver.Firefox(firefox_profile=profile2)
title = driver.title
driver.quit()
assert "Hello WebDriver" == title
def test_that_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.preference", "hi there")
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference"]
encoded = profile.encoded
decoded = base64.decodestring(encoded)
fp = BytesIO(decoded)
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith("user.js"):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference",'):
assert True == line.endswith(b'hi there");')
# there should be only one user.js
break
fp.close()
def test_that_unicode_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference('sample.preference.2', unicode('hi there'))
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference.2"]
encoded = profile.encoded
decoded = base64.decodestring(encoded)
fp = BytesIO(decoded)
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith('user.js'):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference.2",'):
assert True == line.endswith(b'hi there");')
# there should be only one user.js
break
fp.close()
def test_that_integer_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.int.preference", 12345)
profile.update_preferences()
assert 12345 == profile.default_preferences["sample.int.preference"]
def test_that_boolean_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.bool.preference", True)
profile.update_preferences()
assert True == profile.default_preferences["sample.bool.preference"]
def test_that_we_delete_the_profile(self):
path = self.driver.firefox_profile.path
self.driver.quit()
assert not os.path.exists(path)
def test_profiles_do_not_share_preferences(self):
self.profile1 = webdriver.FirefoxProfile()
self.profile1.accept_untrusted_certs = False
self.profile2 = webdriver.FirefoxProfile()
# Default is true. Should remain so.
assert self.profile2.default_preferences["webdriver_accept_untrusted_certs"] == True
def test_none_proxy_is_set(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = None
try:
self.profile.set_proxy(proxy)
assert False, "exception after passing empty proxy is expected"
        except ValueError:
pass
assert "network.proxy.type" not in self.profile.default_preferences
def test_unspecified_proxy_is_set(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
self.profile.set_proxy(proxy)
assert "network.proxy.type" not in self.profile.default_preferences
def test_manual_proxy_is_set_in_profile(self):
# The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.no_proxy = 'localhost, foo.localhost'
proxy.http_proxy = 'some.url:1234'
proxy.ftp_proxy = None
proxy.sslProxy = 'some2.url'
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.MANUAL['ff_value']
assert self.profile.default_preferences["network.proxy.no_proxies_on"] == 'localhost, foo.localhost'
assert self.profile.default_preferences["network.proxy.http"] == 'some.url'
assert self.profile.default_preferences["network.proxy.http_port"] == 1234
assert self.profile.default_preferences["network.proxy.ssl"] == 'some2.url'
assert "network.proxy.ssl_port" not in self.profile.default_preferences
assert "network.proxy.ftp" not in self.profile.default_preferences
def test_pac_proxy_is_set_in_profile(self):
# The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.proxy_autoconfig_url = 'http://some.url:12345/path'
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.PAC['ff_value']
assert self.profile.default_preferences["network.proxy.autoconfig_url"] == 'http://some.url:12345/path'
def test_autodetect_proxy_is_set_in_profile(self):
# The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.auto_detect = True
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.AUTODETECT['ff_value']
def teardown_method(self, method):
try:
self.driver.quit()
except:
pass  # don't care since we may have killed the browser above
self.webserver.stop()
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
def teardown_module(module):
try:
TestFirefoxProfile.driver.quit()
except:
pass  # don't care since we may have killed the browser above
| onedox/selenium | py/test/selenium/webdriver/firefox/ff_profile_tests.py | Python | apache-2.0 | 8,146 |
from tests.package.test_python import TestPythonPackageBase
class TestPythonPy2Can(TestPythonPackageBase):
__test__ = True
config = TestPythonPackageBase.config + \
"""
BR2_PACKAGE_PYTHON=y
BR2_PACKAGE_PYTHON_CAN=y
"""
sample_scripts = ["tests/package/sample_python_can.py"]
timeout = 40
class TestPythonPy3Can(TestPythonPackageBase):
__test__ = True
config = TestPythonPackageBase.config + \
"""
BR2_PACKAGE_PYTHON3=y
BR2_PACKAGE_PYTHON_CAN=y
"""
sample_scripts = ["tests/package/sample_python_can.py"]
timeout = 40
| masahir0y/buildroot-yamada | support/testing/tests/package/test_python_can.py | Python | gpl-2.0 | 617 |
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.s3.user import User
class ResultSet(list):
"""
The ResultSet is used to pass results back from the Amazon services
to the client. It is a light wrapper around Python's :py:class:`list` class,
with some additional methods for parsing XML results from AWS.
Because I don't really want any dependencies on external libraries,
I'm using the standard SAX parser that comes with Python. The good news is
that it's quite fast and efficient but it makes some things rather
difficult.
You can pass in, as the marker_elem parameter, a list of tuples.
Each tuple contains a string as the first element which represents
the XML element that the resultset needs to be on the lookout for
and a Python class as the second element of the tuple. Each time the
specified element is found in the XML, a new instance of the class
will be created and appended to the result list.
:ivar str next_token: A hash used to assist in paging through very long
result sets. In most cases, passing this value to certain methods
will give you another 'page' of results.
"""
def __init__(self, marker_elem=None):
list.__init__(self)
if isinstance(marker_elem, list):
self.markers = marker_elem
else:
self.markers = []
self.marker = None
self.key_marker = None
self.next_marker = None # avail when delimiter used
self.next_key_marker = None
self.next_upload_id_marker = None
self.next_version_id_marker = None
self.next_generation_marker = None
self.version_id_marker = None
self.is_truncated = False
self.next_token = None
self.status = True
def startElement(self, name, attrs, connection):
for t in self.markers:
if name == t[0]:
obj = t[1](connection)
self.append(obj)
return obj
if name == 'Owner':
# Makes owner available for get_service and
# perhaps other lists where not handled by
# another element.
self.owner = User()
return self.owner
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'IsTruncated':
self.is_truncated = self.to_boolean(value)
elif name == 'Marker':
self.marker = value
elif name == 'KeyMarker':
self.key_marker = value
elif name == 'NextMarker':
self.next_marker = value
elif name == 'NextKeyMarker':
self.next_key_marker = value
elif name == 'VersionIdMarker':
self.version_id_marker = value
elif name == 'NextVersionIdMarker':
self.next_version_id_marker = value
elif name == 'NextGenerationMarker':
self.next_generation_marker = value
elif name == 'UploadIdMarker':
self.upload_id_marker = value
elif name == 'NextUploadIdMarker':
self.next_upload_id_marker = value
elif name == 'Bucket':
self.bucket = value
elif name == 'MaxUploads':
self.max_uploads = int(value)
elif name == 'MaxItems':
self.max_items = int(value)
elif name == 'Prefix':
self.prefix = value
elif name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'ItemName':
self.append(value)
elif name == 'NextToken':
self.next_token = value
elif name == 'nextToken':
self.next_token = value
# Code exists which expects nextToken to be available, so we
# set it here to remain backwards-compatible.
self.nextToken = value
elif name == 'BoxUsage':
try:
connection.box_usage += float(value)
except:
pass
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
else:
setattr(self, name, value)
class BooleanResult(object):
def __init__(self, marker_elem=None):
self.status = True
self.request_id = None
self.box_usage = None
def __repr__(self):
if self.status:
return 'True'
else:
return 'False'
def __nonzero__(self):
return self.status
def startElement(self, name, attrs, connection):
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
elif name == 'RequestId':
self.request_id = value
elif name == 'requestId':
self.request_id = value
elif name == 'BoxUsage':
self.box_usage = value
else:
setattr(self, name, value)
| harshilasu/LinkurApp | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/resultset.py | Python | gpl-3.0 | 6,557 |
"""Provides factories for Split."""
from xmodule.modulestore import ModuleStoreEnum
from xmodule.course_module import CourseDescriptor
from xmodule.x_module import XModuleDescriptor
import factory
from factory.helpers import lazy_attribute
from opaque_keys.edx.keys import UsageKey
# Factories don't have __init__ methods, and are self-documenting
# pylint: disable=W0232, C0111
class SplitFactory(factory.Factory):
"""
Abstracted superclass which defines modulestore so that there's no dependency on django
if the caller passes modulestore in kwargs
"""
@lazy_attribute
def modulestore(self):
# Delayed import so that we only depend on django if the caller
# hasn't provided their own modulestore
from xmodule.modulestore.django import modulestore
return modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.split)
class PersistentCourseFactory(SplitFactory):
"""
Create a new course (not a new version of a course, but a whole new index entry).
keywords: any xblock field plus the following (note: these are filtered out, so if
they ever become legitimate xblock fields, they won't be settable via this factory)
* org: defaults to textX
* master_branch: (optional) defaults to ModuleStoreEnum.BranchName.draft
* user_id: (optional) defaults to 'test_user'
* display_name (xblock field): will default to 'Robot Super Course' unless provided
"""
FACTORY_FOR = CourseDescriptor
# pylint: disable=W0613
@classmethod
def _create(cls, target_class, course='999', run='run', org='testX', user_id=ModuleStoreEnum.UserID.test,
master_branch=ModuleStoreEnum.BranchName.draft, **kwargs):
modulestore = kwargs.pop('modulestore')
root_block_id = kwargs.pop('root_block_id', 'course')
# Write the data to the mongo datastore
new_course = modulestore.create_course(
org, course, run, user_id, fields=kwargs,
master_branch=master_branch, root_block_id=root_block_id
)
return new_course
@classmethod
def _build(cls, target_class, *args, **kwargs):
raise NotImplementedError()
class ItemFactory(SplitFactory):
FACTORY_FOR = XModuleDescriptor
display_name = factory.LazyAttributeSequence(lambda o, n: "{} {}".format(o.category, n))
# pylint: disable=W0613
@classmethod
def _create(cls, target_class, parent_location, category='chapter',
user_id=ModuleStoreEnum.UserID.test, definition_locator=None, force=False,
continue_version=False, **kwargs):
"""
passes *kwargs* as the new item's field values:
:param parent_location: (required) the location of the course & possibly parent
:param category: (defaults to 'chapter')
:param definition_locator (optional): the DescriptorLocator for the definition this uses or branches
"""
modulestore = kwargs.pop('modulestore')
if isinstance(parent_location, UsageKey):
return modulestore.create_child(
user_id, parent_location, category, definition_locator=definition_locator,
force=force, continue_version=continue_version, **kwargs
)
else:
return modulestore.create_item(
user_id, parent_location, category, definition_locator=definition_locator,
force=force, continue_version=continue_version, **kwargs
)
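# Illustrative usage (assumed; parent_location comes from an existing
# course, 'store' is a split modulestore instance):
#
#   chapter = ItemFactory.create(
#       parent_location=course.location, category='chapter',
#       display_name='Week 1', modulestore=store)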
@classmethod
def _build(cls, target_class, *args, **kwargs):
raise NotImplementedError()
| LICEF/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/persistent_factories.py | Python | agpl-3.0 | 3,589 |
from Model import *
| joshrule/LOTlib | LOTlib/Projects/NumberGame/__init__.py | Python | gpl-3.0 | 21 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe.exceptions import ValidationError
import unittest
class TestBatch(unittest.TestCase):
def test_item_has_batch_enabled(self):
self.assertRaises(ValidationError, frappe.get_doc({
"doctype": "Batch",
"name": "_test Batch",
"item": "_Test Item"
}).save)
 | indictranstech/focal-erpnext | stock/doctype/batch/test_batch.py | Python | agpl-3.0 | 421 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..specialized import BRAINSDemonWarp
def test_BRAINSDemonWarp_inputs():
input_map = dict(args=dict(argstr='%s',
),
arrayOfPyramidLevelIterations=dict(argstr='--arrayOfPyramidLevelIterations %s',
sep=',',
),
backgroundFillValue=dict(argstr='--backgroundFillValue %d',
),
checkerboardPatternSubdivisions=dict(argstr='--checkerboardPatternSubdivisions %s',
sep=',',
),
environ=dict(nohash=True,
usedefault=True,
),
fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s',
),
fixedVolume=dict(argstr='--fixedVolume %s',
),
gradient_type=dict(argstr='--gradient_type %s',
),
gui=dict(argstr='--gui ',
),
histogramMatch=dict(argstr='--histogramMatch ',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
initializeWithDisplacementField=dict(argstr='--initializeWithDisplacementField %s',
),
initializeWithTransform=dict(argstr='--initializeWithTransform %s',
),
inputPixelType=dict(argstr='--inputPixelType %s',
),
interpolationMode=dict(argstr='--interpolationMode %s',
),
lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d',
),
maskProcessingMode=dict(argstr='--maskProcessingMode %s',
),
max_step_length=dict(argstr='--max_step_length %f',
),
medianFilterSize=dict(argstr='--medianFilterSize %s',
sep=',',
),
minimumFixedPyramid=dict(argstr='--minimumFixedPyramid %s',
sep=',',
),
minimumMovingPyramid=dict(argstr='--minimumMovingPyramid %s',
sep=',',
),
movingBinaryVolume=dict(argstr='--movingBinaryVolume %s',
),
movingVolume=dict(argstr='--movingVolume %s',
),
neighborhoodForBOBF=dict(argstr='--neighborhoodForBOBF %s',
sep=',',
),
numberOfBCHApproximationTerms=dict(argstr='--numberOfBCHApproximationTerms %d',
),
numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d',
),
numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d',
),
numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d',
),
numberOfThreads=dict(argstr='--numberOfThreads %d',
),
outputCheckerboardVolume=dict(argstr='--outputCheckerboardVolume %s',
hash_files=False,
),
outputDebug=dict(argstr='--outputDebug ',
),
outputDisplacementFieldPrefix=dict(argstr='--outputDisplacementFieldPrefix %s',
),
outputDisplacementFieldVolume=dict(argstr='--outputDisplacementFieldVolume %s',
hash_files=False,
),
outputNormalized=dict(argstr='--outputNormalized ',
),
outputPixelType=dict(argstr='--outputPixelType %s',
),
outputVolume=dict(argstr='--outputVolume %s',
hash_files=False,
),
promptUser=dict(argstr='--promptUser ',
),
registrationFilterType=dict(argstr='--registrationFilterType %s',
),
seedForBOBF=dict(argstr='--seedForBOBF %s',
sep=',',
),
smoothDisplacementFieldSigma=dict(argstr='--smoothDisplacementFieldSigma %f',
),
terminal_output=dict(nohash=True,
),
upFieldSmoothing=dict(argstr='--upFieldSmoothing %f',
),
upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d',
),
use_vanilla_dem=dict(argstr='--use_vanilla_dem ',
),
)
inputs = BRAINSDemonWarp.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_BRAINSDemonWarp_outputs():
output_map = dict(outputCheckerboardVolume=dict(),
outputDisplacementFieldVolume=dict(),
outputVolume=dict(),
)
outputs = BRAINSDemonWarp.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| sgiavasis/nipype | nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py | Python | bsd-3-clause | 3,971 |
def can_build(env, platform):
return True
def configure(env):
pass
def get_doc_classes():
return [
"NetworkedMultiplayerENet",
]
def get_doc_path():
return "doc_classes"
| NateWardawg/godot | modules/enet/config.py | Python | mit | 201 |
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.interpolate as interp
from numpy.testing import assert_almost_equal
class TestRegression(object):
def test_spalde_scalar_input(self):
"""Ticket #629"""
x = np.linspace(0,10)
y = x**3
tck = interp.splrep(x, y, k=3, t=[5])
res = interp.spalde(np.float64(1), tck)
des = np.array([1., 3., 6., 6.])
assert_almost_equal(res, des)
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_regression.py | Python | mit | 484 |
class TestRouter(object):
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""
The Tribble model should be the only one to appear in the 'other' db.
"""
if model_name == 'tribble':
return db == 'other'
elif db == 'other':
return False
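# Usage sketch (assumed settings, not part of this module): the router is
# activated with something like
#
#   DATABASE_ROUTERS = ['migrations.routers.TestRouter']
#
# after which migrations for the 'tribble' model target the 'other' alias
# and all other models are kept out of it.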
| filias/django | tests/migrations/routers.py | Python | bsd-3-clause | 320 |
# -*- coding: utf-8 -*-
"""
Test cases related to XPath evaluation and the XPath class
"""
import unittest, sys, os.path
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, HelperTestCase, _bytes, BytesIO
from common_imports import doctest, make_doctest
class ETreeXPathTestCase(HelperTestCase):
"""XPath tests etree"""
def test_xpath_boolean(self):
tree = self.parse('<a><b></b><b></b></a>')
self.assert_(tree.xpath('boolean(/a/b)'))
self.assert_(not tree.xpath('boolean(/a/c)'))
def test_xpath_number(self):
tree = self.parse('<a>1</a>')
self.assertEquals(1.,
tree.xpath('number(/a)'))
tree = self.parse('<a>A</a>')
actual = str(tree.xpath('number(/a)'))
expected = ['nan', '1.#qnan', 'nanq']
if not actual.lower() in expected:
self.fail('Expected a NAN value, got %s' % actual)
def test_xpath_string(self):
tree = self.parse('<a>Foo</a>')
self.assertEquals('Foo',
tree.xpath('string(/a/text())'))
def test_xpath_document_root(self):
tree = self.parse('<a><b/></a>')
self.assertEquals([],
tree.xpath('/'))
def test_xpath_namespace(self):
tree = self.parse('<a xmlns="test" xmlns:p="myURI"/>')
self.assert_((None, "test") in tree.xpath('namespace::*'))
self.assert_(('p', 'myURI') in tree.xpath('namespace::*'))
def test_xpath_namespace_empty(self):
tree = self.parse('<a/>')
self.assertEquals([('xml', 'http://www.w3.org/XML/1998/namespace')],
tree.xpath('namespace::*'))
def test_xpath_list_elements(self):
tree = self.parse('<a><b>Foo</b><b>Bar</b></a>')
root = tree.getroot()
self.assertEquals([root[0], root[1]],
tree.xpath('/a/b'))
def test_xpath_list_nothing(self):
tree = self.parse('<a><b/></a>')
self.assertEquals([],
tree.xpath('/a/c'))
# this seems to pass a different code path, also should return nothing
self.assertEquals([],
tree.xpath('/a/c/text()'))
def test_xpath_list_text(self):
tree = self.parse('<a><b>Foo</b><b>Bar</b></a>')
root = tree.getroot()
self.assertEquals(['Foo', 'Bar'],
tree.xpath('/a/b/text()'))
def test_xpath_list_text_parent(self):
tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>')
root = tree.getroot()
self.assertEquals(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()'))
self.assertEquals([root[0], root[1]],
[r.getparent() for r in tree.xpath('/a/b/text()')])
def test_xpath_list_text_parent_no_smart_strings(self):
tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>')
root = tree.getroot()
self.assertEquals(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()', smart_strings=True))
self.assertEquals([root[0], root[1]],
[r.getparent() for r in
tree.xpath('/a/b/text()', smart_strings=True)])
self.assertEquals(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()', smart_strings=False))
self.assertEquals([False, False],
[hasattr(r, 'getparent') for r in
tree.xpath('/a/b/text()', smart_strings=False)])
def test_xpath_list_unicode_text_parent(self):
xml = _bytes('<a><b>FooBar\\u0680\\u3120</b><b>BarFoo\\u0680\\u3120</b></a>').decode("unicode_escape")
tree = self.parse(xml.encode('utf-8'))
root = tree.getroot()
self.assertEquals([_bytes('FooBar\\u0680\\u3120').decode("unicode_escape"),
_bytes('BarFoo\\u0680\\u3120').decode("unicode_escape")],
tree.xpath('/a/b/text()'))
self.assertEquals([root[0], root[1]],
[r.getparent() for r in tree.xpath('/a/b/text()')])
def test_xpath_list_attribute(self):
tree = self.parse('<a b="B" c="C"/>')
self.assertEquals(['B'],
tree.xpath('/a/@b'))
def test_xpath_list_attribute_parent(self):
tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
results = tree.xpath('/a/@c')
self.assertEquals(1, len(results))
self.assertEquals('CqWeRtZuI', results[0])
self.assertEquals(tree.getroot().tag, results[0].getparent().tag)
def test_xpath_list_attribute_parent_no_smart_strings(self):
tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
results = tree.xpath('/a/@c', smart_strings=True)
self.assertEquals(1, len(results))
self.assertEquals('CqWeRtZuI', results[0])
self.assertEquals(tree.getroot().tag, results[0].getparent().tag)
results = tree.xpath('/a/@c', smart_strings=False)
self.assertEquals(1, len(results))
self.assertEquals('CqWeRtZuI', results[0])
self.assertEquals(False, hasattr(results[0], 'getparent'))
def test_xpath_list_comment(self):
tree = self.parse('<a><!-- Foo --></a>')
self.assertEquals(['<!-- Foo -->'],
list(map(repr, tree.xpath('/a/node()'))))
def test_rel_xpath_boolean(self):
root = etree.XML('<a><b><c/></b></a>')
el = root[0]
self.assert_(el.xpath('boolean(c)'))
self.assert_(not el.xpath('boolean(d)'))
def test_rel_xpath_list_elements(self):
tree = self.parse('<a><c><b>Foo</b><b>Bar</b></c><c><b>Hey</b></c></a>')
root = tree.getroot()
c = root[0]
self.assertEquals([c[0], c[1]],
c.xpath('b'))
self.assertEquals([c[0], c[1], root[1][0]],
c.xpath('//b'))
def test_xpath_ns(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertEquals(
[root[0]],
tree.xpath('//foo:b', namespaces={'foo': 'uri:a'}))
self.assertEquals(
[],
tree.xpath('//foo:b', namespaces={'foo': 'uri:c'}))
self.assertEquals(
[root[0]],
root.xpath('//baz:b', namespaces={'baz': 'uri:a'}))
def test_xpath_ns_none(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertRaises(
TypeError,
root.xpath, '//b', namespaces={None: 'uri:a'})
def test_xpath_ns_empty(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertRaises(
TypeError,
root.xpath, '//b', namespaces={'': 'uri:a'})
def test_xpath_error(self):
tree = self.parse('<a/>')
self.assertRaises(etree.XPathEvalError, tree.xpath, '\\fad')
def test_xpath_class_error(self):
self.assertRaises(SyntaxError, etree.XPath, '\\fad')
self.assertRaises(etree.XPathSyntaxError, etree.XPath, '\\fad')
def test_xpath_prefix_error(self):
tree = self.parse('<a/>')
self.assertRaises(etree.XPathEvalError, tree.xpath, '/fa:d')
def test_xpath_class_prefix_error(self):
tree = self.parse('<a/>')
xpath = etree.XPath("/fa:d")
self.assertRaises(etree.XPathEvalError, xpath, tree)
def test_elementtree_getpath(self):
a = etree.Element("a")
b = etree.SubElement(a, "b")
c = etree.SubElement(a, "c")
d1 = etree.SubElement(c, "d")
d2 = etree.SubElement(c, "d")
tree = etree.ElementTree(a)
self.assertEqual('/a/c/d',
tree.getpath(d2)[:6])
self.assertEqual([d2],
tree.xpath(tree.getpath(d2)))
def test_elementtree_getpath_partial(self):
a = etree.Element("a")
b = etree.SubElement(a, "b")
c = etree.SubElement(a, "c")
d1 = etree.SubElement(c, "d")
d2 = etree.SubElement(c, "d")
tree = etree.ElementTree(c)
self.assertEqual('/c/d',
tree.getpath(d2)[:4])
self.assertEqual([d2],
tree.xpath(tree.getpath(d2)))
def test_xpath_evaluator(self):
tree = self.parse('<a><b><c></c></b></a>')
e = etree.XPathEvaluator(tree)
root = tree.getroot()
self.assertEquals(
[root],
e('//a'))
def test_xpath_evaluator_tree(self):
tree = self.parse('<a><b><c></c></b></a>')
child_tree = etree.ElementTree(tree.getroot()[0])
e = etree.XPathEvaluator(child_tree)
self.assertEquals(
[],
e('a'))
root = child_tree.getroot()
self.assertEquals(
[root[0]],
e('c'))
def test_xpath_evaluator_tree_absolute(self):
tree = self.parse('<a><b><c></c></b></a>')
child_tree = etree.ElementTree(tree.getroot()[0])
e = etree.XPathEvaluator(child_tree)
self.assertEquals(
[],
e('/a'))
root = child_tree.getroot()
self.assertEquals(
[root],
e('/b'))
self.assertEquals(
[],
e('/c'))
def test_xpath_evaluator_element(self):
tree = self.parse('<a><b><c></c></b></a>')
root = tree.getroot()
e = etree.XPathEvaluator(root[0])
self.assertEquals(
[root[0][0]],
e('c'))
def test_xpath_extensions(self):
def foo(evaluator, a):
return 'hello %s' % a
extension = {(None, 'foo'): foo}
tree = self.parse('<a><b></b></a>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertEquals(
"hello you", e("foo('you')"))
def test_xpath_extensions_wrong_args(self):
def foo(evaluator, a, b):
return "hello %s and %s" % (a, b)
extension = {(None, 'foo'): foo}
tree = self.parse('<a><b></b></a>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertRaises(TypeError, e, "foo('you')")
def test_xpath_extensions_error(self):
def foo(evaluator, a):
return 1/0
extension = {(None, 'foo'): foo}
tree = self.parse('<a/>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertRaises(ZeroDivisionError, e, "foo('test')")
def test_xpath_extensions_nodes(self):
def f(evaluator, arg):
r = etree.Element('results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
return r
x = self.parse('<a/>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo('World')/result")
self.assertEquals(2, len(r))
self.assertEquals('Hoi', r[0].text)
self.assertEquals('Dag', r[1].text)
def test_xpath_extensions_nodes_append(self):
def f(evaluator, nodes):
r = etree.SubElement(nodes[0], 'results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
return r
x = self.parse('<a/>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo(/*)/result")
self.assertEquals(2, len(r))
self.assertEquals('Hoi', r[0].text)
self.assertEquals('Dag', r[1].text)
def test_xpath_extensions_nodes_append2(self):
def f(evaluator, nodes):
r = etree.Element('results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
r.append(nodes[0])
return r
x = self.parse('<result>Honk</result>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo(/*)/result")
self.assertEquals(3, len(r))
self.assertEquals('Hoi', r[0].text)
self.assertEquals('Dag', r[1].text)
self.assertEquals('Honk', r[2].text)
def test_xpath_context_node(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = []
def check_context(ctxt, nodes):
self.assertEquals(len(nodes), 1)
check_call.append(nodes[0].tag)
self.assertEquals(ctxt.context_node, nodes[0])
return True
find = etree.XPath("//*[p:foo(.)]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
find(tree)
check_call.sort()
self.assertEquals(check_call, ["a", "b", "c", "root"])
def test_xpath_eval_context_propagation(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = {}
def check_context(ctxt, nodes):
self.assertEquals(len(nodes), 1)
tag = nodes[0].tag
# empty during the "b" call, a "b" during the "c" call
check_call[tag] = ctxt.eval_context.get("b")
ctxt.eval_context[tag] = tag
return True
find = etree.XPath("//b[p:foo(.)]/c[p:foo(.)]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEquals(result, [tree.getroot()[1][0]])
self.assertEquals(check_call, {'b':None, 'c':'b'})
def test_xpath_eval_context_clear(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = {}
def check_context(ctxt):
check_call["done"] = True
# context must be empty for each new evaluation
self.assertEquals(len(ctxt.eval_context), 0)
ctxt.eval_context["test"] = True
return True
find = etree.XPath("//b[p:foo()]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEquals(result, [tree.getroot()[1]])
self.assertEquals(check_call["done"], True)
check_call.clear()
find = etree.XPath("//b[p:foo()]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEquals(result, [tree.getroot()[1]])
self.assertEquals(check_call["done"], True)
def test_xpath_variables(self):
x = self.parse('<a attr="true"/>')
e = etree.XPathEvaluator(x)
expr = "/a[@attr=$aval]"
r = e(expr, aval=1)
self.assertEquals(0, len(r))
r = e(expr, aval="true")
self.assertEquals(1, len(r))
self.assertEquals("true", r[0].get('attr'))
r = e(expr, aval=True)
self.assertEquals(1, len(r))
self.assertEquals("true", r[0].get('attr'))
def test_xpath_variables_nodeset(self):
x = self.parse('<a attr="true"/>')
e = etree.XPathEvaluator(x)
element = etree.Element("test-el")
etree.SubElement(element, "test-sub")
expr = "$value"
r = e(expr, value=element)
self.assertEquals(1, len(r))
self.assertEquals(element.tag, r[0].tag)
self.assertEquals(element[0].tag, r[0][0].tag)
def test_xpath_extensions_mix(self):
x = self.parse('<a attr="true"><test/></a>')
class LocalException(Exception):
pass
def foo(evaluator, a, varval):
etree.Element("DUMMY")
if varval == 0:
raise LocalException
elif varval == 1:
return ()
elif varval == 2:
return None
elif varval == 3:
return a[0][0]
a = a[0]
if a.get("attr") == str(varval):
return a
else:
return etree.Element("NODE")
extension = {(None, 'foo'): foo}
e = etree.XPathEvaluator(x, extensions=[extension])
del x
self.assertRaises(LocalException, e, "foo(., 0)")
self.assertRaises(LocalException, e, "foo(., $value)", value=0)
r = e("foo(., $value)", value=1)
self.assertEqual(len(r), 0)
r = e("foo(., 1)")
self.assertEqual(len(r), 0)
r = e("foo(., $value)", value=2)
self.assertEqual(len(r), 0)
r = e("foo(., $value)", value=3)
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "test")
r = e("foo(., $value)", value="false")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "NODE")
r = e("foo(., 'false')")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "NODE")
r = e("foo(., 'true')")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "a")
self.assertEqual(r[0][0].tag, "test")
r = e("foo(., $value)", value="true")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "a")
self.assertRaises(LocalException, e, "foo(., 0)")
self.assertRaises(LocalException, e, "foo(., $value)", value=0)
class ETreeXPathClassTestCase(HelperTestCase):
"Tests for the XPath class"
def test_xpath_compile_doc(self):
x = self.parse('<a attr="true"/>')
expr = etree.XPath("/a[@attr != 'true']")
r = expr(x)
self.assertEquals(0, len(r))
expr = etree.XPath("/a[@attr = 'true']")
r = expr(x)
self.assertEquals(1, len(r))
expr = etree.XPath( expr.path )
r = expr(x)
self.assertEquals(1, len(r))
def test_xpath_compile_element(self):
x = self.parse('<a><b/><c/></a>')
root = x.getroot()
expr = etree.XPath("./b")
r = expr(root)
self.assertEquals(1, len(r))
self.assertEquals('b', r[0].tag)
expr = etree.XPath("./*")
r = expr(root)
self.assertEquals(2, len(r))
def test_xpath_compile_vars(self):
x = self.parse('<a attr="true"/>')
expr = etree.XPath("/a[@attr=$aval]")
r = expr(x, aval=False)
self.assertEquals(0, len(r))
r = expr(x, aval=True)
self.assertEquals(1, len(r))
def test_xpath_compile_error(self):
self.assertRaises(SyntaxError, etree.XPath, '\\fad')
def test_xpath_elementtree_error(self):
self.assertRaises(ValueError, etree.XPath('*'), etree.ElementTree())
class ETreeETXPathClassTestCase(HelperTestCase):
"Tests for the ETXPath class"
def test_xpath_compile_ns(self):
x = self.parse('<a><b xmlns="nsa"/><b xmlns="nsb"/></a>')
expr = etree.ETXPath("/a/{nsa}b")
r = expr(x)
self.assertEquals(1, len(r))
self.assertEquals('{nsa}b', r[0].tag)
expr = etree.ETXPath("/a/{nsb}b")
r = expr(x)
self.assertEquals(1, len(r))
self.assertEquals('{nsb}b', r[0].tag)
# disabled this test as non-ASCII characters in namespace URIs are
# not acceptable
def _test_xpath_compile_unicode(self):
x = self.parse(_bytes('<a><b xmlns="http://nsa/\\uf8d2"/><b xmlns="http://nsb/\\uf8d1"/></a>'
).decode("unicode_escape"))
expr = etree.ETXPath(_bytes("/a/{http://nsa/\\uf8d2}b").decode("unicode_escape"))
r = expr(x)
self.assertEquals(1, len(r))
self.assertEquals(_bytes('{http://nsa/\\uf8d2}b').decode("unicode_escape"), r[0].tag)
expr = etree.ETXPath(_bytes("/a/{http://nsb/\\uf8d1}b").decode("unicode_escape"))
r = expr(x)
self.assertEquals(1, len(r))
self.assertEquals(_bytes('{http://nsb/\\uf8d1}b').decode("unicode_escape"), r[0].tag)
SAMPLE_XML = etree.parse(BytesIO("""
<body>
<tag>text</tag>
<section>
<tag>subtext</tag>
</section>
<tag />
<tag />
</body>
"""))
def tag(elem):
return elem.tag
def stringTest(ctxt, s1):
return "Hello "+s1
def floatTest(ctxt, f1):
return f1+4
def booleanTest(ctxt, b1):
return not b1
def setTest(ctxt, st1):
return st1[0]
def setTest2(ctxt, st1):
return st1[0:2]
def argsTest1(ctxt, s, f, b, st):
return ", ".join(map(str, (s, f, b, list(map(tag, st)))))
def argsTest2(ctxt, st1, st2):
st1.extend(st2)
return st1
def resultTypesTest(ctxt):
return ["x","y"]
def resultTypesTest2(ctxt):
return resultTypesTest
uri = "http://www.example.com/"
extension = {(None, 'stringTest'): stringTest,
(None, 'floatTest'): floatTest,
(None, 'booleanTest'): booleanTest,
(None, 'setTest'): setTest,
(None, 'setTest2'): setTest2,
(None, 'argsTest1'): argsTest1,
(None, 'argsTest2'): argsTest2,
(None, 'resultTypesTest'): resultTypesTest,
(None, 'resultTypesTest2'): resultTypesTest2,}
def xpath():
"""
Test xpath extension functions.
>>> root = SAMPLE_XML
>>> e = etree.XPathEvaluator(root, extensions=[extension])
>>> e("stringTest('you')")
'Hello you'
>>> e(_bytes("stringTest('\\\\xe9lan')").decode("unicode_escape"))
u'Hello \\xe9lan'
>>> e("stringTest('you','there')")
Traceback (most recent call last):
...
TypeError: stringTest() takes exactly 2 arguments (3 given)
>>> e("floatTest(2)")
6.0
>>> e("booleanTest(true())")
False
>>> list(map(tag, e("setTest(/body/tag)")))
['tag']
>>> list(map(tag, e("setTest2(/body/*)")))
['tag', 'section']
>>> e("argsTest1('a',1.5,true(),/body/tag)")
"a, 1.5, True, ['tag', 'tag', 'tag']"
>>> list(map(tag, e("argsTest2(/body/tag, /body/section)")))
['tag', 'section', 'tag', 'tag']
>>> e("resultTypesTest()")
Traceback (most recent call last):
...
XPathResultError: This is not a node: 'x'
>>> try:
... e("resultTypesTest2()")
... except etree.XPathResultError:
... print("Got error")
Got error
"""
if sys.version_info[0] >= 3:
xpath.__doc__ = xpath.__doc__.replace(" u'", " '")
xpath.__doc__ = xpath.__doc__.replace(" XPathResultError",
" lxml.etree.XPathResultError")
xpath.__doc__ = xpath.__doc__.replace(" exactly 2 arguments",
" exactly 2 positional arguments")
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeXPathTestCase)])
suite.addTests([unittest.makeSuite(ETreeXPathClassTestCase)])
suite.addTests([unittest.makeSuite(ETreeETXPathClassTestCase)])
suite.addTests([doctest.DocTestSuite()])
suite.addTests(
[make_doctest('../../../doc/xpathxslt.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/ext-py/lxml/src/lxml/tests/test_xpathevaluator.py | Python | gpl-2.0 | 23,244 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Daniel Campos ([email protected]) Date: 29/09/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import models
| oihane/odoomrp-wip | mrp_byproduct_operations/__init__.py | Python | agpl-3.0 | 945 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
determine_ext,
int_or_none,
xpath_attr,
xpath_text,
)
class RuutuIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ruutu\.fi/video/(?P<id>\d+)'
_TESTS = [
{
'url': 'http://www.ruutu.fi/video/2058907',
'md5': 'ab2093f39be1ca8581963451b3c0234f',
'info_dict': {
'id': '2058907',
'ext': 'mp4',
'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!',
'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 114,
'age_limit': 0,
},
},
{
'url': 'http://www.ruutu.fi/video/2057306',
'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9',
'info_dict': {
'id': '2057306',
'ext': 'mp4',
'title': 'Superpesis: katso koko kausi Ruudussa',
'description': 'md5:da2736052fef3b2bd5e0005e63c25eac',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 40,
'age_limit': 0,
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
video_xml = self._download_xml(
'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id, video_id)
formats = []
processed_urls = []
def extract_formats(node):
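# Walks the <Clip> subtree recursively: tags ending in 'Files' are
# containers to descend into, tags ending in 'File' carry one candidate
# URL that is dispatched on its extension below.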
for child in node:
if child.tag.endswith('Files'):
extract_formats(child)
elif child.tag.endswith('File'):
video_url = child.text
if (not video_url or video_url in processed_urls or
any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
continue
processed_urls.append(video_url)
ext = determine_ext(video_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
video_url, video_id, f4m_id='hds', fatal=False))
else:
proto = compat_urllib_parse_urlparse(video_url).scheme
if not child.tag.startswith('HTTP') and proto != 'rtmp':
continue
preference = -1 if proto == 'rtmp' else 1
label = child.get('label')
tbr = int_or_none(child.get('bitrate'))
format_id = '%s-%s' % (proto, label if label else tbr) if label or tbr else proto
if not self._is_valid_url(video_url, video_id, format_id):
continue
width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]]
formats.append({
'format_id': format_id,
'url': video_url,
'width': width,
'height': height,
'tbr': tbr,
'preference': preference,
})
extract_formats(video_xml.find('./Clip'))
self._sort_formats(formats)
return {
'id': video_id,
'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True),
'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'),
'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 'thumbnail'),
'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')),
'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')),
'formats': formats,
}
| maleficarium/youtube-dl | youtube_dl/extractor/ruutu.py | Python | unlicense | 4,297 |
"""
tests.sentry.plugins.interfaces.test_releasehook
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
__all__ = ['ReleaseHook']
from sentry.models import Release
from sentry.plugins import ReleaseHook
from sentry.testutils import TestCase
class StartReleaseTest(TestCase):
def test_minimal(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
hook = ReleaseHook(project)
hook.start_release(version)
release = Release.objects.get(
project=project,
version=version,
)
assert release.date_started
class FinishReleaseTest(TestCase):
def test_minimal(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
hook = ReleaseHook(project)
hook.finish_release(version)
release = Release.objects.get(
project=project,
version=version,
)
assert release.date_released
| Kryz/sentry | tests/sentry/plugins/interfaces/test_releasehook.py | Python | bsd-3-clause | 1,132 |
def test_local_variable():
x = 1
x = 2
| asedunov/intellij-community | python/testData/inspections/PyRedeclarationInspection/localVariable.py | Python | apache-2.0 | 46 |
##
## * << Haru Free PDF Library 2.0.8 >> -- hpdf.h
## *
## * URL http://libharu.org/
## *
## * Copyright (c) 1999-2006 Takeshi Kanno
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os
import sys
import types
def setpath():
dllpath='%s/dll' %(os.path.dirname(os.path.realpath(__file__)))
if 'PATH' in os.environ:
if dllpath not in os.environ['PATH']:
os.environ['PATH']='%s;%s' % (dllpath, os.environ['PATH'])
else:
os.environ['PATH']=dllpath
setpath()
from hpdf_consts import *
from hpdf_types import *
if os.sys.platform=='win32':
harudll='libhpdf.dll'
#haru=WinDLL(harudll)
haru=CDLL(harudll)
else:
harudll='libhpdf.so'
haru=CDLL(harudll)
HPDF_HANDLE=c_void_p
HPDF_Doc=HPDF_HANDLE
HPDF_Page=HPDF_HANDLE
HPDF_Pages=HPDF_HANDLE
HPDF_Stream=HPDF_HANDLE
HPDF_Image=HPDF_HANDLE
HPDF_Font=HPDF_HANDLE
HPDF_Outline=HPDF_HANDLE
HPDF_Encoder=HPDF_HANDLE
HPDF_Destination=HPDF_HANDLE
HPDF_XObject=HPDF_HANDLE
HPDF_Annotation=HPDF_HANDLE
HPDF_ExtGState=HPDF_HANDLE
#const char * HPDF_GetVersion (void)
HPDF_GetVersion=haru.HPDF_GetVersion
HPDF_GetVersion.restype=c_char_p
#HPDF_Doc HPDF_NewEx (HPDF_Error_Handler user_error_fn, HPDF_Alloc_Func user_alloc_fn, HPDF_Free_Func user_free_fn, HPDF_UINT mem_pool_buf_size, void *user_data)
HPDF_NewEx=haru.HPDF_NewEx
HPDF_NewEx.restype=HPDF_Doc
#HPDF_Doc HPDF_New (HPDF_Error_Handler user_error_fn, void *user_data)
HPDF_New=haru.HPDF_New
HPDF_New.restype=HPDF_Doc
#HPDF_STATUS HPDF_SetErrorHandler (HPDF_Doc pdf, HPDF_Error_Handler user_error_fn)
HPDF_SetErrorHandler=haru.HPDF_SetErrorHandler
HPDF_SetErrorHandler.restype=HPDF_STATUS
#void HPDF_Free (HPDF_Doc pdf)
HPDF_Free=haru.HPDF_Free
HPDF_Free.restype=None
#HPDF_STATUS HPDF_NewDoc (HPDF_Doc pdf)
HPDF_NewDoc=haru.HPDF_NewDoc
HPDF_NewDoc.restype=HPDF_STATUS
#void HPDF_FreeDoc (HPDF_Doc pdf)
HPDF_FreeDoc=haru.HPDF_FreeDoc
HPDF_FreeDoc.restype=None
#HPDF_BOOL HPDF_HasDoc (HPDF_Doc pdf)
HPDF_HasDoc=haru.HPDF_HasDoc
HPDF_HasDoc.restype=HPDF_BOOL
#void HPDF_FreeDocAll (HPDF_Doc pdf)
HPDF_FreeDocAll=haru.HPDF_FreeDocAll
HPDF_FreeDocAll.restype=None
#HPDF_STATUS HPDF_SaveToStream (HPDF_Doc pdf)
HPDF_SaveToStream=haru.HPDF_SaveToStream
HPDF_SaveToStream.restype=HPDF_STATUS
#HPDF_UINT32 HPDF_GetStreamSize (HPDF_Doc pdf)
HPDF_GetStreamSize=haru.HPDF_GetStreamSize
HPDF_GetStreamSize.restype=HPDF_UINT32
#HPDF_STATUS HPDF_ReadFromStream (HPDF_Doc pdf, HPDF_BYTE *buf, HPDF_UINT32 *size)
_HPDF_ReadFromStream=haru.HPDF_ReadFromStream
_HPDF_ReadFromStream.restype=HPDF_STATUS
def HPDF_ReadFromStream(
pdf, #HPDF_Doc
buf, #POINTER(HPDF_BYTE)
size, #POINTER(HPDF_UINT32)
):
if type(buf) in (types.ListType, types.TupleType):
size=len(buf)
buf=pointer((HPDF_BYTE*size)(*buf))
size=HPDF_UINT32(int(size))
return _HPDF_ReadFromStream(
pdf, #HPDF_Doc
buf, #POINTER(HPDF_BYTE)
size, #POINTER(HPDF_UINT32)
)
#HPDF_STATUS HPDF_ResetStream (HPDF_Doc pdf)
HPDF_ResetStream=haru.HPDF_ResetStream
HPDF_ResetStream.restype=HPDF_STATUS
#HPDF_STATUS HPDF_SaveToFile (HPDF_Doc pdf, const char *file_name)
HPDF_SaveToFile=haru.HPDF_SaveToFile
HPDF_SaveToFile.restype=HPDF_STATUS
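# Minimal usage sketch (assumes libhpdf is loadable and the constants from
# hpdf_consts; error handling omitted; passing None installs no error
# handler):
#
#   pdf = HPDF_New(None, None)
#   page = HPDF_AddPage(pdf)
#   HPDF_Page_SetSize(page, HPDF_PAGE_SIZE_A4, HPDF_PAGE_PORTRAIT)
#   HPDF_SaveToFile(pdf, 'hello.pdf')
#   HPDF_Free(pdf)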
#HPDF_STATUS HPDF_GetError (HPDF_Doc pdf)
HPDF_GetError=haru.HPDF_GetError
HPDF_GetError.restype=HPDF_STATUS
#HPDF_STATUS HPDF_GetErrorDetail (HPDF_Doc pdf)
HPDF_GetErrorDetail=haru.HPDF_GetErrorDetail
HPDF_GetErrorDetail.restype=HPDF_STATUS
#void HPDF_ResetError (HPDF_Doc pdf)
HPDF_ResetError=haru.HPDF_ResetError
HPDF_ResetError.restype=None
#HPDF_STATUS HPDF_SetPagesConfiguration (HPDF_Doc pdf, HPDF_UINT page_per_pages)
_HPDF_SetPagesConfiguration=haru.HPDF_SetPagesConfiguration
_HPDF_SetPagesConfiguration.restype=HPDF_STATUS
def HPDF_SetPagesConfiguration(
pdf, #HPDF_Doc
page_per_pages, #HPDF_UINT
):
page_per_pages=HPDF_UINT(int(page_per_pages))
return _HPDF_SetPagesConfiguration(
pdf, #HPDF_Doc
page_per_pages, #HPDF_UINT
)
#HPDF_Page HPDF_GetPageByIndex (HPDF_Doc pdf, HPDF_UINT index)
HPDF_GetPageByIndex=haru.HPDF_GetPageByIndex
HPDF_GetPageByIndex.restype=HPDF_Page
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#HPDF_PageLayout HPDF_GetPageLayout (HPDF_Doc pdf)
HPDF_GetPageLayout=haru.HPDF_GetPageLayout
HPDF_GetPageLayout.restype=HPDF_PageLayout
#HPDF_STATUS HPDF_SetPageLayout (HPDF_Doc pdf, HPDF_PageLayout layout)
HPDF_SetPageLayout=haru.HPDF_SetPageLayout
HPDF_SetPageLayout.restype=HPDF_STATUS
#HPDF_PageMode HPDF_GetPageMode (HPDF_Doc pdf)
HPDF_GetPageMode=haru.HPDF_GetPageMode
HPDF_GetPageMode.restype=HPDF_PageMode
#HPDF_STATUS HPDF_SetPageMode (HPDF_Doc pdf, HPDF_PageMode mode)
HPDF_SetPageMode=haru.HPDF_SetPageMode
HPDF_SetPageMode.restype=HPDF_STATUS
#HPDF_UINT HPDF_GetViewerPreference (HPDF_Doc pdf)
HPDF_GetViewerPreference=haru.HPDF_GetViewerPreference
HPDF_GetViewerPreference.restype=HPDF_UINT
#HPDF_STATUS HPDF_SetViewerPreference (HPDF_Doc pdf, HPDF_UINT value)
HPDF_SetViewerPreference=haru.HPDF_SetViewerPreference
HPDF_SetViewerPreference.restype=HPDF_STATUS
#HPDF_STATUS HPDF_SetOpenAction (HPDF_Doc pdf, HPDF_Destination open_action)
HPDF_SetOpenAction=haru.HPDF_SetOpenAction
HPDF_SetOpenAction.restype=HPDF_STATUS
#---------------------------------------------------------------------------
#----- page handling -------------------------------------------------------
#HPDF_Page HPDF_GetCurrentPage (HPDF_Doc pdf)
HPDF_GetCurrentPage=haru.HPDF_GetCurrentPage
HPDF_GetCurrentPage.restype=HPDF_Page
#HPDF_Page HPDF_AddPage (HPDF_Doc pdf)
HPDF_AddPage=haru.HPDF_AddPage
HPDF_AddPage.restype=HPDF_Page
#HPDF_Page HPDF_InsertPage (HPDF_Doc pdf, HPDF_Page page)
HPDF_InsertPage=haru.HPDF_InsertPage
HPDF_InsertPage.restype=HPDF_Page
#HPDF_STATUS HPDF_Page_SetWidth (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetWidth=haru.HPDF_Page_SetWidth
_HPDF_Page_SetWidth.restype=HPDF_STATUS
def HPDF_Page_SetWidth(
page, #HPDF_Page
value, #HPDF_REAL
):
value=HPDF_REAL(value)
return _HPDF_Page_SetWidth(
page, #HPDF_Page
value, #HPDF_REAL
)
#HPDF_STATUS HPDF_Page_SetHeight (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetHeight=haru.HPDF_Page_SetHeight
_HPDF_Page_SetHeight.restype=HPDF_STATUS
def HPDF_Page_SetHeight(
page, #HPDF_Page
value, #HPDF_REAL
):
value=HPDF_REAL(value)
return _HPDF_Page_SetHeight(
page, #HPDF_Page
value, #HPDF_REAL
)
#HPDF_STATUS
#HPDF_Page_SetSize (HPDF_Page page,
# HPDF_PageSizes size,
# HPDF_PageDirection direction);
HPDF_Page_SetSize=haru.HPDF_Page_SetSize
HPDF_Page_SetSize.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Page_SetRotate (HPDF_Page page, HPDF_UINT16 angle)
_HPDF_Page_SetRotate=haru.HPDF_Page_SetRotate
_HPDF_Page_SetRotate.restype=HPDF_STATUS
def HPDF_Page_SetRotate(
page, #HPDF_Page
angle, #HPDF_UINT16
):
angle=HPDF_UINT16(int(angle))
return _HPDF_Page_SetRotate(
page, #HPDF_Page
angle, #HPDF_UINT16
)
#---------------------------------------------------------------------------
#----- font handling -------------------------------------------------------
#HPDF_Font HPDF_GetFont (HPDF_Doc pdf, const char *font_name, const char *encoding_name)
HPDF_GetFont=haru.HPDF_GetFont
HPDF_GetFont.restype=HPDF_Font
#const char* HPDF_LoadType1FontFromFile (HPDF_Doc pdf, const char *afm_file_name, const char *data_file_name)
HPDF_LoadType1FontFromFile=haru.HPDF_LoadType1FontFromFile
HPDF_LoadType1FontFromFile.restype=c_char_p
#const char* HPDF_LoadTTFontFromFile (HPDF_Doc pdf, const char *file_name, HPDF_BOOL embedding)
HPDF_LoadTTFontFromFile=haru.HPDF_LoadTTFontFromFile
HPDF_LoadTTFontFromFile.restype=c_char_p
#const char* HPDF_LoadTTFontFromFile2 (HPDF_Doc pdf, const char *file_name, HPDF_UINT index, HPDF_BOOL embedding)
HPDF_LoadTTFontFromFile2=haru.HPDF_LoadTTFontFromFile2
HPDF_LoadTTFontFromFile2.restype=c_char_p
#HPDF_STATUS HPDF_AddPageLabel (HPDF_Doc pdf, HPDF_UINT page_num, HPDF_PageNumStyle style, HPDF_UINT first_page, const char *prefix)
_HPDF_AddPageLabel=haru.HPDF_AddPageLabel
_HPDF_AddPageLabel.restype=HPDF_STATUS
def HPDF_AddPageLabel(
pdf, #HPDF_Doc
page_num, #HPDF_UINT
style, #HPDF_PageNumStyle
first_page, #HPDF_UINT
prefix, #c_char_p
):
page_num, first_page = [HPDF_UINT(int(i)) for i in (page_num, first_page)]
return _HPDF_AddPageLabel(
pdf, #HPDF_Doc
page_num, #HPDF_UINT
style, #HPDF_PageNumStyle
first_page, #HPDF_UINT
prefix, #c_char_p
)
#HPDF_STATUS HPDF_UseJPFonts (HPDF_Doc pdf)
HPDF_UseJPFonts=haru.HPDF_UseJPFonts
HPDF_UseJPFonts.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseKRFonts (HPDF_Doc pdf)
HPDF_UseKRFonts=haru.HPDF_UseKRFonts
HPDF_UseKRFonts.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNSFonts (HPDF_Doc pdf)
HPDF_UseCNSFonts=haru.HPDF_UseCNSFonts
HPDF_UseCNSFonts.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNTFonts (HPDF_Doc pdf)
HPDF_UseCNTFonts=haru.HPDF_UseCNTFonts
HPDF_UseCNTFonts.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- outline ------------------------------------------------------------
#HPDF_Outline HPDF_CreateOutline (HPDF_Doc pdf, HPDF_Outline parent, const char *title, HPDF_Encoder encoder)
HPDF_CreateOutline=haru.HPDF_CreateOutline
HPDF_CreateOutline.restype=HPDF_Outline
#HPDF_STATUS HPDF_Outline_SetOpened (HPDF_Outline outline, HPDF_BOOL opened)
HPDF_Outline_SetOpened=haru.HPDF_Outline_SetOpened
HPDF_Outline_SetOpened.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Outline_SetDestination (HPDF_Outline outline, HPDF_Destination dst)
HPDF_Outline_SetDestination=haru.HPDF_Outline_SetDestination
HPDF_Outline_SetDestination.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- destination --------------------------------------------------------
#HPDF_Destination HPDF_Page_CreateDestination (HPDF_Page page)
HPDF_Page_CreateDestination=haru.HPDF_Page_CreateDestination
HPDF_Page_CreateDestination.restype=HPDF_Destination
#HPDF_STATUS HPDF_Destination_SetXYZ (HPDF_Destination dst, HPDF_REAL left, HPDF_REAL top, HPDF_REAL zoom)
_HPDF_Destination_SetXYZ=haru.HPDF_Destination_SetXYZ
_HPDF_Destination_SetXYZ.restype=HPDF_STATUS
def HPDF_Destination_SetXYZ(
dst, #HPDF_Destination
left, #HPDF_REAL
top, #HPDF_REAL
zoom, #HPDF_REAL
):
left=HPDF_REAL(left)
top=HPDF_REAL(top)
zoom=HPDF_REAL(zoom)
return _HPDF_Destination_SetXYZ(
dst, #HPDF_Destination
left, #HPDF_REAL
top, #HPDF_REAL
zoom, #HPDF_REAL
)
#HPDF_STATUS HPDF_Destination_SetFit (HPDF_Destination dst)
HPDF_Destination_SetFit=haru.HPDF_Destination_SetFit
HPDF_Destination_SetFit.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Destination_SetFitH (HPDF_Destination dst, HPDF_REAL top)
_HPDF_Destination_SetFitH=haru.HPDF_Destination_SetFitH
_HPDF_Destination_SetFitH.restype=HPDF_STATUS
def HPDF_Destination_SetFitH(
dst, #HPDF_Destination
top, #HPDF_REAL
):
top=HPDF_REAL(top)
return _HPDF_Destination_SetFitH(
dst, #HPDF_Destination
top, #HPDF_REAL
)
#HPDF_STATUS HPDF_Destination_SetFitV (HPDF_Destination dst, HPDF_REAL left)
_HPDF_Destination_SetFitV=haru.HPDF_Destination_SetFitV
_HPDF_Destination_SetFitV.restype=HPDF_STATUS
def HPDF_Destination_SetFitV(
dst, #HPDF_Destination
left, #HPDF_REAL
):
left=HPDF_REAL(left)
return _HPDF_Destination_SetFitV(
dst, #HPDF_Destination
left, #HPDF_REAL
)
#HPDF_STATUS HPDF_Destination_SetFitR (HPDF_Destination dst, HPDF_REAL left, HPDF_REAL bottom, HPDF_REAL right, HPDF_REAL top)
_HPDF_Destination_SetFitR=haru.HPDF_Destination_SetFitR
_HPDF_Destination_SetFitR.restype=HPDF_STATUS
def HPDF_Destination_SetFitR(
dst, #HPDF_Destination
left, #HPDF_REAL
bottom, #HPDF_REAL
right, #HPDF_REAL
top, #HPDF_REAL
):
left=HPDF_REAL(left)
bottom=HPDF_REAL(bottom)
right=HPDF_REAL(right)
top=HPDF_REAL(top)
return _HPDF_Destination_SetFitR(
dst, #HPDF_Destination
left, #HPDF_REAL
bottom, #HPDF_REAL
right, #HPDF_REAL
top, #HPDF_REAL
)
#HPDF_STATUS HPDF_Destination_SetFitB (HPDF_Destination dst)
HPDF_Destination_SetFitB=haru.HPDF_Destination_SetFitB
HPDF_Destination_SetFitB.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Destination_SetFitBH (HPDF_Destination dst, HPDF_REAL top)
_HPDF_Destination_SetFitBH=haru.HPDF_Destination_SetFitBH
_HPDF_Destination_SetFitBH.restype=HPDF_STATUS
def HPDF_Destination_SetFitBH(
dst, #HPDF_Destination
top, #HPDF_REAL
):
top=HPDF_REAL(top)
return _HPDF_Destination_SetFitBH(
dst, #HPDF_Destination
top, #HPDF_REAL
)
#HPDF_STATUS HPDF_Destination_SetFitBV (HPDF_Destination dst, HPDF_REAL left)
_HPDF_Destination_SetFitBV=haru.HPDF_Destination_SetFitBV
_HPDF_Destination_SetFitBV.restype=HPDF_STATUS
def HPDF_Destination_SetFitBV(
dst, #HPDF_Destination
left, #HPDF_REAL
):
left=HPDF_REAL(left)
return _HPDF_Destination_SetFitBV(
dst, #HPDF_Destination
left, #HPDF_REAL
)
#--------------------------------------------------------------------------
#----- encoder ------------------------------------------------------------
#HPDF_Encoder HPDF_GetEncoder (HPDF_Doc pdf, const char *encoding_name)
HPDF_GetEncoder=haru.HPDF_GetEncoder
HPDF_GetEncoder.restype=HPDF_Encoder
#HPDF_Encoder HPDF_GetCurrentEncoder (HPDF_Doc pdf)
HPDF_GetCurrentEncoder=haru.HPDF_GetCurrentEncoder
HPDF_GetCurrentEncoder.restype=HPDF_Encoder
#HPDF_STATUS HPDF_SetCurrentEncoder (HPDF_Doc pdf, const char *encoding_name)
HPDF_SetCurrentEncoder=haru.HPDF_SetCurrentEncoder
HPDF_SetCurrentEncoder.restype=HPDF_STATUS
#HPDF_EncoderType HPDF_Encoder_GetType (HPDF_Encoder encoder)
HPDF_Encoder_GetType=haru.HPDF_Encoder_GetType
HPDF_Encoder_GetType.restype=HPDF_EncoderType
#HPDF_ByteType HPDF_Encoder_GetByteType (HPDF_Encoder encoder, const char *text, HPDF_UINT index)
_HPDF_Encoder_GetByteType=haru.HPDF_Encoder_GetByteType
_HPDF_Encoder_GetByteType.restype=HPDF_ByteType
def HPDF_Encoder_GetByteType(
encoder, #HPDF_Encoder
text, #const char *
index #HPDF_UINT
):
if type(text) in (types.ListType, types.TupleType):
if type(text[-1]) != types.StringType:
text=[chr(i) for i in text]
text=''.join(text)
return _HPDF_Encoder_GetByteType(
encoder, #HPDF_Encoder
text, #const char *
index #HPDF_UINT
)
#HPDF_UNICODE HPDF_Encoder_GetUnicode (HPDF_Encoder encoder, HPDF_UINT16 code)
HPDF_Encoder_GetUnicode=haru.HPDF_Encoder_GetUnicode
HPDF_Encoder_GetUnicode.restype=HPDF_UNICODE
#HPDF_WritingMode HPDF_Encoder_GetWritingMode (HPDF_Encoder encoder)
HPDF_Encoder_GetWritingMode=haru.HPDF_Encoder_GetWritingMode
HPDF_Encoder_GetWritingMode.restype=HPDF_WritingMode
#HPDF_STATUS HPDF_UseJPEncodings (HPDF_Doc pdf)
HPDF_UseJPEncodings=haru.HPDF_UseJPEncodings
HPDF_UseJPEncodings.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseKREncodings (HPDF_Doc pdf)
HPDF_UseKREncodings=haru.HPDF_UseKREncodings
HPDF_UseKREncodings.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNSEncodings (HPDF_Doc pdf)
HPDF_UseCNSEncodings=haru.HPDF_UseCNSEncodings
HPDF_UseCNSEncodings.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNTEncodings (HPDF_Doc pdf)
HPDF_UseCNTEncodings=haru.HPDF_UseCNTEncodings
HPDF_UseCNTEncodings.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- annotation ---------------------------------------------------------
#HPDF_Annotation HPDF_Page_CreateTextAnnot (HPDF_Page page, HPDF_Rect rect, const char *text, HPDF_Encoder encoder)
HPDF_Page_CreateTextAnnot=haru.HPDF_Page_CreateTextAnnot
HPDF_Page_CreateTextAnnot.restype=HPDF_Annotation
#HPDF_Annotation HPDF_Page_CreateLinkAnnot (HPDF_Page page, HPDF_Rect rect, HPDF_Destination dst)
HPDF_Page_CreateLinkAnnot=haru.HPDF_Page_CreateLinkAnnot
HPDF_Page_CreateLinkAnnot.restype=HPDF_Annotation
#HPDF_Annotation HPDF_Page_CreateURILinkAnnot (HPDF_Page page, HPDF_Rect rect, const char *uri)
HPDF_Page_CreateURILinkAnnot=haru.HPDF_Page_CreateURILinkAnnot
HPDF_Page_CreateURILinkAnnot.restype=HPDF_Annotation
#HPDF_STATUS HPDF_LinkAnnot_SetHighlightMode (HPDF_Annotation annot, HPDF_AnnotHighlightMode mode)
HPDF_LinkAnnot_SetHighlightMode=haru.HPDF_LinkAnnot_SetHighlightMode
HPDF_LinkAnnot_SetHighlightMode.restype=HPDF_STATUS
#HPDF_STATUS HPDF_LinkAnnot_SetBorderStyle (HPDF_Annotation annot, HPDF_REAL width, HPDF_UINT16 dash_on, HPDF_UINT16 dash_off)
_HPDF_LinkAnnot_SetBorderStyle=haru.HPDF_LinkAnnot_SetBorderStyle
_HPDF_LinkAnnot_SetBorderStyle.restype=HPDF_STATUS
def HPDF_LinkAnnot_SetBorderStyle(
annot, #HPDF_Annotation
width, #HPDF_REAL
dash_on, #HPDF_UINT16
dash_off, #HPDF_UINT16
):
width=HPDF_REAL(width)
dash_on=HPDF_UINT16(dash_on)
dash_off=HPDF_UINT16(dash_off)
return _HPDF_LinkAnnot_SetBorderStyle(
annot, #HPDF_Annotation
width, #HPDF_REAL
dash_on, #HPDF_UINT16
dash_off, #HPDF_UINT16
)
#HPDF_STATUS HPDF_TextAnnot_SetIcon (HPDF_Annotation annot, HPDF_AnnotIcon icon)
HPDF_TextAnnot_SetIcon=haru.HPDF_TextAnnot_SetIcon
HPDF_TextAnnot_SetIcon.restype=HPDF_STATUS
#HPDF_STATUS HPDF_TextAnnot_SetOpened (HPDF_Annotation annot, HPDF_BOOL opened)
HPDF_TextAnnot_SetOpened=haru.HPDF_TextAnnot_SetOpened
HPDF_TextAnnot_SetOpened.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- image data ---------------------------------------------------------
#HPDF_Image HPDF_LoadPngImageFromFile (HPDF_Doc pdf, const char *filename)
HPDF_LoadPngImageFromFile=haru.HPDF_LoadPngImageFromFile
HPDF_LoadPngImageFromFile.restype=HPDF_Image
#HPDF_Image HPDF_LoadPngImageFromFile2 (HPDF_Doc pdf, const char *filename)
HPDF_LoadPngImageFromFile2=haru.HPDF_LoadPngImageFromFile2
HPDF_LoadPngImageFromFile2.restype=HPDF_Image
#HPDF_Image HPDF_LoadJpegImageFromFile (HPDF_Doc pdf, const char *filename)
HPDF_LoadJpegImageFromFile=haru.HPDF_LoadJpegImageFromFile
HPDF_LoadJpegImageFromFile.restype=HPDF_Image
#HPDF_Image HPDF_LoadRawImageFromFile (HPDF_Doc pdf, const char *filename, HPDF_UINT width, HPDF_UINT height, HPDF_ColorSpace color_space)
_HPDF_LoadRawImageFromFile=haru.HPDF_LoadRawImageFromFile
_HPDF_LoadRawImageFromFile.restype=HPDF_Image
def HPDF_LoadRawImageFromFile(
pdf, #HPDF_Doc
filename, #c_char_p
width, #HPDF_UINT
height, #HPDF_UINT
color_space, #HPDF_ColorSpace
):
width=HPDF_UINT(width)
height=HPDF_UINT(height)
return _HPDF_LoadRawImageFromFile(
pdf, #HPDF_Doc
filename, #c_char_p
width, #HPDF_UINT
height, #HPDF_UINT
color_space, #HPDF_ColorSpace
)
#HPDF_Image HPDF_LoadRawImageFromMem (HPDF_Doc pdf, const HPDF_BYTE *buf, HPDF_UINT width, HPDF_UINT height, HPDF_ColorSpace color_space, HPDF_UINT bits_per_component)
_HPDF_LoadRawImageFromMem=haru.HPDF_LoadRawImageFromMem
_HPDF_LoadRawImageFromMem.restype=HPDF_Image
def HPDF_LoadRawImageFromMem(
pdf, #HPDF_Doc
buf, #POINTER(HPDF_BYTE)
width, #HPDF_UINT
height, #HPDF_UINT
color_space, #HPDF_ColorSpace
bits_per_component, #HPDF_UINT
):
if type(buf) in (types.ListType, types.TupleType):
size=len(buf)
buf=pointer((HPDF_BYTE*size)(*buf))
if height in [0, None]:
height=size/width
width=HPDF_UINT(width)
height=HPDF_UINT(height)
bits_per_component=HPDF_UINT(bits_per_component)
return _HPDF_LoadRawImageFromMem(
pdf, #HPDF_Doc
buf, #POINTER(HPDF_BYTE)
width, #HPDF_UINT
height, #HPDF_UINT
color_space, #HPDF_ColorSpace
bits_per_component, #HPDF_UINT
)
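#A minimal sketch of loading raw pixels from Python data (commented out; not
#part of the original binding). It assumes `doc` is a document handle and that
#HPDF_CS_DEVICE_GRAY is the color-space constant declared earlier:
#
#    pixels = [0, 64, 128, 255] * 16 #an 8x8 grayscale test pattern
#    image = HPDF_LoadRawImageFromMem(doc, pixels, 8, 8, HPDF_CS_DEVICE_GRAY, 8)
#    #height may also be passed as None; the wrapper derives it as len(buf)/width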
#HPDF_Point HPDF_Image_GetSize (HPDF_Image image)
HPDF_Image_GetSize=haru.HPDF_Image_GetSize
HPDF_Image_GetSize.restype=HPDF_Point
#HPDF_STATUS HPDF_Image_GetSize2 (HPDF_Image image, HPDF_Point *size)
_HPDF_Image_GetSize2=haru.HPDF_Image_GetSize2
_HPDF_Image_GetSize2.restype=HPDF_STATUS
def HPDF_Image_GetSize2(
image, #HPDF_Image
size=None, #POINTER(HPDF_Point)
):
    size=HPDF_Point() #instantiate the out-parameter (the original was missing the parentheses)
    ret= _HPDF_Image_GetSize2(
        image, #HPDF_Image
        byref(size), #POINTER(HPDF_Point)
        )
    return ret, size.x, size.y
#HPDF_UINT HPDF_Image_GetWidth (HPDF_Image image)
HPDF_Image_GetWidth=haru.HPDF_Image_GetWidth
HPDF_Image_GetWidth.restype=HPDF_UINT
#HPDF_UINT HPDF_Image_GetHeight (HPDF_Image image)
HPDF_Image_GetHeight=haru.HPDF_Image_GetHeight
HPDF_Image_GetHeight.restype=HPDF_UINT
#HPDF_UINT HPDF_Image_GetBitsPerComponent (HPDF_Image image)
HPDF_Image_GetBitsPerComponent=haru.HPDF_Image_GetBitsPerComponent
HPDF_Image_GetBitsPerComponent.restype=HPDF_UINT
#const char* HPDF_Image_GetColorSpace (HPDF_Image image)
HPDF_Image_GetColorSpace=haru.HPDF_Image_GetColorSpace
HPDF_Image_GetColorSpace.restype=c_char_p
#HPDF_STATUS HPDF_Image_SetColorMask (HPDF_Image image, HPDF_UINT rmin, HPDF_UINT rmax, HPDF_UINT gmin, HPDF_UINT gmax, HPDF_UINT bmin, HPDF_UINT bmax)
_HPDF_Image_SetColorMask=haru.HPDF_Image_SetColorMask
_HPDF_Image_SetColorMask.restype=HPDF_STATUS
def HPDF_Image_SetColorMask(
image, #HPDF_Image
rmin, #HPDF_UINT
rmax, #HPDF_UINT
gmin, #HPDF_UINT
gmax, #HPDF_UINT
bmin, #HPDF_UINT
bmax, #HPDF_UINT
):
rmin=HPDF_UINT(rmin)
rmax=HPDF_UINT(rmax)
gmin=HPDF_UINT(gmin)
gmax=HPDF_UINT(gmax)
bmin=HPDF_UINT(bmin)
bmax=HPDF_UINT(bmax)
return _HPDF_Image_SetColorMask(
image, #HPDF_Image
rmin, #HPDF_UINT
rmax, #HPDF_UINT
gmin, #HPDF_UINT
gmax, #HPDF_UINT
bmin, #HPDF_UINT
bmax, #HPDF_UINT
)
#HPDF_STATUS HPDF_Image_SetMaskImage (HPDF_Image image, HPDF_Image mask_image)
HPDF_Image_SetMaskImage=haru.HPDF_Image_SetMaskImage
HPDF_Image_SetMaskImage.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- info dictionary ----------------------------------------------------
#HPDF_STATUS HPDF_SetInfoAttr (HPDF_Doc pdf, HPDF_InfoType type, const char *value)
HPDF_SetInfoAttr=haru.HPDF_SetInfoAttr
HPDF_SetInfoAttr.restype=HPDF_STATUS
#const char* HPDF_GetInfoAttr (HPDF_Doc pdf, HPDF_InfoType type)
HPDF_GetInfoAttr=haru.HPDF_GetInfoAttr
HPDF_GetInfoAttr.restype=c_char_p
#HPDF_STATUS HPDF_SetInfoDateAttr (HPDF_Doc pdf, HPDF_InfoType type, HPDF_Date value)
HPDF_SetInfoDateAttr=haru.HPDF_SetInfoDateAttr
HPDF_SetInfoDateAttr.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- encryption ---------------------------------------------------------
#HPDF_STATUS HPDF_SetPassword (HPDF_Doc pdf, const char *owner_passwd, const char *user_passwd)
HPDF_SetPassword=haru.HPDF_SetPassword
HPDF_SetPassword.restype=HPDF_STATUS
#HPDF_STATUS HPDF_SetPermission (HPDF_Doc pdf, HPDF_UINT permission)
_HPDF_SetPermission=haru.HPDF_SetPermission
_HPDF_SetPermission.restype=HPDF_STATUS
def HPDF_SetPermission(
pdf, #HPDF_Doc
permission, #HPDF_UINT
):
permission=HPDF_UINT(int(permission))
return _HPDF_SetPermission(
pdf, #HPDF_Doc
permission, #HPDF_UINT
)
#HPDF_STATUS HPDF_SetEncryptionMode (HPDF_Doc pdf, HPDF_EncryptMode mode, HPDF_UINT key_len)
_HPDF_SetEncryptionMode=haru.HPDF_SetEncryptionMode
_HPDF_SetEncryptionMode.restype=HPDF_STATUS
def HPDF_SetEncryptionMode(
pdf, #HPDF_Doc
mode, #HPDF_EncryptMode
key_len, #HPDF_UINT
):
key_len=HPDF_UINT(int(key_len))
return _HPDF_SetEncryptionMode(
pdf, #HPDF_Doc
mode, #HPDF_EncryptMode
key_len, #HPDF_UINT
)
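#A minimal encryption sketch (commented out; not part of the original binding).
#libharu requires HPDF_SetPassword to be called before permissions or an
#encryption mode are applied; HPDF_ENABLE_PRINT and HPDF_ENCRYPT_R3 are assumed
#to be the constants declared earlier in this module:
#
#    HPDF_SetPassword(doc, "owner-secret", "user-secret")
#    HPDF_SetPermission(doc, HPDF_ENABLE_PRINT)
#    HPDF_SetEncryptionMode(doc, HPDF_ENCRYPT_R3, 16) #16-byte key for revision 3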
#--------------------------------------------------------------------------
#----- compression --------------------------------------------------------
#HPDF_STATUS HPDF_SetCompressionMode (HPDF_Doc pdf, HPDF_UINT mode)
HPDF_SetCompressionMode=haru.HPDF_SetCompressionMode
HPDF_SetCompressionMode.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- font ---------------------------------------------------------------
#const char* HPDF_Font_GetFontName (HPDF_Font font)
HPDF_Font_GetFontName=haru.HPDF_Font_GetFontName
HPDF_Font_GetFontName.restype=c_char_p
#const char* HPDF_Font_GetEncodingName (HPDF_Font font)
HPDF_Font_GetEncodingName=haru.HPDF_Font_GetEncodingName
HPDF_Font_GetEncodingName.restype=c_char_p
#HPDF_INT HPDF_Font_GetUnicodeWidth (HPDF_Font font, HPDF_UNICODE code)
HPDF_Font_GetUnicodeWidth=haru.HPDF_Font_GetUnicodeWidth
HPDF_Font_GetUnicodeWidth.restype=HPDF_INT
#HPDF_Box HPDF_Font_GetBBox (HPDF_Font font)
HPDF_Font_GetBBox=haru.HPDF_Font_GetBBox
HPDF_Font_GetBBox.restype=HPDF_Box
#HPDF_INT HPDF_Font_GetAscent (HPDF_Font font)
HPDF_Font_GetAscent=haru.HPDF_Font_GetAscent
HPDF_Font_GetAscent.restype=HPDF_INT
#HPDF_INT HPDF_Font_GetDescent (HPDF_Font font)
HPDF_Font_GetDescent=haru.HPDF_Font_GetDescent
HPDF_Font_GetDescent.restype=HPDF_INT
#HPDF_UINT HPDF_Font_GetXHeight (HPDF_Font font)
HPDF_Font_GetXHeight=haru.HPDF_Font_GetXHeight
HPDF_Font_GetXHeight.restype=HPDF_UINT
#HPDF_UINT HPDF_Font_GetCapHeight (HPDF_Font font)
HPDF_Font_GetCapHeight=haru.HPDF_Font_GetCapHeight
HPDF_Font_GetCapHeight.restype=HPDF_UINT
#HPDF_TextWidth HPDF_Font_TextWidth (HPDF_Font font, const HPDF_BYTE *text, HPDF_UINT len)
HPDF_Font_TextWidth=haru.HPDF_Font_TextWidth
HPDF_Font_TextWidth.restype=HPDF_TextWidth
#HPDF_UINT HPDF_Font_MeasureText (HPDF_Font font, const HPDF_BYTE *text, HPDF_UINT len, HPDF_REAL width, HPDF_REAL font_size, HPDF_REAL char_space, HPDF_REAL word_space, HPDF_BOOL wordwrap, HPDF_REAL *real_width)
_HPDF_Font_MeasureText=haru.HPDF_Font_MeasureText
_HPDF_Font_MeasureText.restype=HPDF_UINT
def HPDF_Font_MeasureText(
font, #HPDF_Font
text, #POINTER(HPDF_BYTE)
length, #HPDF_UINT
width, #HPDF_REAL
font_size, #HPDF_REAL
char_space, #HPDF_REAL
word_space, #HPDF_REAL
wordwrap, #HPDF_BOOL
real_width, #POINTER(HPDF_REAL)
):
if type(text) in (types.TupleType, types.ListType):
length=len(text)
text=pointer((HPDF_BYTE*length)(*text))
length=HPDF_UINT(int(length))
width=HPDF_REAL(width)
font_size=HPDF_REAL(font_size)
char_space=HPDF_REAL(char_space)
word_space=HPDF_REAL(word_space)
    real_width=pointer(HPDF_REAL(real_width)) #out-parameter: pass a pointer, not a bare value
return _HPDF_Font_MeasureText(
font, #HPDF_Font
text, #POINTER(HPDF_BYTE)
length, #HPDF_UINT
width, #HPDF_REAL
font_size, #HPDF_REAL
char_space, #HPDF_REAL
word_space, #HPDF_REAL
wordwrap, #HPDF_BOOL
real_width, #POINTER(HPDF_REAL)
)
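#A minimal measuring sketch (commented out; not part of the original binding).
#`font` is assumed to come from an HPDF_GetFont wrapper defined earlier; the
#call returns how many bytes of `data` fit into a 200-point line at 12 pt:
#
#    data = [ord(c) for c in "Hello world"]
#    fit = HPDF_Font_MeasureText(font, data, len(data),
#                                200.0, 12.0, 0.0, 0.0, 1, 0.0) #1 = HPDF_TRUE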
#--------------------------------------------------------------------------
#----- extended graphics state --------------------------------------------
#HPDF_ExtGState HPDF_CreateExtGState (HPDF_Doc pdf)
HPDF_CreateExtGState=haru.HPDF_CreateExtGState
HPDF_CreateExtGState.restype=HPDF_ExtGState
#HPDF_STATUS HPDF_ExtGState_SetAlphaStroke (HPDF_ExtGState ext_gstate, HPDF_REAL value)
_HPDF_ExtGState_SetAlphaStroke=haru.HPDF_ExtGState_SetAlphaStroke
_HPDF_ExtGState_SetAlphaStroke.restype=HPDF_STATUS
def HPDF_ExtGState_SetAlphaStroke(
ext_gstate, #HPDF_ExtGState
value, #HPDF_REAL
):
value=HPDF_REAL(value)
return _HPDF_ExtGState_SetAlphaStroke(
ext_gstate, #HPDF_ExtGState
value, #HPDF_REAL
)
#HPDF_STATUS HPDF_ExtGState_SetAlphaFill (HPDF_ExtGState ext_gstate, HPDF_REAL value)
_HPDF_ExtGState_SetAlphaFill=haru.HPDF_ExtGState_SetAlphaFill
_HPDF_ExtGState_SetAlphaFill.restype=HPDF_STATUS
def HPDF_ExtGState_SetAlphaFill(
ext_gstate, #HPDF_ExtGState
value, #HPDF_REAL
):
value=HPDF_REAL(value)
return _HPDF_ExtGState_SetAlphaFill(
ext_gstate, #HPDF_ExtGState
value, #HPDF_REAL
)
#HPDF_STATUS HPDF_ExtGState_SetBlendMode (HPDF_ExtGState ext_gstate, HPDF_BlendMode mode)
HPDF_ExtGState_SetBlendMode=haru.HPDF_ExtGState_SetBlendMode
HPDF_ExtGState_SetBlendMode.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#HPDF_REAL HPDF_Page_TextWidth (HPDF_Page page, const char *text)
_HPDF_Page_TextWidth=haru.HPDF_Page_TextWidth
_HPDF_Page_TextWidth.restype=HPDF_REAL
def HPDF_Page_TextWidth(
page, #HPDF_Page
text, #c_char_p
):
if type(text) in (types.ListType, types.TupleType):
if type(text[-1]) != types.StringType:
text=[chr(i) for i in text]
text=''.join(text)
return _HPDF_Page_TextWidth(
page, #HPDF_Page
text, #c_char_p
)
#HPDF_UINT HPDF_Page_MeasureText (HPDF_Page page, const char *text, HPDF_REAL width, HPDF_BOOL wordwrap, HPDF_REAL *real_width)
_HPDF_Page_MeasureText=haru.HPDF_Page_MeasureText
_HPDF_Page_MeasureText.restype=HPDF_UINT
def HPDF_Page_MeasureText(
page, #HPDF_Page
text, #c_char_p
width, #HPDF_REAL
wordwrap, #HPDF_BOOL
real_width, #POINTER(HPDF_REAL)
):
width=HPDF_REAL(width)
    real_width=pointer(HPDF_REAL(real_width)) #out-parameter: pass a pointer, not a bare value
return _HPDF_Page_MeasureText(
page, #HPDF_Page
text, #c_char_p
width, #HPDF_REAL
wordwrap, #HPDF_BOOL
real_width, #POINTER(HPDF_REAL)
)
#HPDF_REAL HPDF_Page_GetWidth (HPDF_Page page)
HPDF_Page_GetWidth=haru.HPDF_Page_GetWidth
HPDF_Page_GetWidth.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetHeight (HPDF_Page page)
HPDF_Page_GetHeight=haru.HPDF_Page_GetHeight
HPDF_Page_GetHeight.restype=HPDF_REAL
#HPDF_UINT16 HPDF_Page_GetGMode (HPDF_Page page)
HPDF_Page_GetGMode=haru.HPDF_Page_GetGMode
HPDF_Page_GetGMode.restype=HPDF_UINT16
#HPDF_Point HPDF_Page_GetCurrentPos (HPDF_Page page)
HPDF_Page_GetCurrentPos=haru.HPDF_Page_GetCurrentPos
HPDF_Page_GetCurrentPos.restype=HPDF_Point
#HPDF_STATUS HPDF_Page_GetCurrentPos2 (HPDF_Page page, HPDF_Point *pos)
_HPDF_Page_GetCurrentPos2=haru.HPDF_Page_GetCurrentPos2
_HPDF_Page_GetCurrentPos2.restype=HPDF_STATUS
def HPDF_Page_GetCurrentPos2(
page, #HPDF_Page
pos=None, #POINTER(HPDF_Point)
):
    pos=HPDF_Point()
    ret= _HPDF_Page_GetCurrentPos2(
        page, #HPDF_Page
        byref(pos), #POINTER(HPDF_Point)
        )
    return ret, pos.x, pos.y
#HPDF_Point HPDF_Page_GetCurrentTextPos (HPDF_Page page)
HPDF_Page_GetCurrentTextPos=haru.HPDF_Page_GetCurrentTextPos
HPDF_Page_GetCurrentTextPos.restype=HPDF_Point
#HPDF_STATUS HPDF_Page_GetCurrentTextPos2 (HPDF_Page page, HPDF_Point *pos)
_HPDF_Page_GetCurrentTextPos2=haru.HPDF_Page_GetCurrentTextPos2
_HPDF_Page_GetCurrentTextPos2.restype=HPDF_STATUS
def HPDF_Page_GetCurrentTextPos2(
page, #HPDF_Page
pos=None, #POINTER(HPDF_Point)
):
    pos=HPDF_Point()
    ret= _HPDF_Page_GetCurrentTextPos2(
        page, #HPDF_Page
        byref(pos), #POINTER(HPDF_Point)
        )
    return ret, pos.x, pos.y
#HPDF_Font HPDF_Page_GetCurrentFont (HPDF_Page page)
HPDF_Page_GetCurrentFont=haru.HPDF_Page_GetCurrentFont
HPDF_Page_GetCurrentFont.restype=HPDF_Font
#HPDF_REAL HPDF_Page_GetCurrentFontSize (HPDF_Page page)
HPDF_Page_GetCurrentFontSize=haru.HPDF_Page_GetCurrentFontSize
HPDF_Page_GetCurrentFontSize.restype=HPDF_REAL
#HPDF_TransMatrix HPDF_Page_GetTransMatrix (HPDF_Page page)
HPDF_Page_GetTransMatrix=haru.HPDF_Page_GetTransMatrix
HPDF_Page_GetTransMatrix.restype=HPDF_TransMatrix
#HPDF_REAL HPDF_Page_GetLineWidth (HPDF_Page page)
HPDF_Page_GetLineWidth=haru.HPDF_Page_GetLineWidth
HPDF_Page_GetLineWidth.restype=HPDF_REAL
#HPDF_LineCap HPDF_Page_GetLineCap (HPDF_Page page)
HPDF_Page_GetLineCap=haru.HPDF_Page_GetLineCap
HPDF_Page_GetLineCap.restype=HPDF_LineCap
#HPDF_LineJoin HPDF_Page_GetLineJoin (HPDF_Page page)
HPDF_Page_GetLineJoin=haru.HPDF_Page_GetLineJoin
HPDF_Page_GetLineJoin.restype=HPDF_LineJoin
#HPDF_REAL HPDF_Page_GetMiterLimit (HPDF_Page page)
HPDF_Page_GetMiterLimit=haru.HPDF_Page_GetMiterLimit
HPDF_Page_GetMiterLimit.restype=HPDF_REAL
#HPDF_DashMode HPDF_Page_GetDash (HPDF_Page page)
HPDF_Page_GetDash=haru.HPDF_Page_GetDash
HPDF_Page_GetDash.restype=HPDF_DashMode
#HPDF_REAL HPDF_Page_GetFlat (HPDF_Page page)
HPDF_Page_GetFlat=haru.HPDF_Page_GetFlat
HPDF_Page_GetFlat.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetCharSpace (HPDF_Page page)
HPDF_Page_GetCharSpace=haru.HPDF_Page_GetCharSpace
HPDF_Page_GetCharSpace.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetWordSpace (HPDF_Page page)
HPDF_Page_GetWordSpace=haru.HPDF_Page_GetWordSpace
HPDF_Page_GetWordSpace.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetHorizontalScalling (HPDF_Page page)
HPDF_Page_GetHorizontalScalling=haru.HPDF_Page_GetHorizontalScalling
HPDF_Page_GetHorizontalScalling.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetTextLeading (HPDF_Page page)
HPDF_Page_GetTextLeading=haru.HPDF_Page_GetTextLeading
HPDF_Page_GetTextLeading.restype=HPDF_REAL
#HPDF_TextRenderingMode HPDF_Page_GetTextRenderingMode (HPDF_Page page)
HPDF_Page_GetTextRenderingMode=haru.HPDF_Page_GetTextRenderingMode
HPDF_Page_GetTextRenderingMode.restype=HPDF_TextRenderingMode
# This function is obsolete. Use HPDF_Page_GetTextRise.
#HPDF_REAL HPDF_Page_GetTextRaise (HPDF_Page page)
HPDF_Page_GetTextRaise=haru.HPDF_Page_GetTextRaise
HPDF_Page_GetTextRaise.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetTextRise (HPDF_Page page)
HPDF_Page_GetTextRise=haru.HPDF_Page_GetTextRise
HPDF_Page_GetTextRise.restype=HPDF_REAL
#HPDF_RGBColor HPDF_Page_GetRGBFill (HPDF_Page page)
HPDF_Page_GetRGBFill=haru.HPDF_Page_GetRGBFill
HPDF_Page_GetRGBFill.restype=HPDF_RGBColor
#HPDF_RGBColor HPDF_Page_GetRGBStroke (HPDF_Page page)
HPDF_Page_GetRGBStroke=haru.HPDF_Page_GetRGBStroke
HPDF_Page_GetRGBStroke.restype=HPDF_RGBColor
#HPDF_CMYKColor HPDF_Page_GetCMYKFill (HPDF_Page page)
HPDF_Page_GetCMYKFill=haru.HPDF_Page_GetCMYKFill
HPDF_Page_GetCMYKFill.restype=HPDF_CMYKColor
#HPDF_CMYKColor HPDF_Page_GetCMYKStroke (HPDF_Page page)
HPDF_Page_GetCMYKStroke=haru.HPDF_Page_GetCMYKStroke
HPDF_Page_GetCMYKStroke.restype=HPDF_CMYKColor
#HPDF_REAL HPDF_Page_GetGrayFill (HPDF_Page page)
HPDF_Page_GetGrayFill=haru.HPDF_Page_GetGrayFill
HPDF_Page_GetGrayFill.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetGrayStroke (HPDF_Page page)
HPDF_Page_GetGrayStroke=haru.HPDF_Page_GetGrayStroke
HPDF_Page_GetGrayStroke.restype=HPDF_REAL
#HPDF_ColorSpace HPDF_Page_GetStrokingColorSpace (HPDF_Page page)
HPDF_Page_GetStrokingColorSpace=haru.HPDF_Page_GetStrokingColorSpace
HPDF_Page_GetStrokingColorSpace.restype=HPDF_ColorSpace
#HPDF_ColorSpace HPDF_Page_GetFillingColorSpace (HPDF_Page page)
HPDF_Page_GetFillingColorSpace=haru.HPDF_Page_GetFillingColorSpace
HPDF_Page_GetFillingColorSpace.restype=HPDF_ColorSpace
#HPDF_TransMatrix HPDF_Page_GetTextMatrix (HPDF_Page page)
HPDF_Page_GetTextMatrix=haru.HPDF_Page_GetTextMatrix
HPDF_Page_GetTextMatrix.restype=HPDF_TransMatrix
#HPDF_UINT HPDF_Page_GetGStateDepth (HPDF_Page page)
HPDF_Page_GetGStateDepth=haru.HPDF_Page_GetGStateDepth
HPDF_Page_GetGStateDepth.restype=HPDF_UINT
#--------------------------------------------------------------------------
#----- GRAPHICS OPERATORS -------------------------------------------------
#--- General graphics state ---------------------------------------------
# w
#HPDF_STATUS HPDF_Page_SetLineWidth (HPDF_Page page, HPDF_REAL line_width)
_HPDF_Page_SetLineWidth=haru.HPDF_Page_SetLineWidth
_HPDF_Page_SetLineWidth.restype=HPDF_STATUS
def HPDF_Page_SetLineWidth(
page, #HPDF_Page
line_width, #HPDF_REAL
):
line_width=HPDF_REAL(line_width)
return _HPDF_Page_SetLineWidth(
page, #HPDF_Page
line_width, #HPDF_REAL
)
# J
#HPDF_STATUS HPDF_Page_SetLineCap (HPDF_Page page, HPDF_LineCap line_cap)
HPDF_Page_SetLineCap=haru.HPDF_Page_SetLineCap
HPDF_Page_SetLineCap.restype=HPDF_STATUS
# j
#HPDF_STATUS HPDF_Page_SetLineJoin (HPDF_Page page, HPDF_LineJoin line_join)
HPDF_Page_SetLineJoin=haru.HPDF_Page_SetLineJoin
HPDF_Page_SetLineJoin.restype=HPDF_STATUS
# M
#HPDF_STATUS HPDF_Page_SetMiterLimit (HPDF_Page page, HPDF_REAL miter_limit)
_HPDF_Page_SetMiterLimit=haru.HPDF_Page_SetMiterLimit
_HPDF_Page_SetMiterLimit.restype=HPDF_STATUS
def HPDF_Page_SetMiterLimit(
page, #HPDF_Page
miter_limit, #HPDF_REAL
):
miter_limit=HPDF_REAL(miter_limit)
return _HPDF_Page_SetMiterLimit(
page, #HPDF_Page
miter_limit, #HPDF_REAL
)
# d
#HPDF_STATUS HPDF_Page_SetDash (HPDF_Page page, const HPDF_UINT16 *dash_ptn, HPDF_UINT num_param, HPDF_UINT phase)
_HPDF_Page_SetDash=haru.HPDF_Page_SetDash
_HPDF_Page_SetDash.restype=HPDF_STATUS
def HPDF_Page_SetDash(
page, #HPDF_Page
dash_ptn, #POINTER(HPDF_UINT16)
num_param, #HPDF_UINT
phase, #HPDF_UINT
):
if type(dash_ptn) in (types.ListType, types.TupleType):
num_param=len(dash_ptn)
dash_ptn=pointer((HPDF_UINT16*num_param)(*dash_ptn))
return _HPDF_Page_SetDash(
page, #HPDF_Page
dash_ptn, #POINTER(HPDF_UINT16)
num_param, #HPDF_UINT
phase, #HPDF_UINT
)
# ri --not implemented yet
# i
#HPDF_STATUS HPDF_Page_SetFlat (HPDF_Page page, HPDF_REAL flatness)
_HPDF_Page_SetFlat=haru.HPDF_Page_SetFlat
_HPDF_Page_SetFlat.restype=HPDF_STATUS
def HPDF_Page_SetFlat(
page, #HPDF_Page
flatness, #HPDF_REAL
):
flatness=HPDF_REAL(flatness)
return _HPDF_Page_SetFlat(
page, #HPDF_Page
flatness, #HPDF_REAL
)
# gs
#HPDF_STATUS HPDF_Page_SetExtGState (HPDF_Page page, HPDF_ExtGState ext_gstate)
HPDF_Page_SetExtGState=haru.HPDF_Page_SetExtGState
HPDF_Page_SetExtGState.restype=HPDF_STATUS
#--- Special graphic state operator --------------------------------------
# q
#HPDF_STATUS HPDF_Page_GSave (HPDF_Page page)
HPDF_Page_GSave=haru.HPDF_Page_GSave
HPDF_Page_GSave.restype=HPDF_STATUS
# Q
#HPDF_STATUS HPDF_Page_GRestore (HPDF_Page page)
HPDF_Page_GRestore=haru.HPDF_Page_GRestore
HPDF_Page_GRestore.restype=HPDF_STATUS
# cm
#HPDF_STATUS HPDF_Page_Concat (HPDF_Page page, HPDF_REAL a, HPDF_REAL b, HPDF_REAL c, HPDF_REAL d, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_Concat=haru.HPDF_Page_Concat
_HPDF_Page_Concat.restype=HPDF_STATUS
def HPDF_Page_Concat(
page, #HPDF_Page
a, #HPDF_REAL
b, #HPDF_REAL
c, #HPDF_REAL
d, #HPDF_REAL
x, #HPDF_REAL
y, #HPDF_REAL
):
a=HPDF_REAL(a)
b=HPDF_REAL(b)
c=HPDF_REAL(c)
d=HPDF_REAL(d)
x=HPDF_REAL(x)
y=HPDF_REAL(y)
return _HPDF_Page_Concat(
page, #HPDF_Page
a, #HPDF_REAL
b, #HPDF_REAL
c, #HPDF_REAL
d, #HPDF_REAL
x, #HPDF_REAL
y, #HPDF_REAL
)
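#A worked example for the cm operator (commented out; not part of the original
#binding): rotating the coordinate system by t radians uses the matrix
#(cos t, sin t, -sin t, cos t, x, y):
#
#    from math import cos, sin, radians
#    t = radians(30)
#    HPDF_Page_Concat(page, cos(t), sin(t), -sin(t), cos(t), 0.0, 0.0)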
#--- Path construction operator ------------------------------------------
# m
#HPDF_STATUS HPDF_Page_MoveTo (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_MoveTo=haru.HPDF_Page_MoveTo
_HPDF_Page_MoveTo.restype=HPDF_STATUS
def HPDF_Page_MoveTo(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
):
x=HPDF_REAL(x)
y=HPDF_REAL(y)
return _HPDF_Page_MoveTo(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
)
# l
#HPDF_STATUS HPDF_Page_LineTo (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_LineTo=haru.HPDF_Page_LineTo
_HPDF_Page_LineTo.restype=HPDF_STATUS
def HPDF_Page_LineTo(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
):
x=HPDF_REAL(x)
y=HPDF_REAL(y)
return _HPDF_Page_LineTo(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
)
# c
#HPDF_STATUS HPDF_Page_CurveTo (HPDF_Page page, HPDF_REAL x1, HPDF_REAL y1, HPDF_REAL x2, HPDF_REAL y2, HPDF_REAL x3, HPDF_REAL y3)
_HPDF_Page_CurveTo=haru.HPDF_Page_CurveTo
_HPDF_Page_CurveTo.restype=HPDF_STATUS
def HPDF_Page_CurveTo(
page, #HPDF_Page
x1, #HPDF_REAL
y1, #HPDF_REAL
x2, #HPDF_REAL
y2, #HPDF_REAL
x3, #HPDF_REAL
y3, #HPDF_REAL
):
x1=HPDF_REAL(x1)
y1=HPDF_REAL(y1)
x2=HPDF_REAL(x2)
y2=HPDF_REAL(y2)
x3=HPDF_REAL(x3)
y3=HPDF_REAL(y3)
return _HPDF_Page_CurveTo(
page, #HPDF_Page
x1, #HPDF_REAL
y1, #HPDF_REAL
x2, #HPDF_REAL
y2, #HPDF_REAL
x3, #HPDF_REAL
y3, #HPDF_REAL
)
# v
#HPDF_STATUS HPDF_Page_CurveTo2 (HPDF_Page page, HPDF_REAL x2, HPDF_REAL y2, HPDF_REAL x3, HPDF_REAL y3)
_HPDF_Page_CurveTo2=haru.HPDF_Page_CurveTo2
_HPDF_Page_CurveTo2.restype=HPDF_STATUS
def HPDF_Page_CurveTo2(
page, #HPDF_Page
x2, #HPDF_REAL
y2, #HPDF_REAL
x3, #HPDF_REAL
y3, #HPDF_REAL
):
x2=HPDF_REAL(x2)
y2=HPDF_REAL(y2)
x3=HPDF_REAL(x3)
y3=HPDF_REAL(y3)
return _HPDF_Page_CurveTo2(
page, #HPDF_Page
x2, #HPDF_REAL
y2, #HPDF_REAL
x3, #HPDF_REAL
y3, #HPDF_REAL
)
# y
#HPDF_STATUS HPDF_Page_CurveTo3 (HPDF_Page page, HPDF_REAL x1, HPDF_REAL y1, HPDF_REAL x3, HPDF_REAL y3)
_HPDF_Page_CurveTo3=haru.HPDF_Page_CurveTo3
_HPDF_Page_CurveTo3.restype=HPDF_STATUS
def HPDF_Page_CurveTo3(
page, #HPDF_Page
x1, #HPDF_REAL
y1, #HPDF_REAL
x3, #HPDF_REAL
y3, #HPDF_REAL
):
x1=HPDF_REAL(x1)
y1=HPDF_REAL(y1)
x3=HPDF_REAL(x3)
y3=HPDF_REAL(y3)
return _HPDF_Page_CurveTo3(
page, #HPDF_Page
x1, #HPDF_REAL
y1, #HPDF_REAL
x3, #HPDF_REAL
y3, #HPDF_REAL
)
# h
#HPDF_STATUS HPDF_Page_ClosePath (HPDF_Page page)
HPDF_Page_ClosePath=haru.HPDF_Page_ClosePath
HPDF_Page_ClosePath.restype=HPDF_STATUS
# re
#HPDF_STATUS HPDF_Page_Rectangle (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL width, HPDF_REAL height)
_HPDF_Page_Rectangle=haru.HPDF_Page_Rectangle
_HPDF_Page_Rectangle.restype=HPDF_STATUS
def HPDF_Page_Rectangle(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
width, #HPDF_REAL
height, #HPDF_REAL
):
x=HPDF_REAL(x)
y=HPDF_REAL(y)
width=HPDF_REAL(width)
height=HPDF_REAL(height)
return _HPDF_Page_Rectangle(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
width, #HPDF_REAL
height, #HPDF_REAL
)
#--- Path painting operator ---------------------------------------------
# S
#HPDF_STATUS HPDF_Page_Stroke (HPDF_Page page)
_HPDF_Page_Stroke=haru.HPDF_Page_Stroke
_HPDF_Page_Stroke.restype=HPDF_STATUS
def HPDF_Page_Stroke(
page, #HPDF_Page
):
return _HPDF_Page_Stroke(
page, #HPDF_Page
)
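#A minimal path sketch (commented out; not part of the original binding),
#combining the construction operators above with the painting operators below;
#`page` is assumed to come from the HPDF_AddPage wrapper defined earlier:
#
#    HPDF_Page_MoveTo(page, 100.0, 100.0)
#    HPDF_Page_LineTo(page, 200.0, 100.0)
#    HPDF_Page_LineTo(page, 200.0, 200.0)
#    HPDF_Page_ClosePath(page)
#    HPDF_Page_Stroke(page)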
# s
#HPDF_STATUS HPDF_Page_ClosePathStroke (HPDF_Page page)
HPDF_Page_ClosePathStroke=haru.HPDF_Page_ClosePathStroke
HPDF_Page_ClosePathStroke.restype=HPDF_STATUS
# f
#HPDF_STATUS HPDF_Page_Fill (HPDF_Page page)
HPDF_Page_Fill=haru.HPDF_Page_Fill
HPDF_Page_Fill.restype=HPDF_STATUS
# f*
#HPDF_STATUS HPDF_Page_Eofill (HPDF_Page page)
HPDF_Page_Eofill=haru.HPDF_Page_Eofill
HPDF_Page_Eofill.restype=HPDF_STATUS
# B
#HPDF_STATUS HPDF_Page_FillStroke (HPDF_Page page)
HPDF_Page_FillStroke=haru.HPDF_Page_FillStroke
HPDF_Page_FillStroke.restype=HPDF_STATUS
# B*
#HPDF_STATUS HPDF_Page_EofillStroke (HPDF_Page page)
HPDF_Page_EofillStroke=haru.HPDF_Page_EofillStroke
HPDF_Page_EofillStroke.restype=HPDF_STATUS
# b
#HPDF_STATUS HPDF_Page_ClosePathFillStroke (HPDF_Page page)
HPDF_Page_ClosePathFillStroke=haru.HPDF_Page_ClosePathFillStroke
HPDF_Page_ClosePathFillStroke.restype=HPDF_STATUS
# b*
#HPDF_STATUS HPDF_Page_ClosePathEofillStroke (HPDF_Page page)
HPDF_Page_ClosePathEofillStroke=haru.HPDF_Page_ClosePathEofillStroke
HPDF_Page_ClosePathEofillStroke.restype=HPDF_STATUS
# n
#HPDF_STATUS HPDF_Page_EndPath (HPDF_Page page)
HPDF_Page_EndPath=haru.HPDF_Page_EndPath
HPDF_Page_EndPath.restype=HPDF_STATUS
#--- Clipping paths operator --------------------------------------------
# W
#HPDF_STATUS HPDF_Page_Clip (HPDF_Page page)
HPDF_Page_Clip=haru.HPDF_Page_Clip
HPDF_Page_Clip.restype=HPDF_STATUS
# W*
#HPDF_STATUS HPDF_Page_Eoclip (HPDF_Page page)
HPDF_Page_Eoclip=haru.HPDF_Page_Eoclip
HPDF_Page_Eoclip.restype=HPDF_STATUS
#--- Text object operator -----------------------------------------------
# BT
#HPDF_STATUS HPDF_Page_BeginText (HPDF_Page page)
HPDF_Page_BeginText=haru.HPDF_Page_BeginText
HPDF_Page_BeginText.restype=HPDF_STATUS
# ET
#HPDF_STATUS HPDF_Page_EndText (HPDF_Page page)
HPDF_Page_EndText=haru.HPDF_Page_EndText
HPDF_Page_EndText.restype=HPDF_STATUS
#--- Text state ---------------------------------------------------------
# Tc
#HPDF_STATUS HPDF_Page_SetCharSpace (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetCharSpace=haru.HPDF_Page_SetCharSpace
_HPDF_Page_SetCharSpace.restype=HPDF_STATUS
def HPDF_Page_SetCharSpace(
page, #HPDF_Page
value, #HPDF_REAL
):
value=HPDF_REAL(value)
return _HPDF_Page_SetCharSpace(
page, #HPDF_Page
value, #HPDF_REAL
)
# Tw
#HPDF_STATUS HPDF_Page_SetWordSpace (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetWordSpace=haru.HPDF_Page_SetWordSpace
_HPDF_Page_SetWordSpace.restype=HPDF_STATUS
def HPDF_Page_SetWordSpace(
page, #HPDF_Page
value, #HPDF_REAL
):
value=HPDF_REAL(value)
return _HPDF_Page_SetWordSpace(
page, #HPDF_Page
value, #HPDF_REAL
)
# Tz
#HPDF_STATUS HPDF_Page_SetHorizontalScalling (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetHorizontalScalling=haru.HPDF_Page_SetHorizontalScalling
_HPDF_Page_SetHorizontalScalling.restype=HPDF_STATUS
def HPDF_Page_SetHorizontalScalling(
page, #HPDF_Page
value, #HPDF_REAL
):
value=HPDF_REAL(value)
return _HPDF_Page_SetHorizontalScalling(
page, #HPDF_Page
value, #HPDF_REAL
)
# TL
#HPDF_STATUS HPDF_Page_SetTextLeading (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetTextLeading=haru.HPDF_Page_SetTextLeading
_HPDF_Page_SetTextLeading.restype=HPDF_STATUS
def HPDF_Page_SetTextLeading(
page, #HPDF_Page
value, #HPDF_REAL
):
value=HPDF_REAL(value)
return _HPDF_Page_SetTextLeading(
page, #HPDF_Page
value, #HPDF_REAL
)
# Tf
#HPDF_STATUS HPDF_Page_SetFontAndSize (HPDF_Page page, HPDF_Font font, HPDF_REAL size)
_HPDF_Page_SetFontAndSize=haru.HPDF_Page_SetFontAndSize
_HPDF_Page_SetFontAndSize.restype=HPDF_STATUS
def HPDF_Page_SetFontAndSize(
page, #HPDF_Page
font, #HPDF_Font
size, #HPDF_REAL
):
size=HPDF_REAL(size)
return _HPDF_Page_SetFontAndSize(
page, #HPDF_Page
font, #HPDF_Font
size, #HPDF_REAL
)
# Tr
#HPDF_STATUS HPDF_Page_SetTextRenderingMode (HPDF_Page page, HPDF_TextRenderingMode mode)
HPDF_Page_SetTextRenderingMode=haru.HPDF_Page_SetTextRenderingMode
HPDF_Page_SetTextRenderingMode.restype=HPDF_STATUS
# Ts
#HPDF_STATUS HPDF_Page_SetTextRise (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetTextRise=haru.HPDF_Page_SetTextRise
_HPDF_Page_SetTextRise.restype=HPDF_STATUS
def HPDF_Page_SetTextRise(
page, #HPDF_Page
value, #HPDF_REAL
):
value=HPDF_REAL(value)
return _HPDF_Page_SetTextRise(
page, #HPDF_Page
value, #HPDF_REAL
)
# This function is obsolete. Use HPDF_Page_SetTextRise.
#HPDF_STATUS HPDF_Page_SetTextRaise (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetTextRaise=haru.HPDF_Page_SetTextRaise
_HPDF_Page_SetTextRaise.restype=HPDF_STATUS
def HPDF_Page_SetTextRaise(
page, #HPDF_Page
value, #HPDF_REAL
):
value=HPDF_REAL(value)
return _HPDF_Page_SetTextRaise(
page, #HPDF_Page
value, #HPDF_REAL
)
#--- Text positioning ---------------------------------------------------
# Td
#HPDF_STATUS HPDF_Page_MoveTextPos (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_MoveTextPos=haru.HPDF_Page_MoveTextPos
_HPDF_Page_MoveTextPos.restype=HPDF_STATUS
def HPDF_Page_MoveTextPos(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
):
x=HPDF_REAL(x)
y=HPDF_REAL(y)
return _HPDF_Page_MoveTextPos(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
)
# TD
#HPDF_STATUS HPDF_Page_MoveTextPos2 (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_MoveTextPos2=haru.HPDF_Page_MoveTextPos2
_HPDF_Page_MoveTextPos2.restype=HPDF_STATUS
def HPDF_Page_MoveTextPos2(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
):
x=HPDF_REAL(x)
y=HPDF_REAL(y)
return _HPDF_Page_MoveTextPos2(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
)
# Tm
#HPDF_STATUS HPDF_Page_SetTextMatrix (HPDF_Page page, HPDF_REAL a, HPDF_REAL b, HPDF_REAL c, HPDF_REAL d, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_SetTextMatrix=haru.HPDF_Page_SetTextMatrix
_HPDF_Page_SetTextMatrix.restype=HPDF_STATUS
def HPDF_Page_SetTextMatrix(
page, #HPDF_Page
a, #HPDF_REAL
b, #HPDF_REAL
c, #HPDF_REAL
d, #HPDF_REAL
x, #HPDF_REAL
y, #HPDF_REAL
):
a=HPDF_REAL(a)
b=HPDF_REAL(b)
c=HPDF_REAL(c)
d=HPDF_REAL(d)
x=HPDF_REAL(x)
y=HPDF_REAL(y)
return _HPDF_Page_SetTextMatrix(
page, #HPDF_Page
a, #HPDF_REAL
b, #HPDF_REAL
c, #HPDF_REAL
d, #HPDF_REAL
x, #HPDF_REAL
y, #HPDF_REAL
)
# T*
#HPDF_STATUS HPDF_Page_MoveToNextLine (HPDF_Page page)
HPDF_Page_MoveToNextLine=haru.HPDF_Page_MoveToNextLine
HPDF_Page_MoveToNextLine.restype=HPDF_STATUS
#--- Text showing -------------------------------------------------------
# Tj
#HPDF_STATUS HPDF_Page_ShowText (HPDF_Page page, const char *text)
_HPDF_Page_ShowText=haru.HPDF_Page_ShowText
_HPDF_Page_ShowText.restype=HPDF_STATUS
def HPDF_Page_ShowText(
        page, #HPDF_Page
        text, #c_char_p
        ):
    if type(text) in (types.ListType, types.TupleType):
        if type(text[-1]) != types.StringType:
            text=[chr(i) for i in text]
        text=''.join(text)
    return _HPDF_Page_ShowText(
        page, #HPDF_Page
        text, #c_char_p
        )
# TJ
# '
#HPDF_STATUS HPDF_Page_ShowTextNextLine (HPDF_Page page, const char *text)
HPDF_Page_ShowTextNextLine=haru.HPDF_Page_ShowTextNextLine
HPDF_Page_ShowTextNextLine.restype=HPDF_STATUS
# "
#HPDF_STATUS HPDF_Page_ShowTextNextLineEx (HPDF_Page page, HPDF_REAL word_space, HPDF_REAL char_space, const char *text)
_HPDF_Page_ShowTextNextLineEx=haru.HPDF_Page_ShowTextNextLineEx
_HPDF_Page_ShowTextNextLineEx.restype=HPDF_STATUS
def HPDF_Page_ShowTextNextLineEx(
page, #HPDF_Page
word_space, #HPDF_REAL
char_space, #HPDF_REAL
text, #c_char_p
):
word_space=HPDF_REAL(word_space)
char_space=HPDF_REAL(char_space)
return _HPDF_Page_ShowTextNextLineEx(
page, #HPDF_Page
word_space, #HPDF_REAL
char_space, #HPDF_REAL
text, #c_char_p
)
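#A minimal text sketch (commented out; not part of the original binding).
#Text-showing operators are only valid between HPDF_Page_BeginText and
#HPDF_Page_EndText; `font` is assumed to come from an HPDF_GetFont wrapper
#defined earlier:
#
#    HPDF_Page_BeginText(page)
#    HPDF_Page_SetFontAndSize(page, font, 12.0)
#    HPDF_Page_TextOut(page, 72.0, 720.0, "Hello from libharu")
#    HPDF_Page_EndText(page)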
#--- Color showing ------------------------------------------------------
# cs --not implemented yet
# CS --not implemented yet
# sc --not implemented yet
# scn --not implemented yet
# SC --not implemented yet
# SCN --not implemented yet
# g
#HPDF_STATUS HPDF_Page_SetGrayFill (HPDF_Page page, HPDF_REAL gray)
_HPDF_Page_SetGrayFill=haru.HPDF_Page_SetGrayFill
_HPDF_Page_SetGrayFill.restype=HPDF_STATUS
def HPDF_Page_SetGrayFill(
page, #HPDF_Page
gray, #HPDF_REAL
):
gray=HPDF_REAL(gray)
return _HPDF_Page_SetGrayFill(
page, #HPDF_Page
gray, #HPDF_REAL
)
# G
#HPDF_STATUS HPDF_Page_SetGrayStroke (HPDF_Page page, HPDF_REAL gray)
_HPDF_Page_SetGrayStroke=haru.HPDF_Page_SetGrayStroke
_HPDF_Page_SetGrayStroke.restype=HPDF_STATUS
def HPDF_Page_SetGrayStroke(
page, #HPDF_Page
gray, #HPDF_REAL
):
gray=HPDF_REAL(gray)
return _HPDF_Page_SetGrayStroke(
page, #HPDF_Page
gray, #HPDF_REAL
)
# rg
#HPDF_STATUS HPDF_Page_SetRGBFill (HPDF_Page page, HPDF_REAL r, HPDF_REAL g, HPDF_REAL b)
_HPDF_Page_SetRGBFill=haru.HPDF_Page_SetRGBFill
_HPDF_Page_SetRGBFill.restype=HPDF_STATUS
def HPDF_Page_SetRGBFill(
page, #HPDF_Page
r, #HPDF_REAL
g, #HPDF_REAL
b, #HPDF_REAL
):
r=HPDF_REAL(r)
g=HPDF_REAL(g)
b=HPDF_REAL(b)
return _HPDF_Page_SetRGBFill(
page, #HPDF_Page
r, #HPDF_REAL
g, #HPDF_REAL
b, #HPDF_REAL
)
# RG
#HPDF_STATUS HPDF_Page_SetRGBStroke (HPDF_Page page, HPDF_REAL r, HPDF_REAL g, HPDF_REAL b)
_HPDF_Page_SetRGBStroke=haru.HPDF_Page_SetRGBStroke
_HPDF_Page_SetRGBStroke.restype=HPDF_STATUS
def HPDF_Page_SetRGBStroke(
page, #HPDF_Page
r, #HPDF_REAL
g, #HPDF_REAL
b, #HPDF_REAL
):
r=HPDF_REAL(r)
g=HPDF_REAL(g)
b=HPDF_REAL(b)
return _HPDF_Page_SetRGBStroke(
page, #HPDF_Page
r, #HPDF_REAL
g, #HPDF_REAL
b, #HPDF_REAL
)
# k
#HPDF_STATUS HPDF_Page_SetCMYKFill (HPDF_Page page, HPDF_REAL c, HPDF_REAL m, HPDF_REAL y, HPDF_REAL k)
_HPDF_Page_SetCMYKFill=haru.HPDF_Page_SetCMYKFill
_HPDF_Page_SetCMYKFill.restype=HPDF_STATUS
def HPDF_Page_SetCMYKFill(
page, #HPDF_Page
c, #HPDF_REAL
m, #HPDF_REAL
y, #HPDF_REAL
k, #HPDF_REAL
):
c=HPDF_REAL(c)
m=HPDF_REAL(m)
y=HPDF_REAL(y)
k=HPDF_REAL(k)
return _HPDF_Page_SetCMYKFill(
page, #HPDF_Page
c, #HPDF_REAL
m, #HPDF_REAL
y, #HPDF_REAL
k, #HPDF_REAL
)
# K
#HPDF_STATUS HPDF_Page_SetCMYKStroke (HPDF_Page page, HPDF_REAL c, HPDF_REAL m, HPDF_REAL y, HPDF_REAL k)
_HPDF_Page_SetCMYKStroke=haru.HPDF_Page_SetCMYKStroke
_HPDF_Page_SetCMYKStroke.restype=HPDF_STATUS
def HPDF_Page_SetCMYKStroke(
page, #HPDF_Page
c, #HPDF_REAL
m, #HPDF_REAL
y, #HPDF_REAL
k, #HPDF_REAL
):
c=HPDF_REAL(c)
m=HPDF_REAL(m)
y=HPDF_REAL(y)
k=HPDF_REAL(k)
return _HPDF_Page_SetCMYKStroke(
page, #HPDF_Page
c, #HPDF_REAL
m, #HPDF_REAL
y, #HPDF_REAL
k, #HPDF_REAL
)
#--- Shading patterns ---------------------------------------------------
# sh --not implemented yet
#--- In-line images -----------------------------------------------------
# BI --not implemented yet
# ID --not implemented yet
# EI --not implemented yet
#--- XObjects -----------------------------------------------------------
# Do
#HPDF_STATUS HPDF_Page_ExecuteXObject (HPDF_Page page, HPDF_XObject obj)
HPDF_Page_ExecuteXObject=haru.HPDF_Page_ExecuteXObject
HPDF_Page_ExecuteXObject.restype=HPDF_STATUS
#--- Marked content -----------------------------------------------------
# BMC --not implemented yet
# BDC --not implemented yet
# EMC --not implemented yet
# MP --not implemented yet
# DP --not implemented yet
#--- Compatibility ------------------------------------------------------
# BX --not implemented yet
# EX --not implemented yet
#HPDF_STATUS HPDF_Page_DrawImage (HPDF_Page page, HPDF_Image image, HPDF_REAL x, HPDF_REAL y, HPDF_REAL width, HPDF_REAL height)
_HPDF_Page_DrawImage=haru.HPDF_Page_DrawImage
_HPDF_Page_DrawImage.restype=HPDF_STATUS
def HPDF_Page_DrawImage(
page, #HPDF_Page
image, #HPDF_Image
x, #HPDF_REAL
y, #HPDF_REAL
width, #HPDF_REAL
height, #HPDF_REAL
):
x=HPDF_REAL(x)
y=HPDF_REAL(y)
width=HPDF_REAL(width)
height=HPDF_REAL(height)
return _HPDF_Page_DrawImage(
page, #HPDF_Page
image, #HPDF_Image
x, #HPDF_REAL
y, #HPDF_REAL
width, #HPDF_REAL
height, #HPDF_REAL
)
#HPDF_STATUS HPDF_Page_Circle (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL ray)
_HPDF_Page_Circle=haru.HPDF_Page_Circle
_HPDF_Page_Circle.restype=HPDF_STATUS
def HPDF_Page_Circle(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
ray, #HPDF_REAL
):
x=HPDF_REAL(x)
y=HPDF_REAL(y)
ray=HPDF_REAL(ray)
return _HPDF_Page_Circle(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
ray, #HPDF_REAL
)
#HPDF_STATUS HPDF_Page_Ellipse (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL xray, HPDF_REAL yray)
_HPDF_Page_Ellipse=haru.HPDF_Page_Ellipse
_HPDF_Page_Ellipse.restype=HPDF_STATUS
def HPDF_Page_Ellipse(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
xray, #HPDF_REAL
yray, #HPDF_REAL
):
x=HPDF_REAL(x)
y=HPDF_REAL(y)
xray=HPDF_REAL(xray)
yray=HPDF_REAL(yray)
return _HPDF_Page_Ellipse(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
xray, #HPDF_REAL
yray, #HPDF_REAL
)
#HPDF_STATUS HPDF_Page_Arc (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL ray, HPDF_REAL ang1, HPDF_REAL ang2)
_HPDF_Page_Arc=haru.HPDF_Page_Arc
_HPDF_Page_Arc.restype=HPDF_STATUS
def HPDF_Page_Arc(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
ray, #HPDF_REAL
ang1, #HPDF_REAL
ang2, #HPDF_REAL
):
x=HPDF_REAL(x)
y=HPDF_REAL(y)
ray=HPDF_REAL(ray)
ang1=HPDF_REAL(ang1)
ang2=HPDF_REAL(ang2)
return _HPDF_Page_Arc(
page, #HPDF_Page
x, #HPDF_REAL
y, #HPDF_REAL
ray, #HPDF_REAL
ang1, #HPDF_REAL
ang2, #HPDF_REAL
)
#HPDF_STATUS HPDF_Page_TextOut (HPDF_Page page, HPDF_REAL xpos, HPDF_REAL ypos, const char *text)
_HPDF_Page_TextOut=haru.HPDF_Page_TextOut
_HPDF_Page_TextOut.restype=HPDF_STATUS
def HPDF_Page_TextOut(
page, #HPDF_Page
xpos, #HPDF_REAL
ypos, #HPDF_REAL
text, #c_char_p
):
xpos=HPDF_REAL(xpos)
ypos=HPDF_REAL(ypos)
if type(text) in (types.ListType, types.TupleType):
if type(text[-1]) != types.StringType:
text=[chr(i) for i in text]
text=''.join(text)
return _HPDF_Page_TextOut(
page, #HPDF_Page
xpos, #HPDF_REAL
ypos, #HPDF_REAL
text, #c_char_p
)
#HPDF_STATUS HPDF_Page_TextRect (HPDF_Page page, HPDF_REAL left, HPDF_REAL top, HPDF_REAL right, HPDF_REAL bottom, const char *text, HPDF_TextAlignment align, HPDF_UINT *len)
#note: the final argument maps to HPDF_UINT *len, an output parameter that receives the number of characters written
_HPDF_Page_TextRect=haru.HPDF_Page_TextRect
_HPDF_Page_TextRect.restype=HPDF_STATUS
def HPDF_Page_TextRect(
page, #HPDF_Page
left, #HPDF_REAL
top, #HPDF_REAL
right, #HPDF_REAL
bottom, #HPDF_REAL
text, #c_char_p
align, #HPDF_TextAlignment
length, #POINTER(HPDF_UINT)
):
left=HPDF_REAL(left)
top=HPDF_REAL(top)
right=HPDF_REAL(right)
bottom=HPDF_REAL(bottom)
if type(length) in (types.ListType, types.TupleType):
size=len(length)
length=pointer((HPDF_UINT*size)(*length))
return _HPDF_Page_TextRect(
page, #HPDF_Page
left, #HPDF_REAL
top, #HPDF_REAL
right, #HPDF_REAL
bottom, #HPDF_REAL
text, #c_char_p
align, #HPDF_TextAlignment
length, #POINTER(HPDF_UINT)
)
#HPDF_STATUS HPDF_Page_SetSlideShow (HPDF_Page page, HPDF_TransitionStyle type, HPDF_REAL disp_time, HPDF_REAL trans_time)
_HPDF_Page_SetSlideShow=haru.HPDF_Page_SetSlideShow
_HPDF_Page_SetSlideShow.restype=HPDF_STATUS
def HPDF_Page_SetSlideShow(
page, #HPDF_Page
tType, #HPDF_TransitionStyle
disp_time, #HPDF_REAL
trans_time, #HPDF_REAL
):
disp_time=HPDF_REAL(disp_time)
trans_time=HPDF_REAL(trans_time)
return _HPDF_Page_SetSlideShow(
page, #HPDF_Page
tType, #HPDF_TransitionStyle
disp_time, #HPDF_REAL
trans_time, #HPDF_REAL
)
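#An end-to-end sketch (commented out; not part of the original binding). It
#assumes the HPDF_New, HPDF_AddPage, HPDF_SaveToFile and HPDF_Free wrappers
#are defined earlier in this module:
#
#    pdf = HPDF_New(NULL, NULL)
#    page = HPDF_AddPage(pdf)
#    HPDF_Page_Rectangle(page, 50.0, 50.0, 100.0, 60.0)
#    HPDF_Page_FillStroke(page)
#    HPDF_SaveToFile(pdf, "out.pdf")
#    HPDF_Free(pdf)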
NULL=0
HPDF_NOPNGLIB=False
| skytrack/tps5 | src/lib/libharu/if/python/hpdf.py | Python | gpl-2.0 | 77,143 |
# this is a virtual module that is entirely implemented server side
DOCUMENTATION = '''
---
module: raw
version_added: historical
short_description: Executes a low-down and dirty SSH command
options:
free_form:
description:
- the raw module takes a free form command to run
required: true
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
version_added: "1.0"
description:
- Executes a low-down and dirty SSH command, not going through the module
subsystem. This is useful and should only be done in two cases. The
first case is installing C(python-simplejson) on older (Python 2.4 and
before) hosts that need it as a dependency to run modules, since nearly
all core modules require it. Another is speaking to any devices such as
routers that do not have any Python installed. In any other case, using
the M(shell) or M(command) module is much more appropriate. Arguments
given to M(raw) are run directly through the configured remote shell.
Standard output, error output and return code are returned when
available. There is no change handler support for this module.
- This module does not require python on the remote system, much like
the M(script) module.
notes:
- If you want to execute a command securely and predictably, it may be
better to use the M(command) module instead. Best practices when writing
playbooks will follow the trend of using M(command) unless M(shell) is
explicitly required. When running ad-hoc commands, use your best
judgement.
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Bootstrap a legacy python 2.4 host
- raw: yum -y install python-simplejson
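# Run a command through a shell other than the default (illustrative; the
# bash path is an assumption about the target host)
- raw: cat < /tmp/*txt
  args:
    executable: /bin/bash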
'''
| amandolo/ansible-modules-core | commands/raw.py | Python | gpl-3.0 | 1,845 |
"""
TODO: add a docstring.
"""
class Delimiters(object):
def first(self):
return "It worked the first time."
def second(self):
return "And it worked the second time."
def third(self):
return "Then, surprisingly, it worked the third time."
| GbalsaC/bitnamiP | venv/src/pystache-custom/pystache_custom/tests/examples/delimiters.py | Python | agpl-3.0 | 281 |
""" Various kinds of icon widgets.
"""
from __future__ import absolute_import
from ...properties import Bool, Float, Enum
from ...enums import NamedIcon
from ..widget import Widget
class AbstractIcon(Widget):
""" An abstract base class for icon widgets. ``AbstractIcon``
is not generally useful to instantiate on its own.
"""
class Icon(AbstractIcon):
""" A "stock" icon based on FontAwesome.
"""
name = Enum(NamedIcon, help="""
What icon to use. See http://fortawesome.github.io/Font-Awesome/icons/
for the list of available icons.
""")
size = Float(None, help="""
The size multiplier (1x, 2x, ..., 5x).
""")
flip = Enum("horizontal", "vertical", default=None, help="""
Optionally flip the icon horizontally or vertically.
""")
spin = Bool(False, help="""
Indicates a spinning (animated) icon. This value is ignored for
icons that do not support spinning.
""")
| rhiever/bokeh | bokeh/models/widgets/icons.py | Python | bsd-3-clause | 948 |
from __future__ import unicode_literals
from .novamov import NovaMovIE
class NowVideoIE(NovaMovIE):
IE_NAME = 'nowvideo'
IE_DESC = 'NowVideo'
_VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|ec|sx|eu|at|ag|co|li)'}
_HOST = 'www.nowvideo.ch'
_FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
_FILEKEY_REGEX = r'var fkzd="([^"]+)";'
_TITLE_REGEX = r'<h4>([^<]+)</h4>'
_DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>'
_TEST = {
'url': 'http://www.nowvideo.ch/video/0mw0yow7b6dxa',
'md5': 'f8fbbc8add72bd95b7850c6a02fc8817',
'info_dict': {
'id': '0mw0yow7b6dxa',
'ext': 'flv',
'title': 'youtubedl test video _BaW_jenozKc.mp4',
'description': 'Description',
}
}
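# Note (not in the original file): the class body is pure configuration;
# NovaMovIE._VALID_URL_TEMPLATE interpolates the 'host' pattern above, so a
# URL such as http://www.nowvideo.ch/video/0mw0yow7b6dxa is matched and its
# id handed to the shared NovaMov extraction logic.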
| MarkTheF4rth/youtube-dl | youtube_dl/extractor/nowvideo.py | Python | unlicense | 824 |
"""
This file tests the MNISTPlus class. majorly concerning the X and y member
of the dataset and their corresponding sizes, data scales and topological
views.
"""
from pylearn2.datasets.mnistplus import MNISTPlus
from pylearn2.space import IndexSpace, VectorSpace
import unittest
from pylearn2.testing.skip import skip_if_no_data
import numpy as np
def test_MNISTPlus():
"""
Test the MNISTPlus warper.
Tests the scale of data, the splitting of train, valid, test sets.
Tests that a topological batch has 4 dimensions.
Tests that it work well with selected type of augmentation.
"""
skip_if_no_data()
for subset in ['train', 'valid', 'test']:
ids = MNISTPlus(which_set=subset)
assert 0.01 >= ids.X.min() >= 0.0
assert 0.99 <= ids.X.max() <= 1.0
topo = ids.get_batch_topo(1)
assert topo.ndim == 4
del ids
train_y = MNISTPlus(which_set='train', label_type='label')
assert 0.99 <= train_y.X.max() <= 1.0
assert 0.0 <= train_y.X.min() <= 0.01
assert train_y.y.max() == 9
assert train_y.y.min() == 0
assert train_y.y.shape == (train_y.X.shape[0], 1)
train_y = MNISTPlus(which_set='train', label_type='azimuth')
assert 0.99 <= train_y.X.max() <= 1.0
assert 0.0 <= train_y.X.min() <= 0.01
assert 0.0 <= train_y.y.max() <= 1.0
assert 0.0 <= train_y.y.min() <= 1.0
assert train_y.y.shape == (train_y.X.shape[0], 1)
train_y = MNISTPlus(which_set='train', label_type='rotation')
assert 0.99 <= train_y.X.max() <= 1.0
assert 0.0 <= train_y.X.min() <= 0.01
assert train_y.y.max() == 9
assert train_y.y.min() == 0
assert train_y.y.shape == (train_y.X.shape[0], 1)
train_y = MNISTPlus(which_set='train', label_type='texture_id')
assert 0.99 <= train_y.X.max() <= 1.0
assert 0.0 <= train_y.X.min() <= 0.01
assert train_y.y.max() == 9
assert train_y.y.min() == 0
assert train_y.y.shape == (train_y.X.shape[0], 1)
| JazzeYoung/VeryDeepAutoEncoder | pylearn2/pylearn2/datasets/tests/test_mnistplus.py | Python | bsd-3-clause | 1,978 |
"""Regresssion tests for urllib"""
import urllib
import httplib
import unittest
import os
import sys
import mimetools
import tempfile
import StringIO
from test import test_support
from base64 import b64encode
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
class FakeHTTPMixin(object):
def fakehttp(self, fakedata):
class FakeSocket(StringIO.StringIO):
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
return self
def read(self, amt=None):
if self.closed:
return ""
return StringIO.StringIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return ""
return StringIO.StringIO.readline(self, length)
class FakeHTTPConnection(httplib.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = ""
def connect(self):
self.sock = FakeSocket(fakedata)
assert httplib.HTTP._connection_class == httplib.HTTPConnection
httplib.HTTP._connection_class = FakeHTTPConnection
def unfakehttp(self):
httplib.HTTP._connection_class = httplib.HTTPConnection
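# Note (not in the original file): fakehttp() monkey-patches
# httplib.HTTP._connection_class so urllib.urlopen() reads a canned response
# from FakeSocket instead of the network; the tests below restore the real
# HTTPConnection in a finally block via unfakehttp().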
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
"""Setup of a temp file to use for testing"""
self.text = "test_urllib: %s\n" % self.__class__.__name__
FILE = file(test_support.TESTFN, 'wb')
try:
FILE.write(self.text)
finally:
FILE.close()
self.pathname = test_support.TESTFN
self.returned_obj = urllib.urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(test_support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual('', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
        # Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), mimetools.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertEqual(self.returned_obj.getcode(), None)
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = test_support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in os.environ.keys():
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.getproxies_environment()
# getproxies_environment use lowered case truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com')
self.assertTrue(urllib.proxy_bypass_environment('anotherdomain.com'))
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urlopen() opening a fake http connection."""
def test_read(self):
self.fakehttp('Hello!')
try:
fp = urllib.urlopen("http://python.org/")
self.assertEqual(fp.readline(), 'Hello!')
self.assertEqual(fp.readline(), '')
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp('Hello!')
try:
fp = urllib.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp('''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp("""HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file:README
Connection: close
Content-Type: text/html; charset=iso-8859-1
""")
try:
self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp('')
try:
self.assertRaises(IOError, urllib.urlopen, 'http://something')
finally:
self.unfakehttp()
def test_missing_localfile(self):
self.assertRaises(IOError, urllib.urlopen,
'file://localhost/a/missing/file.py')
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
fp = urllib.urlopen(tmp_fileurl)
finally:
os.close(fd)
fp.close()
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
self.assertRaises(IOError, urllib.urlopen, tmp_fileurl)
def test_ftp_nonexisting(self):
self.assertRaises(IOError, urllib.urlopen,
'ftp://localhost/not/existing/file.py')
def test_userpass_inurl(self):
self.fakehttp('Hello!')
try:
fakehttp_wrapper = httplib.HTTP._connection_class
fp = urllib.urlopen("http://user:[email protected]/")
authorization = ("Authorization: Basic %s\r\n" %
b64encode('user:pass'))
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf)
self.assertEqual(fp.readline(), "Hello!")
self.assertEqual(fp.readline(), "")
            self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_with_spaces_inurl(self):
self.fakehttp('Hello!')
try:
url = "http://a b:c [email protected]/"
fakehttp_wrapper = httplib.HTTP._connection_class
authorization = ("Authorization: Basic %s\r\n" %
b64encode('a b:c d'))
fp = urllib.urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf)
self.assertEqual(fp.readline(), "Hello!")
self.assertEqual(fp.readline(), "")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to makes sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(test_support.TESTFN)
self.text = 'testing urllib.urlretrieve'
try:
FILE = file(test_support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
return "file://%s" % urllib.pathname2url(os.path.abspath(filePath))
def createNewTempFile(self, data=""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.urlretrieve("file:%s" % test_support.TESTFN)
self.assertEqual(result[0], test_support.TESTFN)
self.assertIsInstance(result[1], mimetools.Message,
"did not get a mimetools.Message instance as "
"second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.urlretrieve(self.constructLocalFileUrl(
test_support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = file(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assertIsInstance(count, int)
self.assertIsInstance(block_size, int)
self.assertIsInstance(total_size, int)
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.urlretrieve(self.constructLocalFileUrl(test_support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile()
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read). Since the block size is 8192 bytes, only one block read is
# required to read the entire file.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 5)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 8193)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 8193)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
        self.fakehttp('''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1

FF
''')
def _reporthook(par1, par2, par3):
pass
try:
self.assertRaises(urllib.ContentTooShortError, urllib.urlretrieve,
'http://example.com', reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp('''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
try:
self.assertRaises(urllib.ContentTooShortError, urllib.urlretrieve, 'http://example.com/')
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
    According to RFC 2396 ("Uniform Resource Identifiers"), to escape a
character you write it as '%' + <2 character US-ASCII hex value>. The Python
code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.
Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
        do_not_quote = ''.join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %s != %s" % (do_not_quote, result))
result = urllib.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %s != %s" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.quote.func_defaults[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %s != %s" % (quote_by_default, result))
result = urllib.quote_plus(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %s != %s" %
(quote_by_default, result))
def test_default_quoting(self):
        # Make sure all characters that should be quoted are quoted by
        # default, except for space (which has its own test).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): %s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %s != %s" % (expected, result))
result = urllib.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %s != %s" % (expected, result))
self.assertRaises(TypeError, urllib.quote, None)
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %s != %s" % (result, hexescape(' ')))
result = urllib.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %s != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.quote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %s != %s" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
    See the docstring for QuotingTests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
        result = urllib.unquote(escape_string)
        self.assertEqual(result.count('%'), 1,
                         "using unquote(): not all characters unescaped: "
                         "%s" % result)
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = '\xab\xea'
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
        # Make sure unquoting works when non-quoted characters are
        # interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquote_with_unicode(self):
r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc')
self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc')
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
        The test cannot assume anything about ordering: the docs make no
        guarantee, and the input may be a dictionary.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
        self.assertEqual(len(result), (5 * 3) + 2,  # 5 chars per pair plus 2 '&'s
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3']))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
result = urllib.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
        # Test that automatic quoting and unquoting work for pathname2url()
        # and url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.quote("quot=ing")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.quote("make sure")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the nturl2path library')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.url2pathname(url)
self.assertEqual(expect, result,
'nturl2path.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
'nturl2path.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
"""Some of the password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
self.assertEqual(('user', 'ab'),urllib.splitpasswd('user:ab'))
self.assertEqual(('user', 'a\nb'),urllib.splitpasswd('user:a\nb'))
self.assertEqual(('user', 'a\tb'),urllib.splitpasswd('user:a\tb'))
self.assertEqual(('user', 'a\rb'),urllib.splitpasswd('user:a\rb'))
self.assertEqual(('user', 'a\fb'),urllib.splitpasswd('user:a\fb'))
self.assertEqual(('user', 'a\vb'),urllib.splitpasswd('user:a\vb'))
self.assertEqual(('user', 'a:b'),urllib.splitpasswd('user:a:b'))
self.assertEqual(('user', 'a b'),urllib.splitpasswd('user:a b'))
self.assertEqual(('user 2', 'ab'),urllib.splitpasswd('user 2:ab'))
self.assertEqual(('user+1', 'a+b'),urllib.splitpasswd('user+1:a+b'))
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.URLopener):
def open_spam(self, url):
return url
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work OK, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box,
# and the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
import warnings
with warnings.catch_warnings():
        warnings.filterwarnings('ignore', r".*urllib\.urlopen.*Python 3.0",
DeprecationWarning)
test_support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
urlretrieve_HttpTests,
ProxyTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests,
Utility_Tests,
URLopener_Tests,
#FTPWrapperTests,
)
if __name__ == '__main__':
test_main()
| teeple/pns_server | work/install/Python-2.7.4/Lib/test/test_urllib.py | Python | gpl-2.0 | 35,463 |
# -*- coding: utf-8 -*-
"""
Romanian specific form helpers.
"""
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError, Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
class ROCIFField(RegexField):
"""
A Romanian fiscal identity code (CIF) field
For CIF validation algorithm see http://www.validari.ro/cui.html
"""
default_error_messages = {
'invalid': _("Enter a valid CIF."),
}
def __init__(self, *args, **kwargs):
super(ROCIFField, self).__init__(r'^[0-9]{2,10}', max_length=10,
min_length=2, *args, **kwargs)
def clean(self, value):
"""
CIF validation
"""
value = super(ROCIFField, self).clean(value)
if value in EMPTY_VALUES:
return u''
# strip RO part
if value[0:2] == 'RO':
value = value[2:]
key = '753217532'[::-1]
value = value[::-1]
key_iter = iter(key)
checksum = 0
for digit in value[1:]:
checksum += int(digit) * int(key_iter.next())
checksum = checksum * 10 % 11
if checksum == 10:
checksum = 0
if checksum != int(value[0]):
raise ValidationError(self.error_messages['invalid'])
return value[::-1]
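# Usage sketch for ROCIFField (illustrative; form and field names are
# examples, not part of the original module):
#   class CompanyForm(forms.Form):
#       cif = ROCIFField()
#   # clean() strips a leading 'RO', then validates the first digit of the
#   # reversed value against a weighted checksum mod 11 (10 maps to 0).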
class ROCNPField(RegexField):
"""
A Romanian personal identity code (CNP) field
For CNP validation algorithm see http://www.validari.ro/cnp.html
"""
default_error_messages = {
'invalid': _("Enter a valid CNP."),
}
def __init__(self, *args, **kwargs):
super(ROCNPField, self).__init__(r'^[1-9][0-9]{12}', max_length=13,
min_length=13, *args, **kwargs)
def clean(self, value):
"""
CNP validations
"""
value = super(ROCNPField, self).clean(value)
# check birthdate digits
import datetime
try:
            datetime.date(int(value[1:3]), int(value[3:5]), int(value[5:7]))
        except ValueError:
raise ValidationError(self.error_messages['invalid'])
# checksum
key = '279146358279'
checksum = 0
value_iter = iter(value)
for digit in key:
checksum += int(digit) * int(value_iter.next())
checksum %= 11
if checksum == 10:
checksum = 1
if checksum != int(value[12]):
raise ValidationError(self.error_messages['invalid'])
return value
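# Note on ROCNPField (illustrative summary of clean() above): the 13-digit
# CNP embeds a birthdate in digits 1-6 (checked via datetime.date) and a
# final check digit computed against the key '279146358279' mod 11, with a
# checksum of 10 mapping to 1.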
class ROCountyField(Field):
"""
A form field that validates its input is a Romanian county name or
abbreviation. It normalizes the input to the standard vehicle registration
    abbreviation for the given county.
    WARNING: This field will only accept names written with diacritics; consider
    using ROCountySelect if this behavior is unacceptable for you.
Example:
Argeş => valid
Arges => invalid
"""
default_error_messages = {
'invalid': u'Enter a Romanian county code or name.',
}
def clean(self, value):
from ro_counties import COUNTIES_CHOICES
super(ROCountyField, self).clean(value)
if value in EMPTY_VALUES:
return u''
try:
value = value.strip().upper()
except AttributeError:
pass
# search for county code
for entry in COUNTIES_CHOICES:
if value in entry:
return value
# search for county name
normalized_CC = []
for entry in COUNTIES_CHOICES:
normalized_CC.append((entry[0],entry[1].upper()))
for entry in normalized_CC:
if entry[1] == value:
return entry[0]
raise ValidationError(self.error_messages['invalid'])
class ROCountySelect(Select):
"""
A Select widget that uses a list of Romanian counties (judete) as its
choices.
"""
def __init__(self, attrs=None):
from ro_counties import COUNTIES_CHOICES
super(ROCountySelect, self).__init__(attrs, choices=COUNTIES_CHOICES)
class ROIBANField(RegexField):
"""
Romanian International Bank Account Number (IBAN) field
For Romanian IBAN validation algorithm see http://validari.ro/iban.html
"""
default_error_messages = {
'invalid': _('Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format'),
}
def __init__(self, *args, **kwargs):
super(ROIBANField, self).__init__(r'^[0-9A-Za-z\-\s]{24,40}$',
max_length=40, min_length=24, *args, **kwargs)
def clean(self, value):
"""
Strips - and spaces, performs country code and checksum validation
"""
value = super(ROIBANField, self).clean(value)
value = value.replace('-','')
value = value.replace(' ','')
value = value.upper()
if value[0:2] != 'RO':
raise ValidationError(self.error_messages['invalid'])
numeric_format = ''
for char in value[4:] + value[0:4]:
if char.isalpha():
numeric_format += str(ord(char) - 55)
else:
numeric_format += char
if int(numeric_format) % 97 != 1:
raise ValidationError(self.error_messages['invalid'])
return value
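# Checksum sketch for ROIBANField (illustrative restatement of clean()): after
# stripping '-' and spaces, the first four characters move to the end, letters
# map to numbers (A=10 ... Z=35, via ord(char) - 55), and the resulting
# integer must satisfy n % 97 == 1 -- the standard IBAN mod-97 rule.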
class ROPhoneNumberField(RegexField):
"""Romanian phone number field"""
default_error_messages = {
'invalid': _('Phone numbers must be in XXXX-XXXXXX format.'),
}
def __init__(self, *args, **kwargs):
super(ROPhoneNumberField, self).__init__(r'^[0-9\-\(\)\s]{10,20}$',
max_length=20, min_length=10, *args, **kwargs)
def clean(self, value):
"""
Strips -, (, ) and spaces. Checks the final length.
"""
value = super(ROPhoneNumberField, self).clean(value)
value = value.replace('-','')
value = value.replace('(','')
value = value.replace(')','')
value = value.replace(' ','')
if len(value) != 10:
raise ValidationError(self.error_messages['invalid'])
return value
class ROPostalCodeField(RegexField):
"""Romanian postal code field."""
default_error_messages = {
'invalid': _('Enter a valid postal code in the format XXXXXX'),
}
def __init__(self, *args, **kwargs):
super(ROPostalCodeField, self).__init__(r'^[0-9][0-8][0-9]{4}$',
max_length=6, min_length=6, *args, **kwargs)
| nycholas/ask-undrgz | src/ask-undrgz/django/contrib/localflavor/ro/forms.py | Python | bsd-3-clause | 6,464 |
#!/usr/bin/env python
from __future__ import unicode_literals
import io
import optparse
import os
import sys
# Import youtube_dl
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.insert(0, ROOT_DIR)
import youtube_dl
def main():
parser = optparse.OptionParser(usage='%prog OUTFILE.md')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Expected an output filename')
outfile, = args
def gen_ies_md(ies):
for ie in ies:
ie_md = '**{0}**'.format(ie.IE_NAME)
ie_desc = getattr(ie, 'IE_DESC', None)
if ie_desc is False:
continue
if ie_desc is not None:
ie_md += ': {0}'.format(ie.IE_DESC)
if not ie.working():
ie_md += ' (Currently broken)'
yield ie_md
ies = sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower())
out = '# Supported sites\n' + ''.join(
' - ' + md + '\n'
for md in gen_ies_md(ies))
with io.open(outfile, 'w', encoding='utf-8') as outf:
outf.write(out)
if __name__ == '__main__':
main()
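# Example invocation (illustrative output path):
#   python devscripts/make_supportedsites.py docs/supportedsites.md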
| MarkTheF4rth/youtube-dl | devscripts/make_supportedsites.py | Python | unlicense | 1,152 |
#
# johab.py: Python Unicode Codec for JOHAB
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_kr, codecs
import _multibytecodec as mbc
codec = _codecs_kr.getcodec('johab')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='johab',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
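# Usage sketch (illustrative): once the encodings package registers this
# module, the codec is reachable through the standard codecs API, e.g.:
#   import codecs
#   codecs.lookup('johab')        # -> CodecInfo named 'johab'
#   u'\uac00'.encode('johab')     # encode a Hangul syllable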
| zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/encodings/johab.py | Python | epl-1.0 | 1,023 |
#!/usr/bin/python
# Copyright 2007 Rene Rivera.
# Copyright 2011 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Added to guard against a bug causing targets to be used before they
# themselves have finished building. This used to happen for targets built by a
# multi-file action that got triggered by another target.
#
# Example:
# When target A and target B were declared as created by a single action and
# target A triggered running that action then, while the action was still
# running, target B was already reporting as being built causing other targets
# depending on target A to be built prematurely.
import BoostBuild
t = BoostBuild.Tester(pass_toolset=0, pass_d0=False)
t.write("sleep.bat", """\
::@timeout /T %1 /NOBREAK >nul
@ping 127.0.0.1 -n 2 -w 1000 >nul
@ping 127.0.0.1 -n %1 -w 1000 >nul
@exit /B 0
""")
t.write("file.jam", """\
if $(NT)
{
SLEEP = @call sleep.bat ;
}
else
{
SLEEP = sleep ;
}
actions .gen.
{
echo 001
$(SLEEP) 4
echo 002
}
rule .use.1 { DEPENDS $(<) : $(>) ; }
actions .use.1
{
echo 003
}
rule .use.2 { DEPENDS $(<) : $(>) ; }
actions .use.2
{
$(SLEEP) 1
echo 004
}
.gen. g1.generated g2.generated ;
.use.1 u1.user : g1.generated ;
.use.2 u2.user : g2.generated ;
DEPENDS all : u1.user u2.user ;
""")
t.run_build_system(["-ffile.jam", "-j2"], stdout="""\
...found 5 targets...
...updating 4 targets...
.gen. g1.generated
001
002
.use.1 u1.user
003
.use.2 u2.user
004
...updated 4 targets...
""")
t.cleanup()
| NixaSoftware/CVis | venv/bin/tools/build/v2/test/core_parallel_multifile_actions_1.py | Python | apache-2.0 | 1,606 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vtp_password
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VTP password configuration.
description:
- Manages VTP password configuration.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- VTP feature must be active on the device to use this module.
- This module is used to manage only VTP passwords.
- Use this in combination with M(nxos_vtp_domain) and M(nxos_vtp_version)
to fully manage VTP operations.
    - You can set/remove a password only if a VTP domain already exists.
    - If C(state=absent) and no C(vtp_password) is provided, it removes the current
      VTP password.
- If C(state=absent) and C(vtp_password) is provided, the proposed C(vtp_password)
has to match the existing one in order to remove it.
options:
vtp_password:
description:
- VTP password
required: false
default: null
state:
description:
- Manage the state of the resource
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ENSURE VTP PASSWORD IS SET
- nxos_vtp_password:
    vtp_password: ntc
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ENSURE VTP PASSWORD IS REMOVED
- nxos_vtp_password:
    vtp_password: ntc
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"vtp_password": "new_ntc"}
existing:
description:
- k/v pairs of existing vtp
returned: always
type: dict
sample: {"domain": "ntc", "version": "1", "vtp_password": "ntc"}
end_state:
description: k/v pairs of vtp after module execution
returned: always
type: dict
sample: {"domain": "ntc", "version": "1", "vtp_password": "new_ntc"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["vtp password new_ntc"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
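# Illustrative behavior of apply_key_map (keys without a mapping are dropped,
# truthy values are stringified):
#   apply_key_map({'a': 'alpha'}, {'a': 1, 'b': 2}) -> {'alpha': '1'}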
def get_vtp_config(module):
command = 'show vtp status'
body = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
vtp_parsed = {}
if body:
        version_regex = r'.*VTP version running\s+:\s+(?P<version>\d).*'
        domain_regex = r'.*VTP Domain Name\s+:\s+(?P<domain>\S+).*'
try:
match_version = re.match(version_regex, body, re.DOTALL)
version = match_version.groupdict()['version']
except AttributeError:
version = ''
try:
match_domain = re.match(domain_regex, body, re.DOTALL)
domain = match_domain.groupdict()['domain']
except AttributeError:
domain = ''
if domain and version:
vtp_parsed['domain'] = domain
vtp_parsed['version'] = version
vtp_parsed['vtp_password'] = get_vtp_password(module)
return vtp_parsed
def get_vtp_password(module):
command = 'show vtp password'
body = execute_show_command(command, module)[0]
password = body['passwd']
if password:
return str(password)
else:
return ""
def main():
argument_spec = dict(
vtp_password=dict(type='str', no_log=True),
state=dict(choices=['absent', 'present'],
default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
vtp_password = module.params['vtp_password'] or None
state = module.params['state']
existing = get_vtp_config(module)
end_state = existing
args = dict(vtp_password=vtp_password)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
if state == 'absent':
if vtp_password is not None:
if existing['vtp_password'] == proposed['vtp_password']:
commands.append(['no vtp password'])
else:
module.fail_json(msg="Proposed vtp password doesn't match "
"current vtp password. It cannot be "
"removed when state=absent. If you are "
"trying to change the vtp password, use "
"state=present.")
else:
if not existing.get('domain'):
module.fail_json(msg='Cannot remove a vtp password '
'before vtp domain is set.')
elif existing['vtp_password'] != ('\\'):
commands.append(['no vtp password'])
elif state == 'present':
if delta:
if not existing.get('domain'):
module.fail_json(msg='Cannot set vtp password '
'before vtp domain is set.')
else:
commands.append(['vtp password {0}'.format(vtp_password)])
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_vtp_config(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
| RackSec/ansible | lib/ansible/modules/network/nxos/nxos_vtp_password.py | Python | gpl-3.0 | 8,006 |
class C:
def foo(self):
x = 1
y = 2
x = 1
def foo():
pass | akosyakov/intellij-community | python/testData/copyPaste/singleLine/IndentOnTopLevel.after.py | Python | apache-2.0 | 83 |
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import urllib
DEFAULT_GAIA_URL = "https://www.google.com:443/accounts/ClientLogin"
class GaiaAuthenticator:
def __init__(self, service, url = DEFAULT_GAIA_URL):
self._service = service
self._url = url
  ## Logs in to Gaia and returns an auth token.
def authenticate(self, email, passwd):
params = urllib.urlencode({'Email': email, 'Passwd': passwd,
'source': 'chromoting',
'service': self._service,
'PersistentCookie': 'true',
'accountType': 'GOOGLE'})
    f = urllib.urlopen(self._url, params)
result = f.read()
for line in result.splitlines():
if line.startswith('Auth='):
auth_string = line[5:]
return auth_string
raise Exception("Gaia didn't return auth token: " + result)
| plxaye/chromium | src/remoting/tools/gaia_auth.py | Python | apache-2.0 | 1,043 |
# Copyright 2014 Michal Nowikowski.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Checker for spelling errors in comments and docstrings.
"""
import sys
import tokenize
import string
import re
if sys.version_info[0] >= 3:
maketrans = str.maketrans
else:
maketrans = string.maketrans
from pylint.interfaces import ITokenChecker, IAstroidChecker
from pylint.checkers import BaseTokenChecker
from pylint.checkers.utils import check_messages
try:
import enchant
except ImportError:
enchant = None
if enchant is not None:
br = enchant.Broker()
dicts = br.list_dicts()
dict_choices = [''] + [d[0] for d in dicts]
dicts = ["%s (%s)" % (d[0], d[1].name) for d in dicts]
dicts = ", ".join(dicts)
instr = ""
else:
dicts = "none"
dict_choices = ['']
instr = " To make it working install python-enchant package."
table = maketrans("", "")
class SpellingChecker(BaseTokenChecker):
"""Check spelling in comments and docstrings"""
__implements__ = (ITokenChecker, IAstroidChecker)
name = 'spelling'
msgs = {
'C0401': ('Wrong spelling of a word \'%s\' in a comment:\n%s\n'
'%s\nDid you mean: \'%s\'?',
'wrong-spelling-in-comment',
'Used when a word in comment is not spelled correctly.'),
'C0402': ('Wrong spelling of a word \'%s\' in a docstring:\n%s\n'
'%s\nDid you mean: \'%s\'?',
'wrong-spelling-in-docstring',
'Used when a word in docstring is not spelled correctly.'),
}
options = (('spelling-dict',
{'default' : '', 'type' : 'choice', 'metavar' : '<dict name>',
'choices': dict_choices,
'help' : 'Spelling dictionary name. '
'Available dictionaries: %s.%s' % (dicts, instr)}),
('spelling-ignore-words',
{'default' : '',
'type' : 'string',
'metavar' : '<comma separated words>',
'help' : 'List of comma separated words that '
'should not be checked.'}),
('spelling-private-dict-file',
{'default' : '',
'type' : 'string',
'metavar' : '<path to file>',
'help' : 'A path to a file that contains private '
'dictionary; one word per line.'}),
('spelling-store-unknown-words',
{'default' : 'n', 'type' : 'yn', 'metavar' : '<y_or_n>',
'help' : 'Tells whether to store unknown words to '
'indicated private dictionary in '
'--spelling-private-dict-file option instead of '
'raising a message.'}),
)
def open(self):
self.initialized = False
self.private_dict_file = None
if enchant is None:
return
dict_name = self.config.spelling_dict
if not dict_name:
return
self.ignore_list = [w.strip() for w in self.config.spelling_ignore_words.split(",")]
# "param" appears in docstring in param description and
# "pylint" appears in comments in pylint pragmas.
self.ignore_list.extend(["param", "pylint"])
if self.config.spelling_private_dict_file:
self.spelling_dict = enchant.DictWithPWL(
dict_name, self.config.spelling_private_dict_file)
self.private_dict_file = open(
self.config.spelling_private_dict_file, "a")
else:
self.spelling_dict = enchant.Dict(dict_name)
if self.config.spelling_store_unknown_words:
self.unknown_words = set()
# Prepare regex for stripping punctuation signs from text.
# ' and _ are treated in a special way.
puncts = string.punctuation.replace("'", "").replace("_", "")
self.punctuation_regex = re.compile('[%s]' % re.escape(puncts))
self.initialized = True
def close(self):
if self.private_dict_file:
self.private_dict_file.close()
def _check_spelling(self, msgid, line, line_num):
line2 = line.strip()
        # Strip a quote that closes a word, e.g. afadf'] -> afadf]
        # (but preserve contractions like don't)
        line2 = re.sub("'([^a-zA-Z]|$)", " ", line2)
        # Strip a quote that opens a word, e.g. ['afadf -> [afadf
        # (but preserve contractions like don't)
        line2 = re.sub("([^a-zA-Z]|^)'", " ", line2)
# Replace punctuation signs with space e.g. and/or -> and or
line2 = self.punctuation_regex.sub(' ', line2)
words = []
for word in line2.split():
# Skip words with digits.
if len(re.findall(r"\d", word)) > 0:
continue
            # Skip words that mix upper- and lower-case letters;
            # they are probably class names.
if (len(re.findall("[A-Z]", word)) > 0 and
len(re.findall("[a-z]", word)) > 0 and
len(word) > 2):
continue
# Skip words with _ - they are probably function parameter names.
if word.count('_') > 0:
continue
words.append(word)
# Go through words and check them.
for word in words:
# Skip words from ignore list.
if word in self.ignore_list:
continue
orig_word = word
word = word.lower()
# Strip starting u' from unicode literals and r' from raw strings.
if (word.startswith("u'") or
word.startswith('u"') or
word.startswith("r'") or
word.startswith('r"')) and len(word) > 2:
word = word[2:]
# If it is a known word, then continue.
if self.spelling_dict.check(word):
continue
# Store word to private dict or raise a message.
if self.config.spelling_store_unknown_words:
if word not in self.unknown_words:
self.private_dict_file.write("%s\n" % word)
self.unknown_words.add(word)
else:
# Present up to 4 suggestions.
# TODO: add support for customising this.
suggestions = self.spelling_dict.suggest(word)[:4]
m = re.search(r"(\W|^)(%s)(\W|$)" % word, line.lower())
if m:
# Start position of second group in regex.
col = m.regs[2][0]
else:
col = line.lower().index(word)
indicator = (" " * col) + ("^" * len(word))
self.add_message(msgid, line=line_num,
args=(orig_word, line,
indicator,
"' or '".join(suggestions)))
def process_tokens(self, tokens):
if not self.initialized:
return
# Process tokens and look for comments.
for (tok_type, token, (start_row, _), _, _) in tokens:
if tok_type == tokenize.COMMENT:
self._check_spelling('wrong-spelling-in-comment',
token, start_row)
@check_messages('wrong-spelling-in-docstring')
def visit_module(self, node):
if not self.initialized:
return
self._check_docstring(node)
@check_messages('wrong-spelling-in-docstring')
def visit_class(self, node):
if not self.initialized:
return
self._check_docstring(node)
@check_messages('wrong-spelling-in-docstring')
def visit_function(self, node):
if not self.initialized:
return
self._check_docstring(node)
def _check_docstring(self, node):
"""check the node has any spelling errors"""
docstring = node.doc
if not docstring:
return
start_line = node.lineno + 1
# Go through lines of docstring
for idx, line in enumerate(docstring.splitlines()):
self._check_spelling('wrong-spelling-in-docstring',
line, start_line + idx)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(SpellingChecker(linter))
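# Configuration sketch (illustrative pylintrc snippet; the dictionary name and
# file paths are assumptions and must match an enchant dictionary installed on
# your system):
#   [SPELLING]
#   spelling-dict=en_US
#   spelling-ignore-words=noqa,pragma
#   spelling-private-dict-file=.pylint-words
#   spelling-store-unknown-words=no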
| Shouqun/node-gn | tools/depot_tools/third_party/pylint/checkers/spelling.py | Python | mit | 9,020 |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
from ..utils import (
determine_ext,
int_or_none,
xpath_text,
)
class InternetVideoArchiveIE(InfoExtractor):
_VALID_URL = r'https?://video\.internetvideoarchive\.net/(?:player|flash/players)/.*?\?.*?publishedid.*?'
_TEST = {
'url': 'http://video.internetvideoarchive.net/player/6/configuration.ashx?customerid=69249&publishedid=194487&reporttag=vdbetatitle&playerid=641&autolist=0&domain=www.videodetective.com&maxrate=high&minrate=low&socialplayer=false',
'info_dict': {
'id': '194487',
'ext': 'mp4',
'title': 'KICK-ASS 2',
'description': 'md5:c189d5b7280400630a1d3dd17eaa8d8a',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
@staticmethod
def _build_json_url(query):
return 'http://video.internetvideoarchive.net/player/6/configuration.ashx?' + query
@staticmethod
def _build_xml_url(query):
return 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?' + query
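    # For example (illustrative), _build_json_url('publishedid=194487') returns
    # the player configuration endpoint with that query appended:
    #   http://video.internetvideoarchive.net/player/6/configuration.ashx?publishedid=194487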
def _real_extract(self, url):
query = compat_urlparse.urlparse(url).query
query_dic = compat_parse_qs(query)
video_id = query_dic['publishedid'][0]
if '/player/' in url:
configuration = self._download_json(url, video_id)
            # There are multiple videos in the playlist while only the first
            # one matches the video played in browsers.
video_info = configuration['playlist'][0]
title = video_info['title']
formats = []
for source in video_info['sources']:
file_url = source['file']
if determine_ext(file_url) == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
file_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
if m3u8_formats:
formats.extend(m3u8_formats)
file_url = m3u8_formats[0]['url']
formats.extend(self._extract_f4m_formats(
file_url.replace('.m3u8', '.f4m'),
video_id, f4m_id='hds', fatal=False))
formats.extend(self._extract_mpd_formats(
file_url.replace('.m3u8', '.mpd'),
video_id, mpd_id='dash', fatal=False))
else:
a_format = {
'url': file_url,
}
if source.get('label') and source['label'][-4:] == ' kbs':
tbr = int_or_none(source['label'][:-4])
a_format.update({
'tbr': tbr,
'format_id': 'http-%d' % tbr,
})
formats.append(a_format)
self._sort_formats(formats)
description = video_info.get('description')
thumbnail = video_info.get('image')
else:
configuration = self._download_xml(url, video_id)
formats = [{
'url': xpath_text(configuration, './file', 'file URL', fatal=True),
}]
thumbnail = xpath_text(configuration, './image', 'thumbnail')
title = 'InternetVideoArchive video %s' % video_id
description = None
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
}
| nickleefly/youtube-dl | youtube_dl/extractor/internetvideoarchive.py | Python | unlicense | 3,764 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
test_records = frappe.get_test_records('Blog Category') | indautgrp/frappe | frappe/website/doctype/blog_category/test_blog_category.py | Python | mit | 211 |
import re
from django.template import Node, Variable, VariableNode
from django.template import TemplateSyntaxError, TokenParser, Library
from django.template import TOKEN_TEXT, TOKEN_VAR
from django.template.base import _render_value_in_context
from django.utils import translation
from django.utils.encoding import force_unicode
from django.template.defaulttags import token_kwargs
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
from django.conf import settings
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetLanguageInfoNode(Node):
def __init__(self, lang_code, variable):
self.lang_code = Variable(lang_code)
self.variable = variable
def render(self, context):
lang_code = self.lang_code.resolve(context)
context[self.variable] = translation.get_language_info(lang_code)
return ''
class GetLanguageInfoListNode(Node):
def __init__(self, languages, variable):
self.languages = Variable(languages)
self.variable = variable
def get_language_info(self, language):
# ``language`` is either a language code string or a sequence
# with the language code as its first item
if len(language[0]) > 1:
return translation.get_language_info(language[0])
else:
return translation.get_language_info(str(language))
def render(self, context):
langs = self.languages.resolve(context)
context[self.variable] = [self.get_language_info(lang) for lang in langs]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop):
self.noop = noop
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, basestring):
self.filter_expression.var = Variable(u"'%s'" % self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
output = self.filter_expression.resolve(context)
return _render_value_in_context(output, context)
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents)
elif token.token_type == TOKEN_VAR:
result.append(u'%%(%s)s' % token.contents)
vars.append(token.contents)
return ''.join(result), vars
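    # For example (illustrative), the token list for the template text
    # "There is {{ count }} object." renders to
    # (u'There is %(count)s object.', ['count']).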
def render(self, context):
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
result = translation.ugettext(singular)
# Escape all isolated '%' before substituting in the context.
result = re.sub(u'%(?!\()', u'%%', result)
data = dict([(v, _render_value_in_context(context[v], context)) for v in vars])
context.pop()
return result % data
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(args[2], args[4])
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style tuple (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(args[2], args[4])
def language_name(lang_code):
return translation.get_language_info(lang_code)['name']
def language_name_local(lang_code):
return translation.get_language_info(lang_code)['name_local']
def language_bidi(lang_code):
return translation.get_language_info(lang_code)['bidi']
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
    This will fetch the currently active language and
    put its value into the ``language`` context
    variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
    This will fetch the currently active language's layout and
    put its value into the ``bidi`` context variable.
    True indicates right-to-left layout, otherwise left-to-right.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
    be pulled out by make-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
"""
class TranslateParser(TokenParser):
def top(self):
value = self.value()
            # Backwards compatibility fix:
# FilterExpression does not support single-quoted strings,
# so we make a cheap localized fix in order to maintain
# backwards compatibility with existing uses of ``trans``
# where single quote use is supported.
if value[0] == "'":
pos = None
m = re.match("^'([^']+)'(\|.*$)",value)
if m:
value = '"%s"%s' % (m.group(1).replace('"','\\"'),m.group(2))
elif value[-1] == "'":
value = '"%s"' % value[1:-1].replace('"','\\"')
if self.more():
if self.tag() == 'noop':
noop = True
else:
raise TemplateSyntaxError("only option for 'trans' is 'noop'")
else:
noop = False
return (value, noop)
value, noop = TranslateParser(token.contents).top()
return TranslateNode(parser.compile_filter(value), noop)
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = options['count'].items()[0]
else:
countervar, counter = None, None
extra_context = options.get('with', {})
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter)
register.tag('get_available_languages', do_get_available_languages)
register.tag('get_language_info', do_get_language_info)
register.tag('get_language_info_list', do_get_language_info_list)
register.tag('get_current_language', do_get_current_language)
register.tag('get_current_language_bidi', do_get_current_language_bidi)
register.tag('trans', do_translate)
register.tag('blocktrans', do_block_translate)
register.filter(language_name)
register.filter(language_name_local)
register.filter(language_bidi)
| bdelliott/wordgame | web/django/templatetags/i18n.py | Python | mit | 13,319 |
"""This test checks for correct wait4() behavior.
"""
import os
import time
from test.fork_wait import ForkWait
from test.test_support import run_unittest, reap_children, get_attribute
# If either of these do not exist, skip this test.
get_attribute(os, 'fork')
get_attribute(os, 'wait4')
class Wait4Test(ForkWait):
def wait_impl(self, cpid):
for i in range(10):
# wait4() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
spid, status, rusage = os.wait4(cpid, os.WNOHANG)
if spid == cpid:
break
time.sleep(1.0)
self.assertEqual(spid, cpid)
self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
self.assertTrue(rusage)
def test_main():
run_unittest(Wait4Test)
reap_children()
if __name__ == "__main__":
test_main()
| teeple/pns_server | work/install/Python-2.7.4/Lib/test/test_wait4.py | Python | gpl-2.0 | 940 |
a = {'b',] | idea4bsd/idea4bsd | python/testData/psi/NotClosedBraceSet.py | Python | apache-2.0 | 10 |
from __future__ import division, absolute_import, print_function
import unittest
import os
import sys
import copy
from numpy import (
array, alltrue, ndarray, zeros, dtype, intp, clongdouble
)
from numpy.testing import (
run_module_suite, assert_, assert_equal, SkipTest
)
from numpy.core.multiarray import typeinfo
import util
wrap = None
def setup():
"""
Build the required testing extension module
"""
global wrap
# Check compiler availability first
if not util.has_c_compiler():
raise SkipTest("No C compiler available")
if wrap is None:
config_code = """
config.add_extension('test_array_from_pyobj_ext',
sources=['wrapmodule.c', 'fortranobject.c'],
define_macros=[])
"""
d = os.path.dirname(__file__)
src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'),
os.path.join(d, '..', 'src', 'fortranobject.c'),
os.path.join(d, '..', 'src', 'fortranobject.h')]
wrap = util.build_module_distutils(src, config_code,
'test_array_from_pyobj_ext')
def flags_info(arr):
flags = wrap.array_attrs(arr)[6]
return flags2names(flags)
def flags2names(flags):
info = []
for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY',
'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE',
'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO',
'CARRAY', 'FARRAY'
]:
if abs(flags) & getattr(wrap, flagname, 0):
info.append(flagname)
return info
class Intent(object):
def __init__(self, intent_list=[]):
self.intent_list = intent_list[:]
flags = 0
for i in intent_list:
if i == 'optional':
flags |= wrap.F2PY_OPTIONAL
else:
flags |= getattr(wrap, 'F2PY_INTENT_' + i.upper())
self.flags = flags
def __getattr__(self, name):
name = name.lower()
if name == 'in_':
name = 'in'
return self.__class__(self.intent_list + [name])
def __str__(self):
return 'intent(%s)' % (','.join(self.intent_list))
def __repr__(self):
return 'Intent(%r)' % (self.intent_list)
def is_intent(self, *names):
for name in names:
if name not in self.intent_list:
return False
return True
def is_intent_exact(self, *names):
return len(self.intent_list) == len(names) and self.is_intent(*names)
intent = Intent()
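# Illustrative behavior of the Intent helper above:
#   intent.in_.out.is_intent('in')  -> True
#   str(intent.in_.c)               -> 'intent(in,c)'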
_type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT',
'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG',
'FLOAT', 'DOUBLE', 'CFLOAT']
_cast_dict = {'BOOL': ['BOOL']}
_cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE']
_cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE']
_cast_dict['BYTE'] = ['BYTE']
_cast_dict['UBYTE'] = ['UBYTE']
_cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT']
_cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT']
_cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT']
_cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT']
_cast_dict['LONG'] = _cast_dict['INT'] + ['LONG']
_cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG']
_cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG']
_cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG']
_cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT']
_cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE']
_cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT']
# 32 bit system malloc typically does not provide the alignment required by
# 16 byte long double types. This means the inout intent cannot be satisfied
# and several tests fail, as the alignment flag can be randomly true or false.
# When numpy gains an aligned allocator the tests could be enabled again.
if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and
sys.platform != 'win32'):
_type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE'])
_cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \
['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE']
_cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \
['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE']
_cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE']
class Type(object):
_type_cache = {}
def __new__(cls, name):
if isinstance(name, dtype):
dtype0 = name
name = None
for n, i in typeinfo.items():
if isinstance(i, tuple) and dtype0.type is i[-1]:
name = n
break
obj = cls._type_cache.get(name.upper(), None)
if obj is not None:
return obj
obj = object.__new__(cls)
obj._init(name)
cls._type_cache[name.upper()] = obj
return obj
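    # Instances are memoized per type name, so Type('INT') is Type('INT').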
def _init(self, name):
self.NAME = name.upper()
self.type_num = getattr(wrap, 'NPY_' + self.NAME)
assert_equal(self.type_num, typeinfo[self.NAME][1])
self.dtype = typeinfo[self.NAME][-1]
        self.elsize = typeinfo[self.NAME][2] // 8  # itemsize in bits -> bytes
self.dtypechar = typeinfo[self.NAME][0]
def cast_types(self):
return [self.__class__(_m) for _m in _cast_dict[self.NAME]]
def all_types(self):
return [self.__class__(_m) for _m in _type_names]
def smaller_types(self):
bits = typeinfo[self.NAME][3]
types = []
for name in _type_names:
if typeinfo[name][3] < bits:
types.append(Type(name))
return types
def equal_types(self):
bits = typeinfo[self.NAME][3]
types = []
for name in _type_names:
if name == self.NAME:
continue
if typeinfo[name][3] == bits:
types.append(Type(name))
return types
def larger_types(self):
bits = typeinfo[self.NAME][3]
types = []
for name in _type_names:
if typeinfo[name][3] > bits:
types.append(Type(name))
return types
class Array(object):
def __init__(self, typ, dims, intent, obj):
self.type = typ
self.dims = dims
self.intent = intent
self.obj_copy = copy.deepcopy(obj)
self.obj = obj
# arr.dtypechar may be different from typ.dtypechar
self.arr = wrap.call(typ.type_num, dims, intent.flags, obj)
assert_(isinstance(self.arr, ndarray), repr(type(self.arr)))
self.arr_attr = wrap.array_attrs(self.arr)
if len(dims) > 1:
if self.intent.is_intent('c'):
assert_(intent.flags & wrap.F2PY_INTENT_C)
assert_(not self.arr.flags['FORTRAN'],
repr((self.arr.flags, getattr(obj, 'flags', None))))
assert_(self.arr.flags['CONTIGUOUS'])
assert_(not self.arr_attr[6] & wrap.FORTRAN)
else:
assert_(not intent.flags & wrap.F2PY_INTENT_C)
assert_(self.arr.flags['FORTRAN'])
assert_(not self.arr.flags['CONTIGUOUS'])
assert_(self.arr_attr[6] & wrap.FORTRAN)
if obj is None:
self.pyarr = None
self.pyarr_attr = None
return
if intent.is_intent('cache'):
assert_(isinstance(obj, ndarray), repr(type(obj)))
self.pyarr = array(obj).reshape(*dims).copy()
else:
self.pyarr = array(array(obj, dtype=typ.dtypechar).reshape(*dims),
order=self.intent.is_intent('c') and 'C' or 'F')
assert_(self.pyarr.dtype == typ,
repr((self.pyarr.dtype, typ)))
assert_(self.pyarr.flags['OWNDATA'], (obj, intent))
self.pyarr_attr = wrap.array_attrs(self.pyarr)
if len(dims) > 1:
if self.intent.is_intent('c'):
assert_(not self.pyarr.flags['FORTRAN'])
assert_(self.pyarr.flags['CONTIGUOUS'])
assert_(not self.pyarr_attr[6] & wrap.FORTRAN)
else:
assert_(self.pyarr.flags['FORTRAN'])
assert_(not self.pyarr.flags['CONTIGUOUS'])
assert_(self.pyarr_attr[6] & wrap.FORTRAN)
assert_(self.arr_attr[1] == self.pyarr_attr[1]) # nd
assert_(self.arr_attr[2] == self.pyarr_attr[2]) # dimensions
if self.arr_attr[1] <= 1:
assert_(self.arr_attr[3] == self.pyarr_attr[3],
repr((self.arr_attr[3], self.pyarr_attr[3],
self.arr.tobytes(), self.pyarr.tobytes()))) # strides
assert_(self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:],
repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr
assert_(self.arr_attr[6] == self.pyarr_attr[6],
repr((self.arr_attr[6], self.pyarr_attr[6],
flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]),
flags2names(self.arr_attr[6]), intent))) # flags
if intent.is_intent('cache'):
assert_(self.arr_attr[5][3] >= self.type.elsize,
repr((self.arr_attr[5][3], self.type.elsize)))
else:
assert_(self.arr_attr[5][3] == self.type.elsize,
repr((self.arr_attr[5][3], self.type.elsize)))
assert_(self.arr_equal(self.pyarr, self.arr))
if isinstance(self.obj, ndarray):
if typ.elsize == Type(obj.dtype).elsize:
if not intent.is_intent('copy') and self.arr_attr[1] <= 1:
assert_(self.has_shared_memory())
def arr_equal(self, arr1, arr2):
if arr1.shape != arr2.shape:
return False
s = arr1 == arr2
return alltrue(s.flatten())
def __str__(self):
return str(self.arr)
def has_shared_memory(self):
"""Check that created array shares data with input array.
"""
if self.obj is self.arr:
return True
if not isinstance(self.obj, ndarray):
return False
obj_attr = wrap.array_attrs(self.obj)
return obj_attr[0] == self.arr_attr[0]
class test_intent(unittest.TestCase):
def test_in_out(self):
assert_equal(str(intent.in_.out), 'intent(in,out)')
assert_(intent.in_.c.is_intent('c'))
assert_(not intent.in_.c.is_intent_exact('c'))
assert_(intent.in_.c.is_intent_exact('c', 'in'))
assert_(intent.in_.c.is_intent_exact('in', 'c'))
assert_(not intent.in_.is_intent('c'))
class _test_shared_memory:
num2seq = [1, 2]
num23seq = [[1, 2, 3], [4, 5, 6]]
def test_in_from_2seq(self):
a = self.array([2], intent.in_, self.num2seq)
assert_(not a.has_shared_memory())
def test_in_from_2casttype(self):
for t in self.type.cast_types():
obj = array(self.num2seq, dtype=t.dtype)
a = self.array([len(self.num2seq)], intent.in_, obj)
if t.elsize == self.type.elsize:
assert_(
a.has_shared_memory(), repr((self.type.dtype, t.dtype)))
else:
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_inout_2seq(self):
obj = array(self.num2seq, dtype=self.type.dtype)
a = self.array([len(self.num2seq)], intent.inout, obj)
assert_(a.has_shared_memory())
try:
a = self.array([2], intent.in_.inout, self.num2seq)
except TypeError as msg:
if not str(msg).startswith('failed to initialize intent'
'(inout|inplace|cache) array'):
raise
else:
raise SystemError('intent(inout) should have failed on sequence')
def test_f_inout_23seq(self):
obj = array(self.num23seq, dtype=self.type.dtype, order='F')
shape = (len(self.num23seq), len(self.num23seq[0]))
a = self.array(shape, intent.in_.inout, obj)
assert_(a.has_shared_memory())
obj = array(self.num23seq, dtype=self.type.dtype, order='C')
shape = (len(self.num23seq), len(self.num23seq[0]))
try:
a = self.array(shape, intent.in_.inout, obj)
except ValueError as msg:
if not str(msg).startswith('failed to initialize intent'
'(inout) array'):
raise
else:
raise SystemError(
'intent(inout) should have failed on improper array')
def test_c_inout_23seq(self):
obj = array(self.num23seq, dtype=self.type.dtype)
shape = (len(self.num23seq), len(self.num23seq[0]))
a = self.array(shape, intent.in_.c.inout, obj)
assert_(a.has_shared_memory())
def test_in_copy_from_2casttype(self):
for t in self.type.cast_types():
obj = array(self.num2seq, dtype=t.dtype)
a = self.array([len(self.num2seq)], intent.in_.copy, obj)
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_c_in_from_23seq(self):
a = self.array([len(self.num23seq), len(self.num23seq[0])],
intent.in_, self.num23seq)
assert_(not a.has_shared_memory())
def test_in_from_23casttype(self):
for t in self.type.cast_types():
obj = array(self.num23seq, dtype=t.dtype)
a = self.array([len(self.num23seq), len(self.num23seq[0])],
intent.in_, obj)
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_f_in_from_23casttype(self):
for t in self.type.cast_types():
obj = array(self.num23seq, dtype=t.dtype, order='F')
a = self.array([len(self.num23seq), len(self.num23seq[0])],
intent.in_, obj)
if t.elsize == self.type.elsize:
assert_(a.has_shared_memory(), repr(t.dtype))
else:
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_c_in_from_23casttype(self):
for t in self.type.cast_types():
obj = array(self.num23seq, dtype=t.dtype)
a = self.array([len(self.num23seq), len(self.num23seq[0])],
intent.in_.c, obj)
if t.elsize == self.type.elsize:
assert_(a.has_shared_memory(), repr(t.dtype))
else:
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_f_copy_in_from_23casttype(self):
for t in self.type.cast_types():
obj = array(self.num23seq, dtype=t.dtype, order='F')
a = self.array([len(self.num23seq), len(self.num23seq[0])],
intent.in_.copy, obj)
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_c_copy_in_from_23casttype(self):
for t in self.type.cast_types():
obj = array(self.num23seq, dtype=t.dtype)
a = self.array([len(self.num23seq), len(self.num23seq[0])],
intent.in_.c.copy, obj)
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_in_cache_from_2casttype(self):
for t in self.type.all_types():
if t.elsize != self.type.elsize:
continue
obj = array(self.num2seq, dtype=t.dtype)
shape = (len(self.num2seq),)
a = self.array(shape, intent.in_.c.cache, obj)
assert_(a.has_shared_memory(), repr(t.dtype))
a = self.array(shape, intent.in_.cache, obj)
assert_(a.has_shared_memory(), repr(t.dtype))
obj = array(self.num2seq, dtype=t.dtype, order='F')
a = self.array(shape, intent.in_.c.cache, obj)
assert_(a.has_shared_memory(), repr(t.dtype))
a = self.array(shape, intent.in_.cache, obj)
assert_(a.has_shared_memory(), repr(t.dtype))
try:
a = self.array(shape, intent.in_.cache, obj[::-1])
except ValueError as msg:
if not str(msg).startswith('failed to initialize'
' intent(cache) array'):
raise
else:
raise SystemError(
'intent(cache) should have failed on multisegmented array')
def test_in_cache_from_2casttype_failure(self):
for t in self.type.all_types():
if t.elsize >= self.type.elsize:
continue
obj = array(self.num2seq, dtype=t.dtype)
shape = (len(self.num2seq),)
try:
self.array(shape, intent.in_.cache, obj) # Should succeed
except ValueError as msg:
if not str(msg).startswith('failed to initialize'
' intent(cache) array'):
raise
else:
raise SystemError(
'intent(cache) should have failed on smaller array')
def test_cache_hidden(self):
shape = (2,)
a = self.array(shape, intent.cache.hide, None)
assert_(a.arr.shape == shape)
shape = (2, 3)
a = self.array(shape, intent.cache.hide, None)
assert_(a.arr.shape == shape)
shape = (-1, 3)
try:
a = self.array(shape, intent.cache.hide, None)
except ValueError as msg:
if not str(msg).startswith('failed to create intent'
'(cache|hide)|optional array'):
raise
else:
raise SystemError(
'intent(cache) should have failed on undefined dimensions')
def test_hidden(self):
shape = (2,)
a = self.array(shape, intent.hide, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype)))
shape = (2, 3)
a = self.array(shape, intent.hide, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype)))
assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS'])
shape = (2, 3)
a = self.array(shape, intent.c.hide, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype)))
assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS'])
shape = (-1, 3)
try:
a = self.array(shape, intent.hide, None)
except ValueError as msg:
if not str(msg).startswith('failed to create intent'
'(cache|hide)|optional array'):
raise
else:
raise SystemError('intent(hide) should have failed'
' on undefined dimensions')
def test_optional_none(self):
shape = (2,)
a = self.array(shape, intent.optional, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype)))
shape = (2, 3)
a = self.array(shape, intent.optional, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype)))
assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS'])
shape = (2, 3)
a = self.array(shape, intent.c.optional, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype)))
assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS'])
def test_optional_from_2seq(self):
obj = self.num2seq
shape = (len(obj),)
a = self.array(shape, intent.optional, obj)
assert_(a.arr.shape == shape)
assert_(not a.has_shared_memory())
def test_optional_from_23seq(self):
obj = self.num23seq
shape = (len(obj), len(obj[0]))
a = self.array(shape, intent.optional, obj)
assert_(a.arr.shape == shape)
assert_(not a.has_shared_memory())
a = self.array(shape, intent.optional.c, obj)
assert_(a.arr.shape == shape)
assert_(not a.has_shared_memory())
def test_inplace(self):
obj = array(self.num23seq, dtype=self.type.dtype)
assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS'])
shape = obj.shape
a = self.array(shape, intent.inplace, obj)
assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr)))
a.arr[1][2] = 54
assert_(obj[1][2] == a.arr[1][2] ==
array(54, dtype=self.type.dtype), repr((obj, a.arr)))
assert_(a.arr is obj)
assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace!
assert_(not obj.flags['CONTIGUOUS'])
def test_inplace_from_casttype(self):
for t in self.type.cast_types():
if t is self.type:
continue
obj = array(self.num23seq, dtype=t.dtype)
assert_(obj.dtype.type == t.dtype)
assert_(obj.dtype.type is not self.type.dtype)
assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS'])
shape = obj.shape
a = self.array(shape, intent.inplace, obj)
assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr)))
a.arr[1][2] = 54
assert_(obj[1][2] == a.arr[1][2] ==
array(54, dtype=self.type.dtype), repr((obj, a.arr)))
assert_(a.arr is obj)
assert_(obj.flags['FORTRAN']) # obj attributes changed inplace!
assert_(not obj.flags['CONTIGUOUS'])
assert_(obj.dtype.type is self.type.dtype) # obj changed inplace!
for t in _type_names:
exec('''\
class test_%s_gen(unittest.TestCase,
_test_shared_memory
):
def setUp(self):
self.type = Type(%r)
array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj)
''' % (t, t, t))
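# The exec above stamps out one TestCase subclass per type name (for example
# test_INT_gen), each binding self.type and an array() factory for that Type.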
if __name__ == "__main__":
setup()
run_module_suite()
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/numpy/f2py/tests/test_array_from_pyobj.py | Python | mit | 22,124 |
from __future__ import absolute_import
import logging
import re
import pip
from pip.req import InstallRequirement
from pip.req.req_file import COMMENT_RE
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.pkg_resources import RequirementParseError
logger = logging.getLogger(__name__)
def freeze(
requirement=None,
find_links=None, local_only=None, user_only=None, skip_regex=None,
default_vcs=None,
isolated=False,
wheel_cache=None,
skip=()):
find_links = find_links or []
skip_match = None
if skip_regex:
skip_match = re.compile(skip_regex).search
dependency_links = []
for dist in pkg_resources.working_set:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt')
)
for link in find_links:
if '#egg=' in link:
dependency_links.append(link)
for link in find_links:
yield '-f %s' % link
installations = {}
for dist in get_installed_distributions(local_only=local_only,
skip=(),
user_only=user_only):
try:
req = pip.FrozenRequirement.from_dist(
dist,
dependency_links
)
except RequirementParseError:
logger.warning(
"Could not parse requirement: %s",
dist.project_name
)
continue
installations[req.name] = req
if requirement:
# the options that don't get turned into an InstallRequirement
# should only be emitted once, even if the same option is in multiple
# requirements files, so we need to keep track of what has been emitted
# so that we don't emit it again if it's seen again
emitted_options = set()
for req_file_path in requirement:
with open(req_file_path) as req_file:
for line in req_file:
if (not line.strip() or
line.strip().startswith('#') or
(skip_match and skip_match(line)) or
line.startswith((
'-r', '--requirement',
'-Z', '--always-unzip',
'-f', '--find-links',
'-i', '--index-url',
'--pre',
'--trusted-host',
'--process-dependency-links',
'--extra-index-url'))):
line = line.rstrip()
if line not in emitted_options:
emitted_options.add(line)
yield line
continue
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
line_req = InstallRequirement.from_editable(
line,
default_vcs=default_vcs,
isolated=isolated,
wheel_cache=wheel_cache,
)
else:
line_req = InstallRequirement.from_line(
COMMENT_RE.sub('', line).strip(),
isolated=isolated,
wheel_cache=wheel_cache,
)
if not line_req.name:
logger.info(
"Skipping line in requirement file [%s] because "
"it's not clear what it would install: %s",
req_file_path, line.strip(),
)
logger.info(
" (add #egg=PackageName to the URL to avoid"
" this warning)"
)
elif line_req.name not in installations:
logger.warning(
"Requirement file [%s] contains %s, but that "
"package is not installed",
req_file_path, COMMENT_RE.sub('', line).strip(),
)
else:
yield str(installations[line_req.name]).rstrip()
del installations[line_req.name]
yield(
'## The following requirements were added by '
'pip freeze:'
)
for installation in sorted(
installations.values(), key=lambda x: x.name.lower()):
if canonicalize_name(installation.name) not in skip:
yield str(installation).rstrip()
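# Illustrative usage (not part of this module): the `pip freeze` command drives
# this generator roughly as
#   for line in freeze(local_only=True):
#       print(line)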
| BitWriters/Zenith_project | zango/lib/python3.5/site-packages/pip/operations/freeze.py | Python | mit | 5,194 |
# -*- coding: utf-8 -*-
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin`` or ``manage.py``).
"""
from __future__ import unicode_literals
import os
import sys
import warnings
from argparse import ArgumentParser
from optparse import OptionParser
import django
from django.core import checks
from django.core.management.color import color_style, no_style
from django.db import connections
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
class SystemCheckError(CommandError):
"""
The system check framework detected unrecoverable errors.
"""
pass
class CommandParser(ArgumentParser):
"""
    Customized ArgumentParser class to improve some error messages and prevent
    SystemExit on several occasions, as SystemExit is unacceptable when a
command is called programmatically.
"""
def __init__(self, cmd, **kwargs):
self.cmd = cmd
super(CommandParser, self).__init__(**kwargs)
def parse_args(self, args=None, namespace=None):
# Catch missing argument for a better error message
if (hasattr(self.cmd, 'missing_args_message') and
not (args or any(not arg.startswith('-') for arg in args))):
self.error(self.cmd.missing_args_message)
return super(CommandParser, self).parse_args(args, namespace)
def error(self, message):
if self.cmd._called_from_command_line:
super(CommandParser, self).error(message)
else:
raise CommandError("Error: %s" % message)
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class OutputWrapper(object):
"""
Wrapper around stdout/stderr
"""
@property
def style_func(self):
return self._style_func
@style_func.setter
def style_func(self, style_func):
if style_func and self.isatty():
self._style_func = style_func
else:
self._style_func = lambda x: x
def __init__(self, out, style_func=None, ending='\n'):
self._out = out
self.style_func = None
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def isatty(self):
return hasattr(self._out, 'isatty') and self._out.isatty()
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = style_func or self.style_func
self._out.write(force_str(style_func(msg)))
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<app_label
app_label ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
Deprecated and will be removed in Django 1.10.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_system_checks``
A boolean; if ``True``, entire Django project will be checked for errors
prior to executing the command. Default value is ``True``.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
is the list of application's configuration provided by the
app registry.
``leave_locale_alone``
A boolean indicating whether the locale set in settings should be
preserved during the execution of the command instead of translations
being deactivated.
Default value is ``False``.
Make sure you know what you are doing if you decide to change the value
of this option in your custom command if it creates database content
that is locale-sensitive and such content shouldn't contain any
translations (like it happens e.g. with django.contrib.auth
permissions) as activating any locale might cause unintended effects.
This option can't be False when the can_import_settings option is set
to False too because attempting to deactivate translations needs access
to settings. This condition will generate a CommandError.
"""
# Metadata about this command.
option_list = ()
help = ''
args = ''
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
can_import_settings = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
leave_locale_alone = False
requires_system_checks = True
def __init__(self, stdout=None, stderr=None, no_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color:
self.style = no_style()
else:
self.style = color_style()
self.stderr.style_func = self.style.ERROR
@property
def use_argparse(self):
return not bool(self.option_list)
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
if not self.use_argparse:
def store_as_int(option, opt_str, value, parser):
setattr(parser.values, option.dest, int(value))
# Backwards compatibility: use deprecated optparse module
warnings.warn("OptionParser usage for Django management commands "
"is deprecated, use ArgumentParser instead",
RemovedInDjango110Warning)
parser = OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version())
parser.add_option('-v', '--verbosity', action='callback', dest='verbosity', default=1,
type='choice', choices=['0', '1', '2', '3'], callback=store_as_int,
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
parser.add_option('--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_option('--pythonpath',
                help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
parser.add_option('--traceback', action='store_true',
help='Raise on CommandError exceptions')
parser.add_option('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output.")
for opt in self.option_list:
parser.add_option(opt)
else:
parser = CommandParser(self, prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None)
parser.add_argument('--version', action='version', version=self.get_version())
parser.add_argument('-v', '--verbosity', action='store', dest='verbosity', default='1',
type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
parser.add_argument('--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_argument('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
parser.add_argument('--traceback', action='store_true',
help='Raise on CommandError exceptions')
parser.add_argument('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output.")
if self.args:
# Keep compatibility and always accept positional arguments, like optparse when args is set
parser.add_argument('args', nargs='*')
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
if self.use_argparse:
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop('args', ())
else:
options, args = parser.parse_args(argv[2:])
cmd_options = vars(options)
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
finally:
connections.close_all()
def execute(self, *args, **options):
"""
Try to execute this command, performing system checks if needed (as
controlled by the ``requires_system_checks`` attribute, except if
force-skipped).
"""
if options.get('no_color'):
self.style = no_style()
self.stderr.style_func = None
if options.get('stdout'):
self.stdout = OutputWrapper(options['stdout'])
if options.get('stderr'):
self.stderr = OutputWrapper(options.get('stderr'), self.stderr.style_func)
saved_locale = None
if not self.leave_locale_alone:
# Only mess with locales if we can assume we have a working
# settings file, because django.utils.translation requires settings
# (The final saying about whether the i18n machinery is active will be
# found in the value of the USE_I18N setting)
if not self.can_import_settings:
raise CommandError("Incompatible values of 'leave_locale_alone' "
"(%s) and 'can_import_settings' (%s) command "
"options." % (self.leave_locale_alone,
self.can_import_settings))
# Deactivate translations, because django-admin creates database
# content like permissions, and those shouldn't contain any
# translations.
from django.utils import translation
saved_locale = translation.get_language()
translation.deactivate_all()
try:
if (self.requires_system_checks and
not options.get('skip_validation') and # Remove at the end of deprecation for `skip_validation`.
not options.get('skip_checks')):
self.check()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()))
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()))
finally:
if saved_locale is not None:
translation.activate(saved_locale)
def check(self, app_configs=None, tags=None, display_num_errors=False,
include_deployment_checks=False):
"""
Uses the system check framework to validate entire Django project.
Raises CommandError for any serious message (error or critical errors).
If there are only light messages (like warnings), they are printed to
stderr and no exception is raised.
"""
all_issues = checks.run_checks(
app_configs=app_configs,
tags=tags,
include_deployment_checks=include_deployment_checks,
)
header, body, footer = "", "", ""
visible_issue_count = 0 # excludes silenced warnings
if all_issues:
debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()]
warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()]
errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL]
criticals = [e for e in all_issues if checks.CRITICAL <= e.level]
sorted_issues = [
(criticals, 'CRITICALS'),
(errors, 'ERRORS'),
(warnings, 'WARNINGS'),
(infos, 'INFOS'),
(debugs, 'DEBUGS'),
]
for issues, group_name in sorted_issues:
if issues:
visible_issue_count += len(issues)
formatted = (
self.style.ERROR(force_str(e))
if e.is_serious()
else self.style.WARNING(force_str(e))
for e in issues)
formatted = "\n".join(sorted(formatted))
body += '\n%s:\n%s\n' % (group_name, formatted)
if visible_issue_count:
header = "System check identified some issues:\n"
if display_num_errors:
if visible_issue_count:
footer += '\n'
footer += "System check identified %s (%s silenced)." % (
"no issues" if visible_issue_count == 0 else
"1 issue" if visible_issue_count == 1 else
"%s issues" % visible_issue_count,
len(all_issues) - visible_issue_count,
)
if any(e.is_serious() and not e.is_silenced() for e in all_issues):
msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
raise SystemCheckError(msg)
else:
msg = header + body + footer
if msg:
if visible_issue_count:
self.stderr.write(msg, lambda x: x)
else:
self.stdout.write(msg)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
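# Illustrative sketch (not part of Django): a minimal custom command built on
# the machinery documented above; the class and its behaviour are hypothetical.
class _ExampleGreetCommand(BaseCommand):
    help = "Print a greeting for each name given on the command line."
    def add_arguments(self, parser):
        # Positional names end up in options['args'] and are passed to handle().
        parser.add_argument('args', metavar='name', nargs='+')
    def handle(self, *args, **options):
        for name in args:
            self.stdout.write("Hello, %s" % name)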
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application labels
as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app_config()``, which will be called once for each application.
"""
missing_args_message = "Enter at least one application label."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='+',
            help='One or more application labels.')
def handle(self, *app_labels, **options):
from django.apps import apps
try:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
except (LookupError, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app_config in app_configs:
app_output = self.handle_app_config(app_config, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app_config(self, app_config, **options):
"""
Perform the command's actions for app_config, an AppConfig instance
corresponding to an application label given on the command line.
"""
raise NotImplementedError(
"Subclasses of AppCommand must provide"
"a handle_app_config() method.")
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
label = 'label'
missing_args_message = "Enter at least one %s." % label
def add_arguments(self, parser):
parser.add_argument('args', metavar=self.label, nargs='+')
def handle(self, *labels, **options):
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def __init__(self):
warnings.warn(
"NoArgsCommand class is deprecated and will be removed in Django 1.10. "
"Use BaseCommand instead, which takes no arguments by default.",
RemovedInDjango110Warning
)
super(NoArgsCommand, self).__init__()
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError('subclasses of NoArgsCommand must provide a handle_noargs() method')
| DONIKAN/django | django/core/management/base.py | Python | bsd-3-clause | 23,884 |
from django import template
from django.apps import apps
from django.utils.encoding import iri_to_uri
from django.utils.six.moves.urllib.parse import urljoin
register = template.Library()
class PrefixNode(template.Node):
def __repr__(self):
return "<PrefixNode for %r>" % self.name
def __init__(self, varname=None, name=None):
if name is None:
raise template.TemplateSyntaxError(
"Prefix nodes must be given a name to return.")
self.varname = varname
self.name = name
@classmethod
def handle_token(cls, parser, token, name):
"""
Class method to parse prefix node and return a Node.
"""
        # token.split_contents() isn't useful here because tags using this method don't accept variables as arguments
tokens = token.contents.split()
if len(tokens) > 1 and tokens[1] != 'as':
raise template.TemplateSyntaxError(
"First argument in '%s' must be 'as'" % tokens[0])
if len(tokens) > 1:
varname = tokens[2]
else:
varname = None
return cls(varname, name)
@classmethod
def handle_simple(cls, name):
try:
from django.conf import settings
except ImportError:
prefix = ''
else:
prefix = iri_to_uri(getattr(settings, name, ''))
return prefix
def render(self, context):
prefix = self.handle_simple(self.name)
if self.varname is None:
return prefix
context[self.varname] = prefix
return ''
@register.tag
def get_static_prefix(parser, token):
"""
Populates a template variable with the static prefix,
``settings.STATIC_URL``.
Usage::
{% get_static_prefix [as varname] %}
Examples::
{% get_static_prefix %}
{% get_static_prefix as static_prefix %}
"""
return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
"""
Populates a template variable with the media prefix,
``settings.MEDIA_URL``.
Usage::
{% get_media_prefix [as varname] %}
Examples::
{% get_media_prefix %}
{% get_media_prefix as media_prefix %}
"""
return PrefixNode.handle_token(parser, token, "MEDIA_URL")
class StaticNode(template.Node):
def __init__(self, varname=None, path=None):
if path is None:
raise template.TemplateSyntaxError(
"Static template nodes must be given a path to return.")
self.path = path
self.varname = varname
def url(self, context):
path = self.path.resolve(context)
return self.handle_simple(path)
def render(self, context):
url = self.url(context)
if self.varname is None:
return url
context[self.varname] = url
return ''
@classmethod
def handle_simple(cls, path):
if apps.is_installed('django.contrib.staticfiles'):
from django.contrib.staticfiles.storage import staticfiles_storage
return staticfiles_storage.url(path)
else:
return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)
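    # With staticfiles installed, the configured storage builds the URL (e.g.
    # hashed filenames); otherwise the path is simply joined to STATIC_URL.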
@classmethod
def handle_token(cls, parser, token):
"""
Class method to parse prefix node and return a Node.
"""
bits = token.split_contents()
if len(bits) < 2:
raise template.TemplateSyntaxError(
"'%s' takes at least one argument (path to file)" % bits[0])
path = parser.compile_filter(bits[1])
if len(bits) >= 2 and bits[-2] == 'as':
varname = bits[3]
else:
varname = None
return cls(varname, path)
@register.tag('static')
def do_static(parser, token):
"""
Joins the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %}
"""
return StaticNode.handle_token(parser, token)
def static(path):
"""
Given a relative path to a static asset, return the absolute path to the
asset.
"""
return StaticNode.handle_simple(path)
| KrzysztofStachanczyk/Sensors-WWW-website | www/env/lib/python2.7/site-packages/django/templatetags/static.py | Python | gpl-3.0 | 4,391 |
urlpatterns = []
handler404 = 'csrf_tests.views.csrf_token_error_handler'
| nesdis/djongo | tests/django_tests/tests/v22/tests/csrf_tests/csrf_token_error_handler_urls.py | Python | agpl-3.0 | 75 |
"""Class for storing shared keys."""
from utils.cryptomath import *
from utils.compat import *
from mathtls import *
from Session import Session
from BaseDB import BaseDB
class SharedKeyDB(BaseDB):
"""This class represent an in-memory or on-disk database of shared
keys.
A SharedKeyDB can be passed to a server handshake function to
authenticate a client based on one of the shared keys.
This class is thread-safe.
"""
def __init__(self, filename=None):
"""Create a new SharedKeyDB.
@type filename: str
@param filename: Filename for an on-disk database, or None for
an in-memory database. If the filename already exists, follow
this with a call to open(). To create a new on-disk database,
follow this with a call to create().
"""
BaseDB.__init__(self, filename, "shared key")
def _getItem(self, username, valueStr):
session = Session()
session._createSharedKey(username, valueStr)
return session
def __setitem__(self, username, sharedKey):
"""Add a shared key to the database.
@type username: str
@param username: The username to associate the shared key with.
Must be less than or equal to 16 characters in length, and must
not already be in the database.
@type sharedKey: str
@param sharedKey: The shared key to add. Must be less than 48
characters in length.
"""
BaseDB.__setitem__(self, username, sharedKey)
def _setItem(self, username, value):
if len(username)>16:
raise ValueError("username too long")
if len(value)>=48:
raise ValueError("shared key too long")
return value
def _checkItem(self, value, username, param):
newSession = self._getItem(username, param)
        return value.masterSecret == newSession.masterSecret
| edisonlz/fruit | web_project/base/site-packages/gdata/tlslite/SharedKeyDB.py | Python | apache-2.0 | 1,914 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
from ctypes import windll, wintypes
import os
import shutil
import subprocess
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class LinkLock(object):
"""A flock-style lock to limit the number of concurrent links to one.
Uses a session-local mutex based on the file's directory.
"""
def __enter__(self):
name = 'Local\\%s' % BASE_DIR.replace('\\', '_').replace(':', '_')
self.mutex = windll.kernel32.CreateMutexW(
wintypes.c_int(0),
wintypes.c_int(0),
wintypes.create_unicode_buffer(name))
assert self.mutex
result = windll.kernel32.WaitForSingleObject(
self.mutex, wintypes.c_int(0xFFFFFFFF))
# 0x80 means another process was killed without releasing the mutex, but
# that this process has been given ownership. This is fine for our
# purposes.
assert result in (0, 0x80), (
"%s, %s" % (result, windll.kernel32.GetLastError()))
def __exit__(self, type, value, traceback):
windll.kernel32.ReleaseMutex(self.mutex)
windll.kernel32.CloseHandle(self.mutex)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
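    # For example, the block "PATH=C:\tools\0TMP=C:\temp\0\0" parses to
    # {'PATH': 'C:\tools', 'TMP': 'C:\temp'}.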
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
shutil.rmtree(dest)
else:
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
with LinkLock():
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if not line.startswith(' Creating library '):
print line
return popen.returncode
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print line
return popen.returncode
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefix = 'Processing '
processing = set(os.path.basename(x) for x in lines if x.startswith(prefix))
for line in lines:
if not line.startswith(prefix) and line not in processing:
print line
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
# MSVS doesn't assemble x64 asm files.
if arch == 'environment.x64':
return 0
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print line
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print line
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
args = open(rspfile).read()
dir = dir[0] if dir else None
popen = subprocess.Popen(args, shell=True, env=env, cwd=dir)
popen.wait()
return popen.returncode
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| nikste/visualizationDemo | zeppelin-web/node/npm/node_modules/node-gyp/gyp/pylib/gyp/win_tool.py | Python | apache-2.0 | 6,979 |
from tests import unittest
from tests import mock
from unbound_ec2 import server
from tests import attrs
class TestServer(server.Server):
HANDLE_FORWARD_RESULT = 'dummy_handle_forward'
HANDLE_PASS_RESULT = True
DNSMSG = mock.MagicMock()
def handle_request(self, _id, event, qstate, qdata, request_type):
return self.HANDLE_FORWARD_RESULT
def new_dns_msg(self, qname):
return self.DNSMSG
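# TestServer stubs the abstract hooks so Server.operate() can be exercised
# without real DNS plumbing: handle_request() returns a sentinel value and
# new_dns_msg() returns a shared MagicMock.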
class TestAbstractServer(unittest.TestCase):
def setUp(self):
server.log_info = mock.Mock()
lookup_mock = mock.MagicMock()
self.zone = '.bogus.tld'
self.reverse_zone = '127.in-addr.arpa'
self.ttl = 'bogus_ttl'
self.ip_order = 'bogus_ip_order'
self.forwarded_zones = ''
self.srv = TestServer(self.zone, self.reverse_zone, self.ttl, lookup_mock, self.ip_order, self.forwarded_zones)
def tearDown(self):
self.srv = None
def test_operate_event_new(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
qdata = mock.MagicMock()
qstate.qinfo.qname_str = "fqdn.not-bogus.tld"
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_WAIT_MODULE'])
def test_operate_event_pass(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_PASS']
qstate = mock.MagicMock()
qdata = mock.MagicMock()
qstate.qinfo.qname_str = "fqdn.not-bogus.tld"
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_WAIT_MODULE'])
def test_operate_event_moddone(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_MODDONE']
qstate = mock.MagicMock()
qdata = mock.MagicMock()
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_FINISHED'])
def test_operate_forward(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
qstate.qinfo.qtype = attrs['RR_TYPE_A']
qstate.qinfo.qname_str = 'bogus-name%s.' % self.zone
qdata = mock.MagicMock()
self.assertEqual(self.srv.operate(id, event, qstate, qdata), TestServer.HANDLE_FORWARD_RESULT)
qstate.qinfo.qtype = attrs['RR_TYPE_ANY']
self.assertEqual(self.srv.operate(id, event, qstate, qdata), TestServer.HANDLE_FORWARD_RESULT)
def test_forwarded_zones(self):
server.log_info = mock.Mock()
lookup_mock = mock.MagicMock()
forwarded_zones = '.subdomain%s' % self.zone
self.srv2 = TestServer(self.zone, self.reverse_zone, self.ttl, lookup_mock, self.ip_order, forwarded_zones)
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
qstate.qinfo.qtype = attrs['RR_TYPE_A']
        qstate.qinfo.qname_str = 'bogus-name%s' % forwarded_zones
        qdata = mock.MagicMock()
        self.assertEqual(self.srv2.operate(id, event, qstate, qdata), TestServer.HANDLE_PASS_RESULT)
qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_WAIT_MODULE'])
class TestAuthoritativeServer(unittest.TestCase):
def setUp(self):
server.log_info = mock.Mock()
lookup_mock = mock.MagicMock()
self.zone = '.bogus.tld'
self.reverse_zone = '127.in-addr.arpa'
self.ttl = 'bogus_ttl'
self.ip_order = 'bogus_ip_order'
self.forwarded_zones = ''
self.srv = server.Authoritative(self.zone, self.reverse_zone, self.ttl, lookup_mock, self.ip_order,
self.forwarded_zones)
def tearDown(self):
self.srv = None
def test_handle_forward(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
qstate.qinfo.qtype = attrs['RR_TYPE_A']
qstate.qinfo.qname_str = 'bogus-name%s.' % self.zone
qdata = mock.MagicMock()
server.DNSMessage = mock.MagicMock()
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
def test_handle_empty(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
qstate.qinfo.qtype = attrs['RR_TYPE_TXT']
qstate.qinfo.qname_str = 'bogus-name%s.' % self.zone
qdata = mock.MagicMock()
server.DNSMessage = mock.MagicMock()
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
class TestCachingServer(unittest.TestCase):
def setUp(self):
server.log_info = mock.Mock()
self.lookup_mock = mock.MagicMock()
self.zone = '.bogus.tld'
self.reverse_zone = '127.in-addr.arpa'
self.ttl = 88888881
self.ip_order = 'bogus_ip_order'
self.forwarded_zones = ''
self.srv = server.Caching(self.zone, self.reverse_zone, self.ttl, self.lookup_mock, self.ip_order,
self.forwarded_zones)
def tearDown(self):
self.srv = None
def test_handle_forward(self):
server.storeQueryInCache = mock.Mock()
server.DNSMessage = mock.MagicMock()
instances_mock = mock.MagicMock()
instances_mock.tags = {'Address': 'bogus_ip_address'}
self.lookup_mock.lookup.return_value = [instances_mock]
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
qstate.qinfo.qtype = attrs['RR_TYPE_A']
qstate.qinfo.qname_str = 'bogus-name%s.' % self.zone
qdata = mock.MagicMock()
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_FINISHED'])
self.assertEqual(qstate.return_msg.rep.security, 2)
server.DNSMessage.return_value.answer.append.assert_called_with(
'%s %d IN A %s' % (qstate.qinfo.qname_str, self.ttl, 'bogus_ip_address'))
| unibet/unbound-ec2 | tests/unit/test_server.py | Python | isc | 6,080 |
import csv
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from scipy.optimize import curve_fit
def countKey(key,listDataDicts):
outDict = {}
for row in listDataDicts:
try:
outDict[row[key]] += 1
except KeyError:
outDict[row[key]] = 1
return outDict
def avgUse30Days(key, listDataDicts):
totalDays = 0
numberUsers = 0
for person in listDataDicts:
if int(person[key]) < 31 :
totalDays += int(person[key])
numberUsers += 1
return (1.0*totalDays/numberUsers)
def avgUse30DaysWithZeros(key, listDataDicts):
totalDays = 0
numberUsers = 0
for person in listDataDicts:
if ( int(person[key]) < 31 ):
totalDays += int(person[key])
numberUsers += 1
elif ( int(person[key]) == 93 ):
numberUsers += 1
else:
pass
return (1.0*totalDays/numberUsers)
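# Note on coded values (assumed from the survey codebook): responses < 31 are
# actual day counts, 93 means "did not use in the past 30 days", and codes in
# the 900s mark skips/refusals, which is why they are filtered out above.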
def powerLaw(x,a,b):
return a*(x**(-b))
def expDecay(x,a,b):
return a*np.exp(b*x)
listDataDicts = []
with open('34933-0001-Data.tsv', 'rb') as tsvFile:
tsvReader = csv.DictReader(tsvFile,delimiter='\t')
for row in tsvReader:
listDataDicts.append(row)
ageFirstUseKeys = ['CIGTRY', 'SNUFTRY', 'CHEWTRY', 'CIGARTRY', 'ALCTRY', 'MJAGE', 'COCAGE', 'HERAGE', 'HALLAGE', 'INHAGE', 'ANALAGE', 'TRANAGE', 'STIMAGE', 'SEDAGE']
useLast30Keys = ['CIG30USE','SNF30USE','CHW30USE','CGR30USE','ALCDAYS','MJDAY30A','COCUS30A','HER30USE','HAL30USE','INHDY30A','PRDAYPMO','TRDAYPMO','STDAYPMO','SVDAYPMO']
xdata = []
ydata = []
for person in listDataDicts:
for i in range(len(ageFirstUseKeys)):
if (int(person[ageFirstUseKeys[i]]) < 900) and (int(person[useLast30Keys[i]]) < 31):
xdata.append(int(person[ageFirstUseKeys[i]]))
ydata.append(int(person[useLast30Keys[i]]))
slope,intercept,rValue,pValue,stdErr = stats.linregress(xdata,ydata)
print "Drug First Use Age vs Usage Frequency Linear Regression"
print "Slope: %f, Intercept: %f, RSQ-Value: %f, P-Value: %f, Standard Error: %f,\n 95%% Confidence Interval: %f +- %f\n" %(slope,intercept,rValue*rValue,pValue,stdErr, slope, 1.96*stdErr)
'''# Curve fit with a power law
xfit = range(90)
popt1, pcov1 = curve_fit(powerLaw, xdata, ydata)
print "Power Law Curve fit: ",popt1,np.sqrt(np.diag(pcov1)),"\n"
fitLiney1 = np.zeros(len(xfit))
for i in range(len(xfit)):
fitLiney1[i] = powerLaw( xfit[i], popt1[0], popt1[1] )
'''
xdata2 = [ x for x in range(89) ]
ydata2 = [ (x*slope + intercept) for x in range(89) ]
plt.plot(xdata,ydata,'b.',xdata2,ydata2,'r-')
plt.title("Age of First Use vs Usage in the Last 30 Days")
plt.xlabel("Age of First Use")
plt.ylabel("Usage in the Past 30 Days")
plt.legend(["Data","Linear Fit"])
plt.xlim(0,90)
plt.ylim(0,31)
plt.tight_layout()
plt.show()
| cvanoort/USDrugUseAnalysis | Report1/Code/afu_use30.py | Python | isc | 2,851 |
import sys
import types
import typing as t
import decorator as deco
from gssapi.raw.misc import GSSError
if t.TYPE_CHECKING:
from gssapi.sec_contexts import SecurityContext
def import_gssapi_extension(
name: str,
) -> t.Optional[types.ModuleType]:
"""Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
    the method returns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None
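    Example::
        # for instance, load the RFC 5588 credential-store extension;
        # this yields gssapi.raw.ext_rfc5588, or None if unavailable
        rfc5588 = import_gssapi_extension('rfc5588')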
"""
try:
path = 'gssapi.raw.ext_{0}'.format(name)
__import__(path)
return sys.modules[path]
except ImportError:
return None
def inquire_property(
name: str,
doc: t.Optional[str] = None
) -> property:
"""Creates a property based on an inquire result
This method creates a property that calls the
    :python:`_inquire` method and returns the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property
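    Example::
        # sketch (assuming 'lifetime' is a valid _inquire() result field):
        lifetime = inquire_property('lifetime', 'Lifetime of the context')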
"""
def inquire_property(self: "SecurityContext") -> t.Any:
if not self._started:
msg = (f"Cannot read {name} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc)
# use UTF-8 as the default encoding, like Python 3
_ENCODING = 'UTF-8'
def _get_encoding() -> str:
"""Gets the current encoding used for strings.
This value is used to encode and decode string
values like names.
Returns:
str: the current encoding
"""
return _ENCODING
def set_encoding(
enc: str,
) -> None:
"""Sets the current encoding used for strings
This value is used to encode and decode string
values like names.
Args:
enc: the encoding to use
"""
global _ENCODING
_ENCODING = enc
def _encode_dict(
d: t.Dict[t.Union[bytes, str], t.Union[bytes, str]],
) -> t.Dict[bytes, bytes]:
"""Encodes any relevant strings in a dict"""
def enc(x: t.Union[bytes, str]) -> bytes:
if isinstance(x, str):
return x.encode(_ENCODING)
else:
return x
return {enc(k): enc(v) for k, v in d.items()}
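# e.g. _encode_dict({'user': 'x', b'realm': b'Y'}) == {b'user': b'x', b'realm': b'Y'}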
# in case of Python 3, just use exception chaining
@deco.decorator
def catch_and_return_token(
func: t.Callable,
self: "SecurityContext",
*args: t.Any,
**kwargs: t.Any,
) -> t.Optional[bytes]:
"""Optionally defer exceptions and return a token instead
When `__DEFER_STEP_ERRORS__` is set on the implementing class
or instance, methods wrapped with this wrapper will
catch and save their :python:`GSSError` exceptions and
instead return the result token attached to the exception.
    The exception can be later retrieved through :python:`_last_err`.
"""
try:
return func(self, *args, **kwargs)
except GSSError as e:
defer_step_errors = getattr(self, '__DEFER_STEP_ERRORS__', False)
if e.token is not None and defer_step_errors:
self._last_err = e
# skip the "return func" line above in the traceback
tb = e.__traceback__.tb_next # type: ignore[union-attr]
self._last_err.__traceback__ = tb
return e.token
else:
raise
@deco.decorator
def check_last_err(
func: t.Callable,
self: "SecurityContext",
*args: t.Any,
**kwargs: t.Any,
) -> t.Any:
"""Check and raise deferred errors before running the function
This method checks :python:`_last_err` before running the wrapped
function. If present and not None, the exception will be raised
with its original traceback.
"""
if self._last_err is not None:
try:
raise self._last_err
finally:
self._last_err = None
else:
return func(self, *args, **kwargs)
class CheckLastError(type):
"""Check for a deferred error on all methods
This metaclass applies the :python:`check_last_err` decorator
to all methods not prefixed by '_'.
    Additionally, it enables `__DEFER_STEP_ERRORS__` by default.
"""
def __new__(
cls,
name: str,
parents: t.Tuple[t.Type],
attrs: t.Dict[str, t.Any],
) -> "CheckLastError":
attrs['__DEFER_STEP_ERRORS__'] = True
for attr_name in attrs:
attr = attrs[attr_name]
# wrap only methods
if not isinstance(attr, types.FunctionType):
continue
if attr_name[0] != '_':
attrs[attr_name] = check_last_err(attr)
return super(CheckLastError, cls).__new__(cls, name, parents, attrs)
| pythongssapi/python-gssapi | gssapi/_utils.py | Python | isc | 5,004 |
from __future__ import division
from libtbx.test_utils import approx_equal
from libtbx.utils import Usage
from libtbx import easy_run
import libtbx.load_env
import platform
import time
import sys, os
op = os.path
__this_script__ = "cctbx_project/fable/test/sf_times.py"
# based on cctbx_project/compcomm/newsletter09/sf_times.py
setup_dir = "/net/cci/setup/Linux"
ifort_versions = ["intel121.sh", "intel111.sh", "ifort91.sh"]
icc_versions = [
"intel121.sh",
"intel111.sh",
"icc101.sh",
"icc91.sh"]
gcc_versions = [
"gcc-4.6.1_fc8.sh",
"gcc-4.5.3_fc8.sh",
"gcc-4.4.6_fc8.sh",
"gcc-4.3.6_fc8.sh",
"gcc-4.2.4_fc8.sh"]
fortran_template = r"""C %(this_script)s
subroutine cos_wrapper(result, arg)
REAL result
REAL arg
result = COS(arg)
return
end
subroutine exp_wrapper(result, arg)
REAL result
REAL arg
result = EXP(arg)
return
end
subroutine sf(abcss, n_scatt, xyz, b_iso, n_refl, hkl, f_calc)
implicit none
REAL abcss(3)
integer n_scatt
REAL xyz(3, *)
REAL b_iso(*)
integer n_refl
integer hkl(3, *)
REAL f_calc(2, *)
integer i_refl, i_scatt, j, h
REAL phi, cphi, sphi, dss, ldw, dw, a, b
DO i_refl=1,n_refl
a = 0
b = 0
DO i_scatt=1,n_scatt
phi = 0
DO j=1,3
phi = phi + hkl(j,i_refl) * xyz(j,i_scatt)
enddo
phi = phi * 2 * 3.1415926535897931
call cos_wrapper(cphi, phi)
call cos_wrapper(sphi, phi - 3.1415926535897931*0.5)
dss = 0
DO j=1,3
h = hkl(j,i_refl)
dss = dss + h*h * abcss(j)
enddo
ldw = -0.25 * dss * b_iso(i_scatt)
call exp_wrapper(dw, ldw)
a = a + dw * cphi
b = b + dw * sphi
enddo
f_calc(1, i_refl) = a
f_calc(2, i_refl) = b
enddo
return
end
program run
implicit none
REAL abcss(3)
integer n_scatt
parameter(n_scatt=%(n_scatt)s)
REAL xyz(3, n_scatt)
REAL b_iso(n_scatt)
integer n_refl
parameter(n_refl=%(n_refl)s)
integer hkl(3, n_refl)
REAL f_calc(2, n_refl)
integer i, j, jr
REAL a, b, max_a, max_b
abcss(1) = 1/(11.0*11.0)
abcss(2) = 1/(12.0*12.0)
abcss(3) = 1/(13.0*13.0)
jr = 0
DO i=1,n_scatt
DO j=1,3
jr = mod(jr*1366+150889, 714025)
xyz(j,i) = (mod(jr, 20000) - 10000) / 10000.0
enddo
enddo
DO i=1,n_scatt
jr = mod(jr*1366+150889, 714025)
b_iso(i) = mod(jr, 10000) / 100.0
enddo
if (n_scatt .le. 10) then
DO i=1,n_scatt
write(6, '(4(1x,f9.6))')
& xyz(1,i), xyz(2,i), xyz(3, i), b_iso(i)
enddo
endif
DO i=1,n_refl
DO j=1,3
jr = mod(jr*1366+150889, 714025)
hkl(j,i) = mod(jr, 10) - 5
enddo
enddo
call sf(abcss, n_scatt, xyz, b_iso, n_refl, hkl, f_calc)
if (n_refl .le. 100) then
DO i=1,n_refl
write(6, '(3(1x,i3),1x,f12.6,1x,f12.6)')
& hkl(1,i), hkl(2,i), hkl(3,i),
& f_calc(1,i), f_calc(2,i)
enddo
else
max_a = 0
max_b = 0
DO i=1,n_refl
a = f_calc(1,i)
b = f_calc(2,i)
if (max_a .lt. a) max_a = a
if (max_b .lt. b) max_b = b
enddo
write(6, '(2(1x,f12.6))') max_a, max_b
endif
end
"""
def compare_with_cctbx_structure_factors(n_scatt, n_refl, output_lines):
from cctbx import xray
from cctbx import miller
from cctbx import crystal
from cctbx.array_family import flex
crystal_symmetry = crystal.symmetry(
unit_cell=(11,12,13,90,90,90),
space_group_symbol="P1")
scatterers = flex.xray_scatterer()
miller_indices = flex.miller_index()
f_calc = flex.complex_double()
for line in output_lines:
flds = line.split()
assert len(flds) in [4,5]
if (len(flds) == 4):
x,y,z,b_iso = [float(s) for s in flds]
scatterers.append(
xray.scatterer(site=(x,y,z), b=b_iso, scattering_type="const"))
else:
miller_indices.append([int(s) for s in flds[:3]])
f_calc.append(complex(float(flds[3]), float(flds[4])))
assert scatterers.size() == n_scatt
assert miller_indices.size() == n_refl
xs = xray.structure(
crystal_symmetry=crystal_symmetry,
scatterers=scatterers)
fc = miller_array = miller.set(
crystal_symmetry=crystal_symmetry,
indices=miller_indices,
anomalous_flag=False).array(data=f_calc)
fc2 = fc.structure_factors_from_scatterers(
xray_structure=xs,
algorithm="direct",
cos_sin_table=False).f_calc()
for f1,f2 in zip(fc.data(), fc2.data()):
assert approx_equal(f1, f2, eps=1e-5)
def build_run(
setup_cmd, ld_preload_flag, n_scatt, n_refl, build_cmd, check_max_a_b):
if (op.isfile("a.out")):
os.remove("a.out")
assert not op.isfile("a.out")
print build_cmd
buffers = easy_run.fully_buffered(command=build_cmd)
msg = buffers.format_errors_if_any()
if (msg is not None):
if (0):
print build_cmd
print
print msg
print
STOP()
return None
assert op.isfile("a.out")
run_cmd = setup_cmd
if (ld_preload_flag):
run_cmd += 'env LD_PRELOAD='\
'"/net/marbles/raid1/rwgk/dist/opt_resources/linux64/libimf.so:"'\
'"/net/marbles/raid1/rwgk/dist/opt_resources/linux64/libirc.so" '
utimes = []
run_cmd += '/usr/bin/time -p ./a.out'
def run_once():
buffers = easy_run.fully_buffered(command=run_cmd)
if (len(buffers.stderr_lines) != 3):
print "v"*79
print "\n".join(buffers.stderr_lines)
print "^"*79
raise RuntimeError(
"Unexpected number of output lines"
" (3 expected; acutal output see above).")
if (n_scatt == 0):
pass
elif (n_scatt <= 10 and n_refl <= 100):
assert len(buffers.stdout_lines) == n_scatt + n_refl
else:
assert len(buffers.stdout_lines) == 1
max_a, max_b = [float(s) for s in buffers.stdout_lines[0].split()]
if (check_max_a_b):
if (n_scatt == 2000 and n_refl == 20000):
assert approx_equal(max_a, 35.047157, eps=1e-4)
assert approx_equal(max_b, 25.212738, eps=1e-4)
elif (n_scatt == 100 and n_refl == 1000):
assert approx_equal(max_a, 4.493645, eps=1e-4)
assert approx_equal(max_b, 10.515532, eps=1e-4)
elif (n_scatt <= 10 and n_refl <= 100):
if (libtbx.env.has_module(name="cctbx")):
compare_with_cctbx_structure_factors(
n_scatt=n_scatt,
n_refl=n_refl,
output_lines=buffers.stdout_lines)
else:
raise RuntimeError, (max_a, max_b)
utime = float(buffers.stderr_lines[1].split()[1])
utimes.append(utime)
print "sample utime: %.2f" % utime
sys.stdout.flush()
for _ in xrange(8):
run_once()
return min(utimes)
def finalize_cpp_build_cmd(source_cpp):
from fable import simple_compilation
comp_env = simple_compilation.environment()
return comp_env.assemble_include_search_paths(no_quotes=False) \
+ " " + source_cpp
def write_build_run(
setup_cmd, ld_preload_flag, n_scatt, n_refl, real, lang, build_cmd,
replace_cos, replace_exp):
this_script = __this_script__
for_txt = fortran_template % vars()
if (replace_cos):
for_txt = for_txt.replace(
"COS(arg)",
"arg / (abs(arg)+1.0)")
if (replace_exp):
for_txt = for_txt.replace(
"EXP(arg)",
"max(0.0, 1.0 - arg*arg)")
for_txt = for_txt.replace("REAL", real)
open("tmp.f", "w").write(for_txt)
from fable import cout
cpp_txt = cout.process(
file_names=["tmp.f"],
namespace="sf_test",
fem_do_safe=False,
inline_all=True)
open("tmp.cpp", "w").write("\n".join(cpp_txt)+"\n")
if (lang.lower() == "f"):
build_cmd += " tmp.f"
elif (lang.lower() == "c"):
build_cmd += finalize_cpp_build_cmd("tmp.cpp")
else:
raise RuntimeError('Unknown lang: "%s"' % lang)
return build_run(
setup_cmd=setup_cmd,
ld_preload_flag=ld_preload_flag,
n_scatt=n_scatt,
n_refl=n_refl,
build_cmd=build_cmd,
check_max_a_b=(not (replace_cos or replace_exp)))
def run_combinations(
compiler_versions,
all_utimes,
n_scatt,
n_refl,
compiler_build_opts_list,
real_list):
for lang,setup_sh_list,compiler,build_opts in compiler_build_opts_list:
for setup_sh in setup_sh_list:
if (setup_sh is None):
setup_cmd = ""
else:
setup_cmd = ". %s/%s; " % (setup_dir, setup_sh)
compiler_version = easy_run.fully_buffered(
command=setup_cmd+compiler+" --version",
join_stdout_stderr=True).stdout_lines[0]
if (lang in ["f", "c"]):
ld_preload_flags = [False, True]
else:
ld_preload_flags = [False]
for ld_preload_flag in ld_preload_flags:
iml = ["", " Intel Math Lib"][int(ld_preload_flag)]
compiler_versions.append(compiler_version + iml)
build_cmd = " ".join([setup_cmd+compiler, build_opts])
print build_cmd
utimes = []
if (n_scatt != 0):
for real in real_list:
print " %s" % real
for replace_cos in [False, True]:
print " replace_cos", replace_cos
for replace_exp in [False, True]:
print " replace_exp", replace_exp
sys.stdout.flush()
if (compiler_version != "n/a"):
utime = write_build_run(
setup_cmd=setup_cmd,
ld_preload_flag=ld_preload_flag,
n_scatt=n_scatt,
n_refl=n_refl,
real=real,
lang=lang,
build_cmd=build_cmd,
replace_cos=replace_cos,
replace_exp=replace_exp)
if (utime is not None):
print " %4.2f" % utime
else:
utime = -1.0
print " err"
else:
utime = -1.0
print " n/a"
utimes.append(utime)
sys.stdout.flush()
else:
if (lang.lower() == "f"):
f_source = libtbx.env.find_in_repositories(
relative_path="lapack_fem/dsyev_test.f",
test=op.isfile,
optional=False)
build_cmd_compl = build_cmd + " " + f_source
else:
cpp_source = libtbx.env.find_in_repositories(
relative_path="lapack_fem/dsyev_test.cpp",
test=op.isfile,
optional=False)
build_cmd_compl = build_cmd + finalize_cpp_build_cmd(cpp_source)
utime = build_run(
setup_cmd=setup_cmd,
ld_preload_flag=ld_preload_flag,
n_scatt=n_scatt,
n_refl=n_refl,
build_cmd=build_cmd_compl,
check_max_a_b=False)
if (utime is None):
print "err"
utime = -1.0
else:
print "min utime: %.2f" % utime
sys.stdout.flush()
utimes.append(utime)
all_utimes.append((utimes, build_cmd + iml))
def usage():
raise Usage("fable.python sf_times.py unit_test|quick|production")
def run(args):
if (len(args) != 1): usage()
t_start = time.time()
build_platform = platform.platform()
build_node = platform.node()
compiler_versions = []
if (args[0] == "unit_test"):
n_scatt, n_refl = 10, 100
elif (args[0] == "quick"):
n_scatt, n_refl = 100, 1000
elif (args[0] == "production"):
n_scatt, n_refl = 2000, 20000
elif (args[0] == "dsyev"):
n_scatt, n_refl = 0, 0
else:
usage()
gcc_sh = gcc_versions + [None]
icc_sh = icc_versions
if (args[0] == "quick"):
gcc_sh = gcc_sh[:2]
icc_sh = icc_sh[:1]
all_utimes = []
run_combinations(
compiler_versions,
all_utimes,
n_scatt=n_scatt,
n_refl=n_refl,
compiler_build_opts_list=[
("F", ifort_versions, "ifort", "-O"),
("f", gcc_sh, "gfortran", "-O3 -ffast-math"),
("f", gcc_sh, "gfortran", "-O3 -ffast-math -march=native"),
("C", icc_sh, "icpc", "-O"),
("c", gcc_sh, "g++", "-O3 -ffast-math"),
("c", gcc_sh, "g++", "-O3 -ffast-math -march=native"),
("c", [None], "clang++",
"-O3 -U__GXX_WEAK__ -Wno-logical-op-parentheses -ffast-math"),
("c", [None], "clang++",
"-O3 -U__GXX_WEAK__ -Wno-logical-op-parentheses -ffast-math"
" -march=native")],
real_list=["real*4", "real*8"])
print
print "current_platform:", platform.platform()
print "current_node:", platform.node()
print "build_platform:", build_platform
print "build_node:", build_node
for compiler_version in compiler_versions:
print "compiler:", compiler_version
if (n_scatt != 0):
print "n_scatt * n_refl: %d * %d" % (n_scatt, n_refl)
print '''\
"s" or "d": single-precision or double-precision floating-point variables
"E" or "e": using the library exp(arg) function or "max(0.0, 1.0 - arg*arg)"
"C" or "c": using the library cos(arg) function or "arg / (abs(arg)+1.0)"'''
print " sEC seC sEc sec dEC deC dEc dec"
else:
print "dsyev times:"
useful_utimes = []
for utimes,build_cmd in all_utimes:
if (max(utimes) != -1.0):
print " ".join(["%6.2f" % u for u in utimes]), build_cmd
useful_utimes.append((utimes,build_cmd))
if (len(useful_utimes) > 1):
print "Relative to first:"
for utimes,build_cmd in useful_utimes:
print " ".join(["%6.2f" % (u/max(u0,0.01))
for u,u0 in zip(utimes,useful_utimes[0][0])]), build_cmd
print "Wall clock time: %.2f s" % (time.time()-t_start)
if (__name__ == "__main__"):
run(args=sys.argv[1:])
| hickerson/bbn | fable/fable_sources/fable/test/sf_times.py | Python | mit | 13,934 |
import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerCoffeeButtonV1Policy(Policy):
@staticmethod
@assert_fully_parsed
def _parse_obs(obs):
return {
'hand_pos': obs[:3],
'mug_pos': obs[3:6],
'unused_info': obs[6:],
}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({
'delta_pos': np.arange(3),
'grab_effort': 3
})
action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.)
action['grab_effort'] = -1.
return action.array
@staticmethod
def _desired_pos(o_d):
pos_curr = o_d['hand_pos']
pos_mug = o_d['mug_pos'] + np.array([.0, .0, .01])
        if abs(pos_curr[0] - pos_mug[0]) > 0.02:
            # first align with the mug along x at a fixed hover height
            return np.array([pos_mug[0], pos_curr[1], .28])
        else:
            # then advance along +y to press the coffee button
            return pos_curr + np.array([.0, .1, .0])
| rlworkgroup/metaworld | metaworld/policies/sawyer_coffee_button_v1_policy.py | Python | mit | 1,025 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutHashes in the Ruby Koans
#
from runner.koan import *
class AboutDictionaries(Koan):
def test_creating_dictionaries(self):
empty_dict = dict()
self.assertEqual(dict, type(empty_dict))
self.assertEqual(dict(), empty_dict)
self.assertEqual(0, len(empty_dict))
def test_dictionary_literals(self):
empty_dict = {}
self.assertEqual(dict, type(empty_dict))
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(__, len(babel_fish))
def test_accessing_dictionaries(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(__, babel_fish['one'])
self.assertEqual(__, babel_fish['two'])
def test_changing_dictionaries(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
babel_fish['one'] = 'eins'
expected = {'two': 'dos', 'one': __}
self.assertEqual(expected, babel_fish)
def test_dictionary_is_unordered(self):
dict1 = {'one': 'uno', 'two': 'dos'}
dict2 = {'two': 'dos', 'one': 'uno'}
self.assertEqual(____, dict1 == dict2)
def test_dictionary_keys_and_values(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(__, len(babel_fish.keys()))
self.assertEqual(__, len(babel_fish.values()))
self.assertEqual(__, 'one' in babel_fish.keys())
self.assertEqual(__, 'two' in babel_fish.values())
self.assertEqual(__, 'uno' in babel_fish.keys())
self.assertEqual(__, 'dos' in babel_fish.values())
def test_making_a_dictionary_from_a_sequence_of_keys(self):
cards = {}.fromkeys(
('red warrior', 'green elf', 'blue valkyrie', 'yellow dwarf',
'confused looking zebra'),
42)
self.assertEqual(__, len(cards))
self.assertEqual(__, cards['green elf'])
self.assertEqual(__, cards['yellow dwarf'])
| DarthStrom/python_koans | python2/koans/about_dictionaries.py | Python | mit | 1,970 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/start_gym_battle_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/start_gym_battle_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nFpogoprotos/networking/requests/messages/start_gym_battle_message.proto\x12\'pogoprotos.networking.requests.messages\"\x97\x01\n\x15StartGymBattleMessage\x12\x0e\n\x06gym_id\x18\x01 \x01(\t\x12\x1d\n\x15\x61ttacking_pokemon_ids\x18\x02 \x03(\x06\x12\x1c\n\x14\x64\x65\x66\x65nding_pokemon_id\x18\x03 \x01(\x06\x12\x17\n\x0fplayer_latitude\x18\x04 \x01(\x01\x12\x18\n\x10player_longitude\x18\x05 \x01(\x01\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_STARTGYMBATTLEMESSAGE = _descriptor.Descriptor(
name='StartGymBattleMessage',
full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='gym_id', full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage.gym_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attacking_pokemon_ids', full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage.attacking_pokemon_ids', index=1,
number=2, type=6, cpp_type=4, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='defending_pokemon_id', full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage.defending_pokemon_id', index=2,
number=3, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_latitude', full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage.player_latitude', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_longitude', full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage.player_longitude', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=267,
)
DESCRIPTOR.message_types_by_name['StartGymBattleMessage'] = _STARTGYMBATTLEMESSAGE
StartGymBattleMessage = _reflection.GeneratedProtocolMessageType('StartGymBattleMessage', (_message.Message,), dict(
DESCRIPTOR = _STARTGYMBATTLEMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.start_gym_battle_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.StartGymBattleMessage)
))
_sym_db.RegisterMessage(StartGymBattleMessage)
# @@protoc_insertion_point(module_scope)
| bellowsj/aiopogo | aiopogo/pogoprotos/networking/requests/messages/start_gym_battle_message_pb2.py | Python | mit | 4,200 |
from ..workspace import Block
from twisted.internet import defer
from .variables import lexical_variable
import operator
class logic_null (Block):
def eval (self):
return defer.succeed(None)
class logic_boolean (Block):
def eval (self):
return defer.succeed(self.fields['BOOL'] == 'TRUE')
class logic_negate (Block):
outputType = bool
def eval (self):
def negate (result):
if result is None:
return None
return result == False
self._complete = self.getInputValue('BOOL').addCallback(negate)
return self._complete
_operators_map = {
"EQ": operator.eq,
"NEQ": operator.ne,
"LT": operator.lt,
"LTE": operator.le,
"GT": operator.gt,
"GTE": operator.ge
}
def _compare (lhs, rhs, op_id):
if lhs is None or rhs is None:
return None
op = _operators_map[op_id]
return op(lhs, rhs)
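# e.g. _compare(3, 5, 'LT') -> True; _compare(None, 5, 'LT') -> None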
# Emit a warning if bad op given
class logic_compare (Block):
outputType = bool
def eval (self):
lhs = self.getInputValue('A')
rhs = self.getInputValue('B')
op_id = self.fields['OP']
def _eval (results):
lhs, rhs = results
return _compare(lhs, rhs, op_id)
self._complete = defer.gatherResults([lhs, rhs]).addCallback(_eval)
return self._complete
class lexical_variable_compare (lexical_variable):
outputType = bool
def eval (self):
variable = self._getVariable()
if variable is None:
self.emitLogMessage(
"Unknown variable: " + str(self.getFieldValue('VAR')),
"error"
)
return defer.succeed(None)
value = self.getFieldValue('VALUE')
op_id = self.getFieldValue('OP')
unit = self.getFieldValue('UNIT', None)
if isinstance(unit, (int, float)):
value *= unit
return defer.succeed(_compare(variable.value, value, op_id))
class logic_operation (Block):
outputType = bool
def eval (self):
@defer.inlineCallbacks
def _run ():
op = self.fields['OP']
lhs = yield self.getInputValue('A')
if lhs is None:
return
if op == "AND":
if bool(lhs):
rhs = yield self.getInputValue('B')
if rhs is None:
return
defer.returnValue(bool(rhs))
else:
defer.returnValue(False)
elif op == "OR":
if bool(lhs):
defer.returnValue(True)
else:
rhs = yield self.getInputValue('B')
if rhs is None:
return
defer.returnValue(bool(rhs))
# Emit a warning
return
self._complete = _run()
return self._complete
class logic_ternary (Block):
# TODO: outputType of then and else should be the same.
# this is then the outputType of the logic_ternary block.
def eval (self):
@defer.inlineCallbacks
def _run ():
test = yield self.getInputValue('IF')
if test is None:
return
if bool(test):
result = yield self.getInputValue('THEN')
defer.returnValue(result)
else:
result = yield self.getInputValue('ELSE')
defer.returnValue(result)
self._complete = _run()
return self._complete
| richardingham/octopus | octopus/blocktopus/blocks/logic.py | Python | mit | 2,889 |
"""
Hack to get scripts to run from source checkout without having to set
PYTHONPATH.
"""
import sys
from os.path import dirname, join, abspath
db_path = dirname(__file__)
project_path = abspath(join(db_path, ".."))
sys.path.insert(0, project_path)
| bd4/monster-hunter-scripts | db/_pathfix.py | Python | mit | 251 |
# Copyright (c) 2017 https://github.com/ping
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
__version__ = '0.3.9'
| ping/instagram_private_api_extensions | instagram_private_api_extensions/__init__.py | Python | mit | 159 |
'''
Grid Layout
===========
.. only:: html
.. image:: images/gridlayout.gif
:align: right
.. only:: latex
.. image:: images/gridlayout.png
:align: right
.. versionadded:: 1.0.4
The :class:`GridLayout` arranges children in a matrix. It takes the available
space and divides it into columns and rows, then adds widgets to the resulting
"cells".
.. versionchanged:: 1.0.7
The implementation has changed to use the widget size_hint for calculating
column/row sizes. `uniform_width` and `uniform_height` have been removed
    and other properties have been added to give you more control.
Background
----------
Unlike many other toolkits, you cannot explicitly place a widget in a specific
column/row. Each child is automatically assigned a position determined by the
layout configuration and the child's index in the children list.
A GridLayout must always have at least one input constraint:
:attr:`GridLayout.cols` or :attr:`GridLayout.rows`. If you do not specify cols
or rows, the Layout will throw an exception.
Column Width and Row Height
---------------------------
The column width/row height are determined in 3 steps:
- The initial size is given by the :attr:`col_default_width` and
:attr:`row_default_height` properties. To customize the size of a single
column or row, use :attr:`cols_minimum` or :attr:`rows_minimum`.
- The `size_hint_x`/`size_hint_y` of the children are taken into account.
If no widgets have a size hint, the maximum size is used for all
children.
- You can force the default size by setting the :attr:`col_force_default`
or :attr:`row_force_default` property. This will force the layout to
ignore the `width` and `size_hint` properties of children and use the
default size.
Using a GridLayout
------------------
In the example below, all widgets will have an equal size. By default, the
`size_hint` is (1, 1), so a Widget will take the full size of the parent::
layout = GridLayout(cols=2)
layout.add_widget(Button(text='Hello 1'))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2'))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_1.jpg
Now, let's fix the size of Hello buttons to 100px instead of using
size_hint_x=1::
layout = GridLayout(cols=2)
layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_2.jpg
Next, let's fix the row height to a specific size::
layout = GridLayout(cols=2, row_force_default=True, row_default_height=40)
layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_3.jpg
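A minimum width can also be set for individual columns via
:attr:`cols_minimum` (the sizes here are purely illustrative)::
    layout = GridLayout(cols=2, cols_minimum={0: 200})
    layout.add_widget(Button(text='Hello 1'))
    layout.add_widget(Button(text='World 1'))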
'''
__all__ = ('GridLayout', 'GridLayoutException')
from kivy.logger import Logger
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, BooleanProperty, DictProperty, \
BoundedNumericProperty, ReferenceListProperty, VariableListProperty, \
ObjectProperty, StringProperty
from math import ceil
def nmax(*args):
    # drop None entries before taking the max
args = [x for x in args if x is not None]
return max(args)
def nmin(*args):
    # drop None entries before taking the min
args = [x for x in args if x is not None]
return min(args)
class GridLayoutException(Exception):
'''Exception for errors if the grid layout manipulation fails.
'''
pass
class GridLayout(Layout):
'''Grid layout class. See module documentation for more information.
'''
spacing = VariableListProperty([0, 0], length=2)
'''Spacing between children: [spacing_horizontal, spacing_vertical].
spacing also accepts a one argument form [spacing].
:attr:`spacing` is a
:class:`~kivy.properties.VariableListProperty` and defaults to [0, 0].
'''
padding = VariableListProperty([0, 0, 0, 0])
'''Padding between the layout box and it's children: [padding_left,
padding_top, padding_right, padding_bottom].
padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
.. versionchanged:: 1.7.0
Replaced NumericProperty with VariableListProperty.
:attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0, 0, 0].
'''
cols = BoundedNumericProperty(None, min=0, allownone=True)
'''Number of columns in the grid.
.. versionchanged:: 1.0.8
Changed from a NumericProperty to BoundedNumericProperty. You can no
longer set this to a negative value.
:attr:`cols` is a :class:`~kivy.properties.NumericProperty` and defaults to
0.
'''
rows = BoundedNumericProperty(None, min=0, allownone=True)
'''Number of rows in the grid.
.. versionchanged:: 1.0.8
Changed from a NumericProperty to a BoundedNumericProperty. You can no
longer set this to a negative value.
:attr:`rows` is a :class:`~kivy.properties.NumericProperty` and defaults to
0.
'''
col_default_width = NumericProperty(0)
'''Default minimum size to use for a column.
.. versionadded:: 1.0.7
:attr:`col_default_width` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
row_default_height = NumericProperty(0)
'''Default minimum size to use for row.
.. versionadded:: 1.0.7
:attr:`row_default_height` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
col_force_default = BooleanProperty(False)
'''If True, ignore the width and size_hint_x of the child and use the
default column width.
.. versionadded:: 1.0.7
:attr:`col_force_default` is a :class:`~kivy.properties.BooleanProperty`
and defaults to False.
'''
row_force_default = BooleanProperty(False)
'''If True, ignore the height and size_hint_y of the child and use the
default row height.
.. versionadded:: 1.0.7
:attr:`row_force_default` is a :class:`~kivy.properties.BooleanProperty`
and defaults to False.
'''
cols_minimum = DictProperty({})
'''Dict of minimum width for each column. The dictionary keys are the
column numbers, e.g. 0, 1, 2...
.. versionadded:: 1.0.7
:attr:`cols_minimum` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
rows_minimum = DictProperty({})
'''Dict of minimum height for each row. The dictionary keys are the
row numbers, e.g. 0, 1, 2...
.. versionadded:: 1.0.7
:attr:`rows_minimum` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
minimum_width = NumericProperty(0)
'''Automatically computed minimum width needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0. It is read only.
'''
minimum_height = NumericProperty(0)
'''Automatically computed minimum height needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_height` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0. It is read only.
'''
minimum_size = ReferenceListProperty(minimum_width, minimum_height)
'''Automatically computed minimum size needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_size` is a
:class:`~kivy.properties.ReferenceListProperty` of
(:attr:`minimum_width`, :attr:`minimum_height`) properties. It is read
only.
'''
def __init__(self, **kwargs):
self._cols = self._rows = None
super(GridLayout, self).__init__(**kwargs)
fbind = self.fbind
update = self._trigger_layout
fbind('col_default_width', update)
fbind('row_default_height', update)
fbind('col_force_default', update)
fbind('row_force_default', update)
fbind('cols', update)
fbind('rows', update)
fbind('parent', update)
fbind('spacing', update)
fbind('padding', update)
fbind('children', update)
fbind('size', update)
fbind('pos', update)
def get_max_widgets(self):
if self.cols and self.rows:
return self.rows * self.cols
else:
return None
def on_children(self, instance, value):
        # if this makes it impossible to construct things with a deferred
        # method, move this test into do_layout and/or issue a warning.
smax = self.get_max_widgets()
if smax and len(value) > smax:
raise GridLayoutException(
'Too many children in GridLayout. Increase rows/cols!')
def _init_rows_cols_sizes(self, count):
# the goal here is to calculate the minimum size of every cols/rows
# and determine if they have stretch or not
current_cols = self.cols
current_rows = self.rows
# if no cols or rows are set, we can't calculate minimum size.
        # the grid must be constrained at least on one side
if not current_cols and not current_rows:
            Logger.warning('%r has no cols or rows set, '
                           'layout is not triggered.' % self)
return
if current_cols is None:
current_cols = int(ceil(count / float(current_rows)))
elif current_rows is None:
current_rows = int(ceil(count / float(current_cols)))
current_cols = max(1, current_cols)
current_rows = max(1, current_rows)
self._has_hint_bound_x = False
self._has_hint_bound_y = False
self._cols_min_size_none = 0. # min size from all the None hint
self._rows_min_size_none = 0. # min size from all the None hint
self._cols = cols = [self.col_default_width] * current_cols
self._cols_sh = [None] * current_cols
self._cols_sh_min = [None] * current_cols
self._cols_sh_max = [None] * current_cols
self._rows = rows = [self.row_default_height] * current_rows
self._rows_sh = [None] * current_rows
self._rows_sh_min = [None] * current_rows
self._rows_sh_max = [None] * current_rows
# update minimum size from the dicts
items = (i for i in self.cols_minimum.items() if i[0] < len(cols))
for index, value in items:
cols[index] = max(value, cols[index])
items = (i for i in self.rows_minimum.items() if i[0] < len(rows))
for index, value in items:
rows[index] = max(value, rows[index])
return True
def _fill_rows_cols_sizes(self):
cols, rows = self._cols, self._rows
cols_sh, rows_sh = self._cols_sh, self._rows_sh
cols_sh_min, rows_sh_min = self._cols_sh_min, self._rows_sh_min
cols_sh_max, rows_sh_max = self._cols_sh_max, self._rows_sh_max
# calculate minimum size for each columns and rows
n_cols = len(cols)
has_bound_y = has_bound_x = False
for i, child in enumerate(reversed(self.children)):
(shw, shh), (w, h) = child.size_hint, child.size
shw_min, shh_min = child.size_hint_min
shw_max, shh_max = child.size_hint_max
row, col = divmod(i, n_cols)
# compute minimum size / maximum stretch needed
if shw is None:
cols[col] = nmax(cols[col], w)
else:
cols_sh[col] = nmax(cols_sh[col], shw)
if shw_min is not None:
has_bound_x = True
cols_sh_min[col] = nmax(cols_sh_min[col], shw_min)
if shw_max is not None:
has_bound_x = True
cols_sh_max[col] = nmin(cols_sh_max[col], shw_max)
if shh is None:
rows[row] = nmax(rows[row], h)
else:
rows_sh[row] = nmax(rows_sh[row], shh)
if shh_min is not None:
has_bound_y = True
rows_sh_min[row] = nmax(rows_sh_min[row], shh_min)
if shh_max is not None:
has_bound_y = True
rows_sh_max[row] = nmin(rows_sh_max[row], shh_max)
self._has_hint_bound_x = has_bound_x
self._has_hint_bound_y = has_bound_y
def _update_minimum_size(self):
# calculate minimum width/height needed, starting from padding +
# spacing
l, t, r, b = self.padding
spacing_x, spacing_y = self.spacing
cols, rows = self._cols, self._rows
width = l + r + spacing_x * (len(cols) - 1)
self._cols_min_size_none = sum(cols) + width
        # subtract from sh_min/sh_max the size already guaranteed by the
        # None-hint entries in the col. sh_min shrinks by that size since
        # it's already covered; likewise for sh_max, because if we already
        # exceeded the max, the subtracted max will be zero, so the column
        # won't get larger
if self._has_hint_bound_x:
cols_sh_min = self._cols_sh_min
cols_sh_max = self._cols_sh_max
for i, (c, sh_min, sh_max) in enumerate(
zip(cols, cols_sh_min, cols_sh_max)):
if sh_min is not None:
width += max(c, sh_min)
cols_sh_min[i] = max(0., sh_min - c)
else:
width += c
if sh_max is not None:
cols_sh_max[i] = max(0., sh_max - c)
else:
width = self._cols_min_size_none
height = t + b + spacing_y * (len(rows) - 1)
self._rows_min_size_none = sum(rows) + height
if self._has_hint_bound_y:
rows_sh_min = self._rows_sh_min
rows_sh_max = self._rows_sh_max
for i, (r, sh_min, sh_max) in enumerate(
zip(rows, rows_sh_min, rows_sh_max)):
if sh_min is not None:
height += max(r, sh_min)
rows_sh_min[i] = max(0., sh_min - r)
else:
height += r
if sh_max is not None:
rows_sh_max[i] = max(0., sh_max - r)
else:
height = self._rows_min_size_none
# finally, set the minimum size
self.minimum_size = (width, height)
def _finalize_rows_cols_sizes(self):
selfw = self.width
selfh = self.height
# resolve size for each column
if self.col_force_default:
cols = [self.col_default_width] * len(self._cols)
for index, value in self.cols_minimum.items():
cols[index] = value
self._cols = cols
else:
cols = self._cols
cols_sh = self._cols_sh
cols_sh_min = self._cols_sh_min
cols_weight = float(sum((x for x in cols_sh if x is not None)))
stretch_w = max(0., selfw - self._cols_min_size_none)
if stretch_w > 1e-9:
if self._has_hint_bound_x:
# fix the hints to be within bounds
self.layout_hint_with_bounds(
cols_weight, stretch_w,
sum((c for c in cols_sh_min if c is not None)),
cols_sh_min, self._cols_sh_max, cols_sh)
for index, col_stretch in enumerate(cols_sh):
                # if the col doesn't have stretch information, there's
                # nothing to do
if not col_stretch:
continue
# add to the min width whatever remains from size_hint
cols[index] += stretch_w * col_stretch / cols_weight
# same algo for rows
if self.row_force_default:
rows = [self.row_default_height] * len(self._rows)
for index, value in self.rows_minimum.items():
rows[index] = value
self._rows = rows
else:
rows = self._rows
rows_sh = self._rows_sh
rows_sh_min = self._rows_sh_min
rows_weight = float(sum((x for x in rows_sh if x is not None)))
stretch_h = max(0., selfh - self._rows_min_size_none)
if stretch_h > 1e-9:
if self._has_hint_bound_y:
# fix the hints to be within bounds
self.layout_hint_with_bounds(
rows_weight, stretch_h,
sum((r for r in rows_sh_min if r is not None)),
rows_sh_min, self._rows_sh_max, rows_sh)
for index, row_stretch in enumerate(rows_sh):
                # if the row doesn't have stretch information, there's
                # nothing to do
if not row_stretch:
continue
# add to the min height whatever remains from size_hint
rows[index] += stretch_h * row_stretch / rows_weight
def _iterate_layout(self, count):
selfx = self.x
padding_left = self.padding[0]
padding_top = self.padding[1]
spacing_x, spacing_y = self.spacing
i = count - 1
y = self.top - padding_top
cols = self._cols
for row_height in self._rows:
x = selfx + padding_left
for col_width in cols:
if i < 0:
break
yield i, x, y - row_height, col_width, row_height
i = i - 1
x = x + col_width + spacing_x
y -= row_height + spacing_y
def do_layout(self, *largs):
children = self.children
if not children or not self._init_rows_cols_sizes(len(children)):
l, t, r, b = self.padding
self.minimum_size = l + r, t + b
return
self._fill_rows_cols_sizes()
self._update_minimum_size()
self._finalize_rows_cols_sizes()
for i, x, y, w, h in self._iterate_layout(len(children)):
c = children[i]
c.pos = x, y
shw, shh = c.size_hint
shw_min, shh_min = c.size_hint_min
shw_max, shh_max = c.size_hint_max
if shw_min is not None:
if shw_max is not None:
w = max(min(w, shw_max), shw_min)
else:
w = max(w, shw_min)
else:
if shw_max is not None:
w = min(w, shw_max)
if shh_min is not None:
if shh_max is not None:
h = max(min(h, shh_max), shh_min)
else:
h = max(h, shh_min)
else:
if shh_max is not None:
h = min(h, shh_max)
if shw is None:
if shh is not None:
c.height = h
else:
if shh is None:
c.width = w
else:
c.size = (w, h)
| LogicalDash/kivy | kivy/uix/gridlayout.py | Python | mit | 19,254 |
"""
89. Gray Code
https://leetcode.com/problems/gray-code/
"""
from typing import List
class Solution:
def grayCode(self, n: int) -> List[int]:
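        # Reflected Gray code: extend the i-bit sequence by appending its
        # mirror image with bit i set, so adjacent values differ by one bit.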
res = [0]
for i in range(n):
res += [x + 2**i for x in reversed(res)]
return res
def main():
s = Solution()
print(s.grayCode(3))
if __name__ == '__main__':
raise(SystemExit(main()))
| pisskidney/leetcode | medium/89.py | Python | mit | 385 |
"""
Test storage
"""
from django.test import TestCase
class StorageTestCase(TestCase):
def test_import(self):
from launchlab_django_utils.storage import StaticRootS3Boto3Storage
from launchlab_django_utils.storage import MediaRootS3Boto3Storage
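        # importing without error is the point of this test; reference the
        # names so linters don't flag them as unused
        self.assertTrue(StaticRootS3Boto3Storage)
        self.assertTrue(MediaRootS3Boto3Storage)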
| LaunchlabAU/launchlab-django-utils | tests/test_storage.py | Python | mit | 267 |
"""Linear Algebra Helper Routines."""
from warnings import warn
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import aslinearoperator
from scipy.linalg import lapack, get_blas_funcs, eig, svd
from .params import set_tol
def norm(x, pnorm='2'):
"""2-norm of a vector.
Parameters
----------
x : array_like
Vector of complex or real values
pnorm : string
'2' calculates the 2-norm
'inf' calculates the infinity-norm
Returns
-------
n : float
2-norm of a vector
Notes
-----
- currently 1+ order of magnitude faster than scipy.linalg.norm(x), which
calls sqrt(numpy.sum(real((conjugate(x)*x)),axis=0)) resulting in an
extra copy
- only handles the 2-norm and infinity-norm for vectors
See Also
--------
scipy.linalg.norm : scipy general matrix or vector norm
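    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.util.linalg import norm
    >>> print(norm(np.array([3.0, 4.0])))
    5.0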
"""
x = np.ravel(x)
if pnorm == '2':
return np.sqrt(np.inner(x.conj(), x).real)
if pnorm == 'inf':
return np.max(np.abs(x))
raise ValueError('Only the 2-norm and infinity-norm are supported')
def infinity_norm(A):
"""Infinity norm of a matrix (maximum absolute row sum).
Parameters
----------
A : csr_matrix, csc_matrix, sparse, or numpy matrix
Sparse or dense matrix
Returns
-------
n : float
Infinity norm of the matrix
Notes
-----
- This serves as an upper bound on spectral radius.
- csr and csc avoid a deep copy
- dense calls scipy.linalg.norm
See Also
--------
scipy.linalg.norm : dense matrix norms
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.linalg import infinity_norm
>>> n=10
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n)
>>> print(infinity_norm(A))
4.0
"""
if sparse.isspmatrix_csr(A) or sparse.isspmatrix_csc(A):
# avoid copying index and ptr arrays
abs_A = A.__class__((np.abs(A.data), A.indices, A.indptr),
shape=A.shape)
return (abs_A * np.ones((A.shape[1]), dtype=A.dtype)).max()
if sparse.isspmatrix(A):
return (abs(A) * np.ones((A.shape[1]), dtype=A.dtype)).max()
return np.dot(np.abs(A), np.ones((A.shape[1],), dtype=A.dtype)).max()
def axpy(x, y, a=1.0):
"""Quick level-1 call to BLAS y = a*x+y.
Parameters
----------
x : array_like
nx1 real or complex vector
y : array_like
nx1 real or complex vector
a : float
real or complex scalar
Returns
-------
y : array_like
Input variable y is rewritten
Notes
-----
The call to get_blas_funcs automatically determines the prefix for the blas
call.
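    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.util.linalg import axpy
    >>> x = np.ones(3)
    >>> y = np.array([0., 1., 2.])
    >>> axpy(x, y, 2.0)
    >>> print(y)
    [2. 3. 4.]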
"""
fn = get_blas_funcs(['axpy'], [x, y])[0]
    # pass `a` by keyword: the BLAS wrapper's third positional argument is n
    fn(x, y, a=a)
# def approximate_spectral_radius(A, tol=0.1, maxiter=10, symmetric=False):
# """approximate the spectral radius of a matrix
#
# Parameters
# ----------
#
# A : {dense or sparse matrix}
# E.g. csr_matrix, csc_matrix, ndarray, etc.
# tol : {scalar}
# Tolerance of approximation
# maxiter : {integer}
# Maximum number of iterations to perform
# symmetric : {boolean}
# True if A is symmetric, False otherwise (default)
#
# Returns
# -------
# An approximation to the spectral radius of A
#
# """
# if symmetric:
# method = eigen_symmetric
# else:
# method = eigen
#
# return norm( method(A, k=1, tol=0.1, which='LM', maxiter=maxiter,
# return_eigenvectors=False) )
def _approximate_eigenvalues(A, maxiter, symmetric=None, initial_guess=None):
"""Apprixmate eigenvalues.
Used by approximate_spectral_radius and condest.
Returns [W, E, H, V, breakdown_flag], where W and E are the eigenvectors
and eigenvalues of the Hessenberg matrix H, respectively, and V is the
Krylov space. breakdown_flag denotes whether Lanczos/Arnoldi suffered
breakdown. E is therefore the approximate eigenvalues of A.
To obtain approximate eigenvectors of A, compute V*W.
"""
A = aslinearoperator(A) # A could be dense or sparse, or something weird
# Choose tolerance for deciding if break-down has occurred
breakdown = set_tol(A.dtype)
breakdown_flag = False
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
maxiter = min(A.shape[0], maxiter)
if initial_guess is None:
v0 = np.random.rand(A.shape[1], 1)
if A.dtype == complex:
v0 = v0 + 1.0j * np.random.rand(A.shape[1], 1)
else:
v0 = initial_guess
v0 /= norm(v0)
    # Important to type H based on v0, so that a real nonsymmetric matrix can
# have an imaginary initial guess for its Arnoldi Krylov space
H = np.zeros((maxiter+1, maxiter),
dtype=np.find_common_type([v0.dtype, A.dtype], []))
V = [v0]
beta = 0.0
for j in range(maxiter):
w = A * V[-1]
if symmetric:
if j >= 1:
H[j-1, j] = beta
w -= beta * V[-2]
alpha = np.dot(np.conjugate(w.ravel()), V[-1].ravel())
H[j, j] = alpha
w -= alpha * V[-1] # axpy(V[-1],w,-alpha)
beta = norm(w)
H[j+1, j] = beta
if (H[j+1, j] < breakdown):
breakdown_flag = True
break
w /= beta
V.append(w)
V = V[-2:] # retain only last two vectors
else:
# orthogonalize against Vs
for i, v in enumerate(V):
H[i, j] = np.dot(np.conjugate(v.ravel()), w.ravel())
w = w - H[i, j]*v
H[j+1, j] = norm(w)
if (H[j+1, j] < breakdown):
breakdown_flag = True
if H[j+1, j] != 0.0:
w = w/H[j+1, j]
V.append(w)
break
w = w/H[j+1, j]
V.append(w)
# if upper 2x2 block of Hessenberg matrix H is almost symmetric,
# and the user has not explicitly specified symmetric=False,
# then switch to symmetric Lanczos algorithm
# if symmetric is not False and j == 1:
# if abs(H[1,0] - H[0,1]) < 1e-12:
# #print("using symmetric mode")
# symmetric = True
# V = V[1:]
# H[1,0] = H[0,1]
# beta = H[2,1]
# print("Approximated spectral radius in %d iterations" % (j + 1))
Eigs, Vects = eig(H[:j+1, :j+1], left=False, right=True)
return (Vects, Eigs, H, V, breakdown_flag)
def approximate_spectral_radius(A, tol=0.01, maxiter=15, restart=5,
symmetric=None, initial_guess=None,
return_vector=False):
"""Approximate the spectral radius of a matrix.
Parameters
----------
A : {dense or sparse matrix}
E.g. csr_matrix, csc_matrix, ndarray, etc.
tol : {scalar}
Relative tolerance of approximation, i.e., the error divided
by the approximate spectral radius is compared to tol.
maxiter : {integer}
Maximum number of iterations to perform
restart : {integer}
Number of restarted Arnoldi processes. For example, a value of 0 will
run Arnoldi once, for maxiter iterations, and a value of 1 will restart
Arnoldi once, using the maximal eigenvector from the first Arnoldi
process as the initial guess.
symmetric : {boolean}
True - if A is symmetric Lanczos iteration is used (more efficient)
False - if A is non-symmetric Arnoldi iteration is used (less efficient)
initial_guess : {array|None}
If n x 1 array, then use as initial guess for Arnoldi/Lanczos.
If None, then use a random initial guess.
return_vector : {boolean}
True - return an approximate dominant eigenvector and the spectral radius.
False - Do not return the approximate dominant eigenvector
Returns
-------
An approximation to the spectral radius of A, and
if return_vector=True, then also return the approximate dominant
eigenvector
Notes
-----
The spectral radius is approximated by looking at the Ritz eigenvalues.
Arnoldi iteration (or Lanczos) is used to project the matrix A onto a
Krylov subspace: H = Q* A Q. The eigenvalues of H (i.e. the Ritz
eigenvalues) should represent the eigenvalues of A in the sense that the
minimum and maximum values are usually well matched (for the symmetric case
it is true since the eigenvalues are real).
References
----------
.. [1] Z. Bai, J. Demmel, J. Dongarra, A. Ruhe, and H. van der Vorst,
editors. "Templates for the Solution of Algebraic Eigenvalue Problems:
A Practical Guide", SIAM, Philadelphia, 2000.
Examples
--------
>>> from pyamg.util.linalg import approximate_spectral_radius
>>> import numpy as np
>>> from scipy.linalg import eigvals, norm
>>> A = np.array([[1.,0.],[0.,1.]])
>>> sr = approximate_spectral_radius(A,maxiter=3)
>>> print(f'{sr:2.6}')
1.0
>>> print(max([norm(x) for x in eigvals(A)]))
1.0
"""
if not hasattr(A, 'rho') or return_vector:
        # NOTE: somehow more restarts can cause a nonsymmetric case to fail
        # -- look at this. And what about A.dtype=int? convert somehow?
# The use of the restart vector v0 requires that the full Krylov
# subspace V be stored. So, set symmetric to False.
symmetric = False
if maxiter < 1:
raise ValueError('expected maxiter > 0')
if restart < 0:
raise ValueError('expected restart >= 0')
if A.dtype == int:
raise ValueError('expected A to be float (complex or real)')
if A.shape[0] != A.shape[1]:
raise ValueError('expected square A')
if initial_guess is None:
v0 = np.random.rand(A.shape[1], 1)
if A.dtype == complex:
v0 = v0 + 1.0j * np.random.rand(A.shape[1], 1)
else:
if initial_guess.shape[0] != A.shape[0]:
raise ValueError('initial_guess and A must have same shape')
if (len(initial_guess.shape) > 1) and (initial_guess.shape[1] > 1):
raise ValueError('initial_guess must be an (n,1) or\
(n,) vector')
v0 = initial_guess.reshape(-1, 1)
v0 = np.array(v0, dtype=A.dtype)
for j in range(restart+1):
[evect, ev, H, V, breakdown_flag] =\
_approximate_eigenvalues(A, maxiter, symmetric, initial_guess=v0)
# Calculate error in dominant eigenvector
nvecs = ev.shape[0]
max_index = np.abs(ev).argmax()
error = H[nvecs, nvecs-1] * evect[-1, max_index]
# error is a fast way of calculating the following line
# error2 = ( A - ev[max_index]*sp.mat(
# sp.eye(A.shape[0],A.shape[1])) )*\
# ( sp.mat(sp.hstack(V[:-1]))*\
# evect[:,max_index].reshape(-1,1) )
# print(str(error) + " " + str(sp.linalg.norm(e2)))
v0 = np.dot(np.hstack(V[:-1]), evect[:, max_index].reshape(-1, 1))
if np.abs(error)/np.abs(ev[max_index]) < tol:
# halt if below relative tolerance
break
if breakdown_flag:
            warn(f'Breakdown occurred in step {j}')
break
# end j-loop
rho = np.abs(ev[max_index])
if sparse.isspmatrix(A):
A.rho = rho
if return_vector:
return (rho, v0)
return rho
return A.rho
def condest(A, maxiter=25, symmetric=False):
r"""Estimates the condition number of A.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
maxiter: {int}
Max number of Arnoldi/Lanczos iterations
symmetric : {bool}
        If symmetric, use the far more efficient Lanczos algorithm;
        else use Arnoldi.
If hermitian, use symmetric=True.
If complex symmetric, use symmetric=False.
Returns
-------
    Estimate of cond(A) with \|lambda_max\| / \|lambda_min\| or sigma_max / sigma_min
through the use of Arnoldi or Lanczos iterations, depending on
the symmetric flag
Notes
-----
    The condition number measures how large a change in the
    problem's solution is caused by a change in the problem's input.
Large condition numbers indicate that small perturbations
and numerical errors are magnified greatly when solving the system.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import condest
>>> c = condest(np.array([[1.,0.],[0.,2.]]))
>>> print(f'{c:2.6}')
2.0
"""
C = aslinearoperator(A)
power = 1
if not symmetric:
def matvec(v):
return C.rmatvec(C.A @ v)
C.matvec = matvec
power = 0.5
[evect, ev, H, V, breakdown_flag] =\
_approximate_eigenvalues(C, maxiter, symmetric)
del evect, H, V, breakdown_flag
return (np.max([norm(x) for x in ev])/min(norm(x) for x in ev))**power
def cond(A):
"""Return condition number of A.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
Returns
-------
2-norm condition number through use of the SVD
Use for small to moderate sized dense matrices.
For large sparse matrices, use condest.
Notes
-----
    The condition number measures how large a change in
    the problem's solution is caused by a change in the problem's input.
Large condition numbers indicate that small perturbations
and numerical errors are magnified greatly when solving the system.
Examples
--------
>>> import numpy as np
    >>> from pyamg.util.linalg import cond
    >>> c = cond(np.array([[1.0,0.],[0.,2.0]]))
>>> print(f'{c:2.6}')
2.0
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
if sparse.isspmatrix(A):
A = A.toarray()
U, Sigma, Vh = svd(A)
del U, Vh
# 2-Norm Condition Number
return np.max(Sigma)/min(Sigma)
def ishermitian(A, fast_check=True, tol=1e-6, verbose=False):
r"""Return True if A is Hermitian to within tol.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
fast_check : {bool}
If True, use the heuristic < Ax, y> = < x, Ay>
for random vectors x and y to check for conjugate symmetry.
If False, compute A - A.conj().T.
tol : {float}
Symmetry tolerance
verbose: {bool}
prints
        max( \|A - A.conj().T\| ) if nonhermitian and fast_check=False.
\| <Ax, y> - <x, Ay> ) \| / sqrt( \| <Ax, y> * <x, Ay> \| )
if nonhermitian and fast_check=True
Returns
-------
True if hermitian
False if nonhermitian
Notes
-----
This function applies a simple test of conjugate symmetry
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import ishermitian
>>> ishermitian(np.array([[1,2],[1,1]]))
False
>>> from pyamg.gallery import poisson
>>> ishermitian(poisson((10,10)))
True
"""
# convert to array type
if not sparse.isspmatrix(A):
A = np.asarray(A)
if fast_check:
x = np.random.rand(A.shape[0], 1)
y = np.random.rand(A.shape[0], 1)
if A.dtype == complex:
x = x + 1.0j*np.random.rand(A.shape[0], 1)
y = y + 1.0j*np.random.rand(A.shape[0], 1)
xAy = np.dot((A.dot(x)).conjugate().T, y)
xAty = np.dot(x.conjugate().T, A.dot(y))
diff = float(np.abs(xAy - xAty) / np.sqrt(np.abs(xAy*xAty)))
else:
# compute the difference, A - A.conj().T
if sparse.isspmatrix(A):
diff = np.ravel((A - A.conj().T).data)
else:
diff = np.ravel(A - A.conj().T)
if np.max(diff.shape) == 0:
diff = 0
else:
diff = np.max(np.abs(diff))
if diff < tol:
diff = 0
return True
if verbose:
print(diff)
return False
def pinv_array(a, tol=None):
"""Calculate the Moore-Penrose pseudo inverse of each block of the 3D array a.
Parameters
----------
a : {dense array}
Is of size (n, m, m)
tol : {float}
Used by gelss to filter numerically zeros singular values.
If None, a suitable value is chosen for you.
Returns
-------
Nothing, a is modified in place so that a[k] holds the pseudoinverse
of that block.
Notes
-----
By using lapack wrappers, this can be much faster for large n, than
directly calling a pseudoinverse (SVD)
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import pinv_array
>>> a = np.array([[[1.,2.],[1.,1.]], [[1.,1.],[3.,3.]]])
>>> ac = a.copy()
>>> # each block of a is inverted in-place
>>> pinv_array(a)
"""
n = a.shape[0]
m = a.shape[1]
if m == 1:
# Pseudo-inverse of 1 x 1 matrices is trivial
zero_entries = (a == 0.0).nonzero()[0]
a[zero_entries] = 1.0
a[:] = 1.0/a
a[zero_entries] = 0.0
del zero_entries
else:
# The block size is greater than 1
# Create necessary arrays and function pointers for calculating pinv
gelss, gelss_lwork = lapack.get_lapack_funcs(('gelss', 'gelss_lwork'),
(np.ones((1,), dtype=a.dtype)))
RHS = np.eye(m, dtype=a.dtype)
# pylint: disable=protected-access
lwork = lapack._compute_lwork(gelss_lwork, m, m, m)
# pylint: enable=protected-access
# Choose tolerance for which singular values are zero in *gelss below
if tol is None:
tol = set_tol(a.dtype)
# Invert each block of a
for kk in range(n):
gelssoutput = gelss(a[kk], RHS, cond=tol, lwork=lwork,
overwrite_a=True, overwrite_b=False)
a[kk] = gelssoutput[1]
| pyamg/pyamg | pyamg/util/linalg.py | Python | mit | 18,547 |
import sys
sys.path.insert(0,'../src/')
# Begin From obstacle_avoidance
import rospy
import math
from math import sin, cos
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
from collections import namedtuple
Obstacle = namedtuple('Obstacle', ['r', 'theta'])
# End From obstacle_avoidance
from obstacle_avoidance import ObstacleAvoidance
import unittest
class TestCurvatureCalculations(unittest.TestCase):
def test_left(self):
# Obstacle = namedtuple('Obstacle', ['r', 'theta'])
oa = ObstacleAvoidance()
v = 2
omega = .1
originalCurvature = omega/v
pathWidth = 1
filteredListOfRThetaPairs = []
filteredListOfRThetaPairs.append(Obstacle(r=1.6328, theta=-0.4421))
filteredListOfRThetaPairs.append(Obstacle(r=1.4904, theta=-0.2019))
filteredListOfRThetaPairs.append(Obstacle(r=1.0792, theta=-0.3143))
filteredListOfRThetaPairs.append(Obstacle(r=1.4444, theta=-0.3247))
filteredListOfRThetaPairs.append(Obstacle(r=1.1740, theta=-0.2601))
filteredListOfRThetaPairs.append(Obstacle(r=1.2565, theta=-0.2686))
filteredListOfRThetaPairs.append(Obstacle(r=1.5160, theta=-0.5730))
filteredListOfRThetaPairs.append(Obstacle(r=1.7103, theta=-0.5350))
filteredListOfRThetaPairs.append(Obstacle(r=1.2089, theta=-0.0008))
filteredListOfRThetaPairs.append(Obstacle(r=1.7064, theta=-0.5072))
curvatureToPassObstaclesOnLeft = oa.calculateCurvatureToPassObstaclesOnLeft(originalCurvature, pathWidth, filteredListOfRThetaPairs)
print(str(curvatureToPassObstaclesOnLeft))
self.assertTrue(abs(curvatureToPassObstaclesOnLeft-0.8240)<0.001)
def test_right(self):
# Obstacle = namedtuple('Obstacle', ['r', 'theta'])
oa = ObstacleAvoidance()
v = 2
omega = .1
originalCurvature = omega/v
pathWidth = 1
filteredListOfRThetaPairs = []
filteredListOfRThetaPairs.append(Obstacle(r=1.6328, theta=-0.4421))
filteredListOfRThetaPairs.append(Obstacle(r=1.4904, theta=-0.2019))
filteredListOfRThetaPairs.append(Obstacle(r=1.0792, theta=-0.3143))
filteredListOfRThetaPairs.append(Obstacle(r=1.4444, theta=-0.3247))
filteredListOfRThetaPairs.append(Obstacle(r=1.1740, theta=-0.2601))
filteredListOfRThetaPairs.append(Obstacle(r=1.2565, theta=-0.2686))
filteredListOfRThetaPairs.append(Obstacle(r=1.5160, theta=-0.5730))
filteredListOfRThetaPairs.append(Obstacle(r=1.7103, theta=-0.5350))
filteredListOfRThetaPairs.append(Obstacle(r=1.2089, theta=-0.0008))
filteredListOfRThetaPairs.append(Obstacle(r=1.7064, theta=-0.5072))
curvatureToPassObstaclesOnRight = oa.calculateCurvatureToPassObstaclesOnRight(originalCurvature, pathWidth, filteredListOfRThetaPairs)
print(str(curvatureToPassObstaclesOnRight))
self.assertTrue(abs(curvatureToPassObstaclesOnRight-(-1.8228))<0.001)
if __name__ == '__main__':
unittest.main()
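# These tests can be run directly with `python test_obstacle_avoidance.py`;
# the rospy/geometry_msgs imports above assume a ROS environment is on the
# Python path, and the expected curvatures (0.8240 and -1.8228) come from a
# reference computation for this obstacle set.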
| cwrucutter/snowmower_obstacles | test/test_obstacle_avoidance.py | Python | mit | 3,038 |
#!/usr/bin/env python
# Copyright 2008 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This module is an implementation of the Whisper database API
# Here is the basic layout of a whisper data file
#
# File = Header,Data
# Header = Metadata,ArchiveInfo+
# Metadata = lastUpdate,maxRetention,xFilesFactor,archiveCount
# ArchiveInfo = Offset,SecondsPerPoint,Points
# Data = Archive+
# Archive = Point+
# Point = timestamp,value
#
# NOTE: the lastUpdate field is deprecated, do not use it!
import os, struct, time
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
LOCK = False
CACHE_HEADERS = False
__headerCache = {}
longFormat = "!L"
longSize = struct.calcsize(longFormat)
floatFormat = "!f"
floatSize = struct.calcsize(floatFormat)
timestampFormat = "!L"
timestampSize = struct.calcsize(timestampFormat)
valueFormat = "!d"
valueSize = struct.calcsize(valueFormat)
pointFormat = "!Ld"
pointSize = struct.calcsize(pointFormat)
metadataFormat = "!2LfL"
metadataSize = struct.calcsize(metadataFormat)
archiveInfoFormat = "!3L"
archiveInfoSize = struct.calcsize(archiveInfoFormat)
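# Worked size example: with the formats above, metadataSize is
# struct.calcsize("!2LfL") = 16 bytes and archiveInfoSize is
# struct.calcsize("!3L") = 12 bytes, so a file with two archives carries a
# 16 + 2*12 = 40 byte header before the first packed point.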
debug = startBlock = endBlock = lambda *a,**k: None
class WhisperException(Exception):
"""Base class for whisper exceptions."""
class InvalidConfiguration(WhisperException):
"""Invalid configuration."""
class InvalidTimeInterval(WhisperException):
"""Invalid time interval."""
class TimestampNotCovered(WhisperException):
"""Timestamp not covered by any archives in this database."""
def enableDebug():
global open, debug, startBlock, endBlock
class open(file):
def __init__(self,*args,**kwargs):
file.__init__(self,*args,**kwargs)
self.writeCount = 0
self.readCount = 0
def write(self,data):
self.writeCount += 1
debug('WRITE %d bytes #%d' % (len(data),self.writeCount))
return file.write(self,data)
def read(self,bytes):
self.readCount += 1
debug('READ %d bytes #%d' % (bytes,self.readCount))
return file.read(self,bytes)
def debug(message):
print 'DEBUG :: %s' % message
__timingBlocks = {}
def startBlock(name):
__timingBlocks[name] = time.time()
def endBlock(name):
debug("%s took %.5f seconds" % (name,time.time() - __timingBlocks.pop(name)))
def __readHeader(fh):
info = __headerCache.get(fh.name)
if info: return info
#startBlock('__readHeader')
originalOffset = fh.tell()
fh.seek(0)
packedMetadata = fh.read(metadataSize)
(lastUpdate,maxRetention,xff,archiveCount) = struct.unpack(metadataFormat,packedMetadata)
archives = []
for i in xrange(archiveCount):
packedArchiveInfo = fh.read(archiveInfoSize)
(offset,secondsPerPoint,points) = struct.unpack(archiveInfoFormat,packedArchiveInfo)
archiveInfo = {
'offset' : offset,
'secondsPerPoint' : secondsPerPoint,
'points' : points,
'retention' : secondsPerPoint * points,
'size' : points * pointSize,
}
archives.append(archiveInfo)
fh.seek(originalOffset)
info = {
#'lastUpdate' : lastUpdate, # Deprecated
'maxRetention' : maxRetention,
'xFilesFactor' : xff,
'archives' : archives,
}
if CACHE_HEADERS:
__headerCache[fh.name] = info
#endBlock('__readHeader')
return info
def __changeLastUpdate(fh):
return #XXX Make this a NOP, use os.stat(filename).st_mtime instead
startBlock('__changeLastUpdate()')
originalOffset = fh.tell()
fh.seek(0) #Based on assumption that first field is lastUpdate
now = int( time.time() )
packedTime = struct.pack(timestampFormat,now)
fh.write(packedTime)
fh.seek(originalOffset)
endBlock('__changeLastUpdate()')
def create(path,archiveList,xFilesFactor=0.5):
"""create(path,archiveList,xFilesFactor=0.5)
path is a string
archiveList is a list of archives, each of which is of the form (secondsPerPoint,numberOfPoints)
xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur
"""
#Validate archive configurations...
if not archiveList:
raise InvalidConfiguration("You must specify at least one archive configuration!")
archiveList.sort(key=lambda a: a[0]) #sort by precision (secondsPerPoint)
for i,archive in enumerate(archiveList):
if i == len(archiveList) - 1: break
next = archiveList[i+1]
if not (archive[0] < next[0]):
raise InvalidConfiguration("You cannot configure two archives "
"with the same precision %s,%s" % (archive,next))
if (next[0] % archive[0]) != 0:
raise InvalidConfiguration("Higher precision archives' precision "
"must evenly divide all lower precision archives' precision %s,%s" \
% (archive[0],next[0]))
retention = archive[0] * archive[1]
nextRetention = next[0] * next[1]
if not (nextRetention > retention):
raise InvalidConfiguration("Lower precision archives must cover "
"larger time intervals than higher precision archives %s,%s" \
% (archive,next))
#Looks good, now we create the file and write the header
if os.path.exists(path):
raise InvalidConfiguration("File %s already exists!" % path)
fh = open(path,'wb')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
lastUpdate = struct.pack( timestampFormat, int(time.time()) )
oldest = sorted([secondsPerPoint * points for secondsPerPoint,points in archiveList])[-1]
maxRetention = struct.pack( longFormat, oldest )
xFilesFactor = struct.pack( floatFormat, float(xFilesFactor) )
archiveCount = struct.pack(longFormat, len(archiveList))
packedMetadata = lastUpdate + maxRetention + xFilesFactor + archiveCount
fh.write(packedMetadata)
headerSize = metadataSize + (archiveInfoSize * len(archiveList))
archiveOffsetPointer = headerSize
for secondsPerPoint,points in archiveList:
archiveInfo = struct.pack(archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points)
fh.write(archiveInfo)
archiveOffsetPointer += (points * pointSize)
zeroes = '\x00' * (archiveOffsetPointer - headerSize)
fh.write(zeroes)
fh.close()
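# Usage sketch (retentions are illustrative, not prescribed by this module):
# one-minute points for a day plus five-minute points for a week; precisions
# must divide evenly and coarser archives must retain longer, as validated
# above:
#
#   create('example.wsp', [(60, 1440), (300, 2016)], xFilesFactor=0.5)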
def __propagate(fh,timestamp,xff,higher,lower):
lowerIntervalStart = timestamp - (timestamp % lower['secondsPerPoint'])
lowerIntervalEnd = lowerIntervalStart + lower['secondsPerPoint']
fh.seek(higher['offset'])
packedPoint = fh.read(pointSize)
(higherBaseInterval,higherBaseValue) = struct.unpack(pointFormat,packedPoint)
if higherBaseInterval == 0:
higherFirstOffset = higher['offset']
else:
timeDistance = lowerIntervalStart - higherBaseInterval
pointDistance = timeDistance / higher['secondsPerPoint']
byteDistance = pointDistance * pointSize
higherFirstOffset = higher['offset'] + (byteDistance % higher['size'])
higherPoints = lower['secondsPerPoint'] / higher['secondsPerPoint']
higherSize = higherPoints * pointSize
relativeFirstOffset = higherFirstOffset - higher['offset']
relativeLastOffset = (relativeFirstOffset + higherSize) % higher['size']
higherLastOffset = relativeLastOffset + higher['offset']
fh.seek(higherFirstOffset)
if higherFirstOffset < higherLastOffset: #we don't wrap the archive
seriesString = fh.read(higherLastOffset - higherFirstOffset)
else: #We do wrap the archive
higherEnd = higher['offset'] + higher['size']
seriesString = fh.read(higherEnd - higherFirstOffset)
fh.seek(higher['offset'])
seriesString += fh.read(higherLastOffset - higher['offset'])
#Now we unpack the series data we just read
byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
points = len(seriesString) / pointSize
seriesFormat = byteOrder + (pointTypes * points)
unpackedSeries = struct.unpack(seriesFormat, seriesString)
#And finally we construct a list of values
neighborValues = [None] * points
currentInterval = lowerIntervalStart
step = higher['secondsPerPoint']
for i in xrange(0,len(unpackedSeries),2):
pointTime = unpackedSeries[i]
if pointTime == currentInterval:
neighborValues[i/2] = unpackedSeries[i+1]
currentInterval += step
#Propagate aggregateValue to propagate from neighborValues if we have enough known points
knownValues = [v for v in neighborValues if v is not None]
if not knownValues:
return False
knownPercent = float(len(knownValues)) / float(len(neighborValues))
if knownPercent >= xff: #we have enough data to propagate a value!
aggregateValue = float(sum(knownValues)) / float(len(knownValues)) #TODO another CF besides average?
myPackedPoint = struct.pack(pointFormat,lowerIntervalStart,aggregateValue)
fh.seek(lower['offset'])
packedPoint = fh.read(pointSize)
(lowerBaseInterval,lowerBaseValue) = struct.unpack(pointFormat,packedPoint)
if lowerBaseInterval == 0: #First propagated update to this lower archive
fh.seek(lower['offset'])
fh.write(myPackedPoint)
else: #Not our first propagated update to this lower archive
timeDistance = lowerIntervalStart - lowerBaseInterval
pointDistance = timeDistance / lower['secondsPerPoint']
byteDistance = pointDistance * pointSize
lowerOffset = lower['offset'] + (byteDistance % lower['size'])
fh.seek(lowerOffset)
fh.write(myPackedPoint)
return True
else:
return False
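# Worked xFilesFactor example: propagating from a 60s archive into a 300s
# archive inspects 300/60 = 5 higher-precision slots; with xff = 0.5 at least
# 3 of those 5 values must be known before the average is written down.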
def update(path,value,timestamp=None):
"""update(path,value,timestamp=None)
path is a string
value is a float
timestamp is either an int or float
"""
value = float(value)
fh = open(path,'r+b')
return file_update(fh, value, timestamp)
def file_update(fh, value, timestamp):
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
header = __readHeader(fh)
now = int( time.time() )
if timestamp is None: timestamp = now
timestamp = int(timestamp)
diff = now - timestamp
if not ((diff < header['maxRetention']) and diff >= 0):
raise TimestampNotCovered("Timestamp not covered by any archives in "
"this database.")
for i,archive in enumerate(header['archives']): #Find the highest-precision archive that covers timestamp
if archive['retention'] < diff: continue
lowerArchives = header['archives'][i+1:] #We'll pass on the update to these lower precision archives later
break
#First we update the highest-precision archive
myInterval = timestamp - (timestamp % archive['secondsPerPoint'])
myPackedPoint = struct.pack(pointFormat,myInterval,value)
fh.seek(archive['offset'])
packedPoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
if baseInterval == 0: #This file's first update
fh.seek(archive['offset'])
fh.write(myPackedPoint)
baseInterval,baseValue = myInterval,value
else: #Not our first update
timeDistance = myInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
myOffset = archive['offset'] + (byteDistance % archive['size'])
fh.seek(myOffset)
fh.write(myPackedPoint)
#Now we propagate the update to lower-precision archives
#startBlock('update propagation')
higher = archive
for lower in lowerArchives:
if not __propagate(fh,myInterval,header['xFilesFactor'],higher,lower): break
higher = lower
#endBlock('update propagation')
__changeLastUpdate(fh)
fh.close()
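# Usage sketch (path and values are illustrative):
#
#   update('example.wsp', 42.0)                    # timestamp defaults to now
#   update('example.wsp', 43.5, time.time() - 60)  # backfill one minute ago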
def update_many(path,points):
"""update_many(path,points)
path is a string
points is a list of (timestamp,value) points
"""
if not points: return
points = [ (int(t),float(v)) for (t,v) in points]
points.sort(key=lambda p: p[0],reverse=True) #order points by timestamp, newest first
fh = open(path,'r+b')
return file_update_many(fh, points)
def file_update_many(fh, points):
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
header = __readHeader(fh)
now = int( time.time() )
archives = iter( header['archives'] )
currentArchive = archives.next()
#debug(' update_many currentArchive=%s' % str(currentArchive))
currentPoints = []
for point in points:
age = now - point[0]
#debug(' update_many iterating points, point=%s age=%d' % (str(point),age))
while currentArchive['retention'] < age: #we can't fit any more points in this archive
#debug(' update_many this point is too old to fit here, currentPoints=%d' % len(currentPoints))
if currentPoints: #commit all the points we've found that it can fit
currentPoints.reverse() #put points in chronological order
__archive_update_many(fh,header,currentArchive,currentPoints)
currentPoints = []
try:
currentArchive = archives.next()
#debug(' update_many using next archive %s' % str(currentArchive))
except StopIteration:
#debug(' update_many no more archives!')
currentArchive = None
break
if not currentArchive: break #drop remaining points that don't fit in the database
#debug(' update_many adding point=%s' % str(point))
currentPoints.append(point)
#debug(' update_many done iterating points')
if currentArchive and currentPoints: #don't forget to commit after we've checked all the archives
currentPoints.reverse()
__archive_update_many(fh,header,currentArchive,currentPoints)
__changeLastUpdate(fh)
fh.close()
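# Usage sketch (illustrative): backfill several points in one call;
# update_many sorts them newest-first itself before committing:
#
#   now = int(time.time())
#   update_many('example.wsp', [(now - 120, 1.0), (now - 60, 2.0), (now, 3.0)])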
def __archive_update_many(fh,header,archive,points):
step = archive['secondsPerPoint']
#startBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points)))
alignedPoints = [ (timestamp - (timestamp % step), value)
for (timestamp,value) in points ]
#Create a packed string for each contiguous sequence of points
#startBlock('__archive_update_many string packing')
packedStrings = []
previousInterval = None
currentString = ""
for (interval,value) in alignedPoints:
#debug('__archive_update_many iterating alignedPoint at %s' % interval)
if (not previousInterval) or (interval == previousInterval + step):
#debug('__archive_update_many was expected, packing onto currentString')
currentString += struct.pack(pointFormat,interval,value)
previousInterval = interval
else:
numberOfPoints = len(currentString) / pointSize
startInterval = previousInterval - (step * (numberOfPoints-1))
#debug('__archive_update_many was NOT expected, appending to packedStrings startInterval=%s currentString=%d bytes' % (startInterval,len(currentString)))
packedStrings.append( (startInterval,currentString) )
currentString = struct.pack(pointFormat,interval,value)
previousInterval = interval
if currentString:
#startInterval = previousInterval - (step * len(currentString) / pointSize) + step
numberOfPoints = len(currentString) / pointSize
startInterval = previousInterval - (step * (numberOfPoints-1))
#debug('__archive_update_many done iterating alignedPoints, remainder currentString of %d bytes, startInterval=%s' % (len(currentString),startInterval))
packedStrings.append( (startInterval,currentString) )
#endBlock('__archive_update_many string packing')
#Read base point and determine where our writes will start
fh.seek(archive['offset'])
packedBasePoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedBasePoint)
if baseInterval == 0: #This file's first update
#debug('__archive_update_many first update')
baseInterval = packedStrings[0][0] #use our first string as the base, so we start at the start
#debug('__archive_update_many baseInterval is %s' % baseInterval)
#Write all of our packed strings in locations determined by the baseInterval
#startBlock('__archive_update_many write() operations')
for (interval,packedString) in packedStrings:
timeDistance = interval - baseInterval
pointDistance = timeDistance / step
byteDistance = pointDistance * pointSize
myOffset = archive['offset'] + (byteDistance % archive['size'])
fh.seek(myOffset)
archiveEnd = archive['offset'] + archive['size']
bytesBeyond = (myOffset + len(packedString)) - archiveEnd
#debug(' __archive_update_many myOffset=%d packedString=%d archiveEnd=%d bytesBeyond=%d' % (myOffset,len(packedString),archiveEnd,bytesBeyond))
if bytesBeyond > 0:
fh.write( packedString[:-bytesBeyond] )
#debug('We wrapped an archive!')
assert fh.tell() == archiveEnd, "archiveEnd=%d fh.tell=%d bytesBeyond=%d len(packedString)=%d" % (archiveEnd,fh.tell(),bytesBeyond,len(packedString))
fh.seek( archive['offset'] )
fh.write( packedString[-bytesBeyond:] ) #safe because it can't exceed the archive (retention checking logic above)
else:
fh.write(packedString)
#endBlock('__archive_update_many write() operations')
#Now we propagate the updates to lower-precision archives
#startBlock('__archive_update_many propagation')
higher = archive
lowerArchives = [arc for arc in header['archives'] if arc['secondsPerPoint'] > archive['secondsPerPoint']]
#debug('__archive_update_many I have %d lower archives' % len(lowerArchives))
for lower in lowerArchives:
fit = lambda i: i - (i % lower['secondsPerPoint'])
lowerIntervals = [fit(p[0]) for p in alignedPoints]
uniqueLowerIntervals = set(lowerIntervals)
#debug(' __archive_update_many points=%d unique=%d' % (len(alignedPoints),len(uniqueLowerIntervals)))
propagateFurther = False
for interval in uniqueLowerIntervals:
#debug(' __archive_update_many propagating from %d to %d, interval=%d' % (higher['secondsPerPoint'],lower['secondsPerPoint'],interval))
if __propagate(fh,interval,header['xFilesFactor'],higher,lower):
propagateFurther = True
#debug(' __archive_update_many Successful propagation!')
#debug(' __archive_update_many propagateFurther=%s' % propagateFurther)
if not propagateFurther: break
higher = lower
#endBlock('__archive_update_many propagation')
#endBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points)))
def info(path):
"""info(path)
path is a string
"""
fh = open(path,'rb')
info = __readHeader(fh)
fh.close()
return info
def fetch(path,fromTime,untilTime=None):
"""fetch(path,fromTime,untilTime=None)
path is a string
fromTime is an epoch time
untilTime is also an epoch time, but defaults to now
"""
fh = open(path,'rb')
return file_fetch(fh, fromTime, untilTime)
def file_fetch(fh, fromTime, untilTime):
header = __readHeader(fh)
now = int( time.time() )
if untilTime is None:
untilTime = now
fromTime = int(fromTime)
untilTime = int(untilTime)
oldestTime = now - header['maxRetention']
if fromTime < oldestTime:
fromTime = oldestTime
if not (fromTime < untilTime):
raise InvalidTimeInterval("Invalid time interval")
if untilTime > now:
untilTime = now
if untilTime < fromTime:
untilTime = now
diff = now - fromTime
for archive in header['archives']:
if archive['retention'] >= diff: break
fromInterval = int( fromTime - (fromTime % archive['secondsPerPoint']) ) + archive['secondsPerPoint']
untilInterval = int( untilTime - (untilTime % archive['secondsPerPoint']) ) + archive['secondsPerPoint']
fh.seek(archive['offset'])
packedPoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
if baseInterval == 0:
step = archive['secondsPerPoint']
points = (untilInterval - fromInterval) / step
timeInfo = (fromInterval,untilInterval,step)
valueList = [None] * points
return (timeInfo,valueList)
#Determine fromOffset
timeDistance = fromInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
fromOffset = archive['offset'] + (byteDistance % archive['size'])
#Determine untilOffset
timeDistance = untilInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
untilOffset = archive['offset'] + (byteDistance % archive['size'])
#Read all the points in the interval
fh.seek(fromOffset)
if fromOffset < untilOffset: #If we don't wrap around the archive
seriesString = fh.read(untilOffset - fromOffset)
else: #We do wrap around the archive, so we need two reads
archiveEnd = archive['offset'] + archive['size']
seriesString = fh.read(archiveEnd - fromOffset)
fh.seek(archive['offset'])
seriesString += fh.read(untilOffset - archive['offset'])
#Now we unpack the series data we just read (anything faster than unpack?)
byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
points = len(seriesString) / pointSize
seriesFormat = byteOrder + (pointTypes * points)
unpackedSeries = struct.unpack(seriesFormat, seriesString)
#And finally we construct a list of values (optimize this!)
valueList = [None] * points #pre-allocate entire list for speed
currentInterval = fromInterval
step = archive['secondsPerPoint']
for i in xrange(0,len(unpackedSeries),2):
pointTime = unpackedSeries[i]
if pointTime == currentInterval:
pointValue = unpackedSeries[i+1]
valueList[i/2] = pointValue #in-place reassignment is faster than append()
currentInterval += step
fh.close()
timeInfo = (fromInterval,untilInterval,step)
return (timeInfo,valueList)
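# Usage sketch (illustrative): fetch the last hour; timeInfo is the aligned
# (fromInterval, untilInterval, step) triple and valueList holds one slot per
# step, with None where no point was recorded:
#
#   (timeInfo, values) = fetch('example.wsp', time.time() - 3600)
#   (start, end, step) = timeInfo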
| eric/whisper-rb | lib/whisper/py/whisper.py | Python | mit | 21,690 |
# -*- coding: utf-8 -*-
'''
:author: Patrick Lauer
This class holds the Artificial Bee Colony (ABC) algorithm, based on Karaboga (2005, 2007):
D. Karaboga, AN IDEA BASED ON HONEY BEE SWARM FOR NUMERICAL OPTIMIZATION, TECHNICAL REPORT-TR06, Erciyes University, Engineering Faculty, Computer Engineering Department, 2005.
D. Karaboga, B. Basturk, A powerful and efficient algorithm for numerical function optimization: Artificial Bee Colony (ABC) algorithm, Journal of Global Optimization, Volume 39, Issue 3, pp. 459-471, November 2007, ISSN 0925-5001, doi: 10.1007/s10898-007-9149-x
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
import spotpy
import numpy as np
import time
import random
import itertools
class abc(_algorithm):
'''
Implements the ABC algorithm from Karaboga (2007).
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
        and return a list of simulation results (as long as the evaluation list)
parameter: function
        When called, it should return a random parameter combination, which can
        be e.g. uniform or Gaussian.
objectivefunction: function
        Should return the objectivefunction value for a given list of model
        simulations and observations.
evaluation: function
        Should return the true values as returned by the model.
dbname: str
* Name of the database where parameter, objectivefunction value and simulation results will be saved.
dbformat: str
* ram: fast suited for short sampling time. no file will be created and results are saved in an array.
* csv: A csv file will be created, which you can import afterwards.
parallel: str
        * seq: Sequential sampling (default): Normal iterations on one core of your cpu.
* mpc: Multi processing: Iterations on all available cores on your cpu (recommended for windows os).
* mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).
save_sim: boolean
        * True: Simulation results will be saved
        * False: Simulation results will not be saved
'''
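    # Usage sketch (names are illustrative; spot_setup stands for any
    # spotpy-style setup class providing parameter, simulation, evaluation
    # and objectivefunction):
    #
    #   sampler = spotpy.algorithms.abc(spot_setup(), dbname='abc_run', dbformat='csv')
    #   sampler.sample(repetitions=1000)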
def __init__(self, spot_setup, dbname=None, dbformat=None, parallel='seq',save_sim=True):
_algorithm.__init__(self,spot_setup, dbname=dbname, dbformat=dbformat, parallel=parallel,save_sim=save_sim)
def simulate(self,id_params_tuple):
id,params = id_params_tuple
simulations=self.model(params)
return id,params,simulations
def sample(self,repetitions,eb=48,a=(1/10),peps=0.0001,ownlimit=False,limit=24):
"""
Parameters
----------
repetitions: int
maximum number of function evaluations allowed during optimization
eb: int
number of employed bees (half of population size)
a: float
mutation factor
peps: float
            Convergence criterion
        ownlimit: boolean
            determines whether a user-defined limit is set or not
limit: int
sets the limit
"""
#Initialize the Progress bar
starttime = time.time()
intervaltime = starttime
#Initialize ABC parameters:
randompar=self.parameter()['random']
self.nopt=randompar.size
random.seed()
        if ownlimit:
self.limit=limit
else:
self.limit=eb
lb,ub=self.parameter()['minbound'],self.parameter()['maxbound']
#Initialization
work=[]
#Calculate the objective function
param_generator = ((rep,list(self.parameter()['random'])) for rep in range(eb))
for rep,randompar,simulations in self.repeat(param_generator):
#Calculate fitness
like = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
self.status(rep,like,randompar)
#Save everything in the database
self.datawriter.save(like,randompar,simulations=simulations)
c=0
p=0
work.append([like,randompar,like,randompar,c,p])#(fit_x,x,fit_v,v,limit,normalized fitness)
#Progress bar
acttime=time.time()
#get str showing approximate timeleft to end of simulation in H, M, S
timestr = time.strftime("%H:%M:%S", time.gmtime(round(((acttime-starttime)/
(rep + 1))*(repetitions-(rep + 1 )))))
#Refresh progressbar every second
if acttime-intervaltime>=2:
text='%i of %i (best like=%g) est. time remaining: %s' % (rep,repetitions,
self.status.objectivefunction,timestr)
print(text)
intervaltime=time.time()
icall=0
gnrng=1e100
while icall<repetitions and gnrng>peps: #and criter_change>pcento:
psum=0
#Employed bee phase
#Generate new input parameters
for i,val in enumerate(work):
k=i
while k==i: k=random.randint(0,(eb-1))
j=random.randint(0,(self.nopt-1))
work[i][3][j]=work[i][1][j]+random.uniform(-a,a)*(work[i][1][j]-work[k][1][j])
if work[i][3][j]<lb[j]: work[i][3][j]=lb[j]
if work[i][3][j]>ub[j]: work[i][3][j]=ub[j]
'''
#Scout bee phase
if work[i][4] >= self.limit:
work[i][3]=self.parameter()['random']
work[i][4]=0
'''
#Calculate the objective function
param_generator = ((rep,work[rep][3]) for rep in range(eb))
for rep,randompar,simulations in self.repeat(param_generator):
#Calculate fitness
clike = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
if clike > work[rep][0]:
work[rep][1]=work[rep][3]
work[rep][0]=clike
work[rep][4]=0
else:
work[rep][4]=work[rep][4]+1
self.status(rep,work[rep][0],work[rep][1])
self.datawriter.save(clike,work[rep][3],simulations=simulations,chains=icall)
icall += 1
#Probability distribution for roulette wheel selection
bn=[]
for i,val in enumerate(work):
psum=psum+(1/work[i][0])
for i,val in enumerate(work):
work[i][5]=((1/work[i][0])/psum)
bn.append(work[i][5])
bounds = np.cumsum(bn)
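            # Worked example (illustrative): for likes [1.0, 2.0, 4.0] the
            # inverse fitnesses are [1.0, 0.5, 0.25] with psum = 1.75, giving
            # probabilities [0.571, 0.286, 0.143] and bounds = [0.571, 0.857,
            # 1.0]; a uniform draw pn then selects the first bin whose
            # cumulative bound reaches pn.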
#Onlooker bee phase
#Roulette wheel selection
for i,val in enumerate(work):
pn=random.uniform(0,1)
k=i
while k==i:
k=random.randint(0,eb-1)
for t,vol in enumerate(bounds):
if bounds[t]-pn>=0:
z=t
break
j=random.randint(0,(self.nopt-1))
#Generate new input parameters
work[i][3][j]=work[z][1][j]+random.uniform(-a,a)*(work[z][1][j]-work[k][1][j])
if work[i][3][j]<lb[j]: work[i][3][j]=lb[j]
if work[i][3][j]>ub[j]: work[i][3][j]=ub[j]
#Calculate the objective function
param_generator = ((rep,work[rep][3]) for rep in range(eb))
for rep,randompar,simulations in self.repeat(param_generator):
#Calculate fitness
clike = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
if clike > work[rep][0]:
work[rep][1]=work[rep][3]
work[rep][0]=clike
work[rep][4]=0
else:
work[rep][4]=work[rep][4]+1
self.status(rep,work[rep][0],work[rep][1])
self.datawriter.save(clike,work[rep][3],simulations=simulations,chains=icall)
icall += 1
#Scout bee phase
for i,val in enumerate(work):
if work[i][4] >= self.limit:
work[i][1]=self.parameter()['random']
work[i][4]=0
                    t, work[i][1], simulations = self.simulate((icall, work[i][1]))
                    clike = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
                    self.datawriter.save(clike, work[i][1], simulations=simulations, chains=icall)
                    work[i][0] = clike
icall += 1
gnrng=-self.status.objectivefunction
text='%i of %i (best like=%g) est. time remaining: %s' % (icall,repetitions,self.status.objectivefunction,timestr)
print(text)
if icall >= repetitions:
print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT')
print('ON THE MAXIMUM NUMBER OF TRIALS ')
print(repetitions)
print('HAS BEEN EXCEEDED.')
if gnrng < peps:
print('THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE')
print('Best parameter set:')
print(self.status.params)
        acttime = time.time()
        text = 'Duration: ' + str(round((acttime - starttime), 2)) + ' s'
        print(text)
        print(-self.status.objectivefunction)
        print(icall)
try:
self.datawriter.finalize()
except AttributeError: #Happens if no database was assigned
            pass
| gitporst/spotpy | spotpy/algorithms/abc.py | Python | mit | 9,796 |