repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
jjlee3/openthread | tools/harness-automation/cases/leader_5_1_1.py | 16 | 1875 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
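# Judging from the attributes below, this harness-automation case runs Thread
# certification test 5.1.1 with the device under test in the Leader role and a
# single golden device; on_dialog is a no-op here, i.e. this case needs no
# special handling of harness pop-up dialogs.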
class Leader_5_1_1(HarnessCase):
role = HarnessCase.ROLE_LEADER
case = '5 1 1'
golden_devices_required = 1
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
potsmaster/cinder | cinder/tests/unit/test_infortrend_common.py | 18 | 77180 | # Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit import test_infortrend_cli
from cinder.tests.unit import utils
from cinder.volume import configuration
from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli
SUCCEED = (0, '')
FAKE_ERROR_RETURN = (-1, '')
class InfortrendTestCase(test.TestCase):
def __init__(self, *args, **kwargs):
super(InfortrendTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(InfortrendTestCase, self).setUp()
self.cli_data = test_infortrend_cli.InfortrendCLITestData()
self.configuration = configuration.Configuration(None)
self.configuration.append_config_values = mock.Mock(return_value=0)
self.configuration.safe_get = self._fake_safe_get
def _fake_safe_get(self, key):
return getattr(self.configuration, key)
def _driver_setup(self, mock_commands, configuration=None):
if configuration is None:
configuration = self.configuration
self.driver = self._get_driver(configuration)
mock_commands_execute = self._mock_command_execute(mock_commands)
mock_cli = mock.Mock(side_effect=mock_commands_execute)
self.driver._execute_command = mock_cli
def _get_driver(self, conf):
raise NotImplementedError
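# The fake executor built below dispatches on the CLI command name: a list
# value is consumed one entry per call (for commands issued repeatedly), a
# tuple is returned as a fixed (return code, output) pair, and a callable is
# invoked with the CLI arguments. Any command not present in mock_commands
# falls through to FAKE_ERROR_RETURN.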
def _mock_command_execute(self, mock_commands):
def fake_execute_command(cli_type, *args, **kwargs):
if cli_type in mock_commands.keys():
if isinstance(mock_commands[cli_type], list):
ret = mock_commands[cli_type][0]
del mock_commands[cli_type][0]
return ret
elif isinstance(mock_commands[cli_type], tuple):
return mock_commands[cli_type]
else:
return mock_commands[cli_type](*args, **kwargs)
return FAKE_ERROR_RETURN
return fake_execute_command
def _mock_show_lv_for_migrate(self, *args, **kwargs):
if 'tier' in args:
return self.cli_data.get_test_show_lv_tier_for_migration()
return self.cli_data.get_test_show_lv()
def _mock_show_lv(self, *args, **kwargs):
if 'tier' in args:
return self.cli_data.get_test_show_lv_tier()
return self.cli_data.get_test_show_lv()
def _assert_cli_has_calls(self, expect_cli_cmd):
self.driver._execute_command.assert_has_calls(expect_cli_cmd)
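# FC flavour of the common-CLI tests. setUp() points the driver at host
# channels 0 and 5 on both controller slots; the expected target IDs
# (112 for slot A, 113 for slot B in the r-model tests) come from the canned
# ShowChannel output provided by test_infortrend_cli.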
class InfortrendFCCommonTestCase(InfortrendTestCase):
def __init__(self, *args, **kwargs):
super(InfortrendFCCommonTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(InfortrendFCCommonTestCase, self).setUp()
self.configuration.volume_backend_name = 'infortrend_backend_1'
self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
self.configuration.san_password = '111111'
self.configuration.infortrend_provisioning = 'full'
self.configuration.infortrend_tiering = '0'
self.configuration.infortrend_pools_name = 'LV-1, LV-2'
self.configuration.infortrend_slots_a_channels_id = '0,5'
self.configuration.infortrend_slots_b_channels_id = '0,5'
self.configuration.infortrend_cli_timeout = 30
def _get_driver(self, conf):
return common_cli.InfortrendCommon('FC', configuration=conf)
def test_normal_channel(self):
test_map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'0': '112', '5': '112'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info(True)
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
def test_normal_channel_with_r_model(self):
test_map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {'0': [], '5': []},
}
test_target_dict = {
'slot_a': {'0': '112', '5': '112'},
'slot_b': {'0': '113', '5': '113'},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info(True)
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_without_mcs(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(properties, self.cli_data.test_fc_properties)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_specific_channel(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '5'
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands, configuration)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_with_specific_channel)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_diff_target_id(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '5'
mock_commands = {
'ShowChannel':
self.cli_data.get_test_show_channel_with_diff_target_id(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands, configuration)
properties = self.driver.initialize_connection(
test_volume, test_connector)
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
'wwn=%s' % test_initiator_wwpns[0]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_with_specific_channel)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_multipath_with_r_model(self):
test_volume = self.cli_data.test_volume
test_connector = copy.deepcopy(self.cli_data.test_connector_fc)
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn(),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_multipath_r_model)
def test_initialize_connection_with_get_wwn_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.initialize_connection,
test_volume,
test_connector)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_zoning(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
test_lookup_map = self.cli_data.fake_lookup_map
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands)
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
properties = self.driver.initialize_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
'wwn=%s' % test_initiator_wwpns[1]),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[1]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_zoning)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_zoning_r_model(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
test_all_target_wwpns = self.cli_data.fake_target_wwpns[:]
test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2]
test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1]
test_lookup_map = self.cli_data.fake_lookup_map_r_model
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn(),
}
self._driver_setup(mock_commands)
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
properties = self.driver.initialize_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[1]),
mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
'wwn=%s' % test_initiator_wwpns[1]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_zoning_r_model)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_zoning_r_model_diff_target_id(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
test_all_target_wwpns = self.cli_data.fake_target_wwpns[:]
test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2]
test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1]
test_lookup_map = self.cli_data.fake_lookup_map_r_model
mock_commands = {
'ShowChannel':
self.cli_data.get_test_show_channel_r_model_diff_target_id(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_diff_target_id(),
}
self._driver_setup(mock_commands)
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
properties = self.driver.initialize_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
'wwn=%s' % test_initiator_wwpns[1]),
mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0',
'wwn=%s' % test_initiator_wwpns[1]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_zoning_r_model)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'DeleteMap': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
}
self._driver_setup(mock_commands)
self.driver.terminate_connection(test_volume, test_connector)
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('ShowMap'),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection_with_zoning(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_fc
test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
test_lookup_map = self.cli_data.fake_lookup_map
mock_commands = {
'DeleteMap': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands)
self.driver.map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {},
}
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
conn_info = self.driver.terminate_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(
conn_info, self.cli_data.test_fc_terminate_conn_info)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection_with_zoning_and_lun_map_exist(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'DeleteMap': SUCCEED,
'ShowMap': self.cli_data.get_show_map_with_lun_map_on_zoning(),
}
self._driver_setup(mock_commands)
self.driver.map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {},
}
self.driver.target_dict = {
'slot_a': {'0': '112', '5': '112'},
'slot_b': {},
}
self.driver.fc_lookup_service = mock.Mock()
conn_info = self.driver.terminate_connection(
test_volume, test_connector)
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('ShowMap'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertIsNone(conn_info)
class InfortrendiSCSICommonTestCase(InfortrendTestCase):
def __init__(self, *args, **kwargs):
super(InfortrendiSCSICommonTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(InfortrendiSCSICommonTestCase, self).setUp()
self.configuration.volume_backend_name = 'infortrend_backend_1'
self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
self.configuration.san_password = '111111'
self.configuration.infortrend_provisioning = 'full'
self.configuration.infortrend_tiering = '0'
self.configuration.infortrend_pools_name = 'LV-1, LV-2'
self.configuration.infortrend_slots_a_channels_id = '1,2,4'
self.configuration.infortrend_slots_b_channels_id = '1,2,4'
def _get_driver(self, conf):
return common_cli.InfortrendCommon('iSCSI', configuration=conf)
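# The next few tests pin down how the driver treats benign CLI return codes:
# CreateMap/DeleteMap/CreateIQN/DeleteIQN returning codes such as 20 or 11
# (the test names suggest "has map" and "no such name" for DeleteIQN) are
# expected to be logged as warnings rather than raised as errors.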
@mock.patch.object(common_cli.LOG, 'warning')
def test_create_map_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'CreateMap': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('CreateMap')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_map_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (11, '')
mock_commands = {
'DeleteMap': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteMap')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_create_iqn_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'CreateIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('CreateIQN')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_iqn_warning_return_code_has_map(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'DeleteIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteIQN')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_iqn_warning_return_code_no_such_name(self, log_warning):
FAKE_RETURN_CODE = (11, '')
mock_commands = {
'DeleteIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteIQN')
self.assertEqual(1, log_warning.call_count)
def test_normal_channel(self):
test_map_dict = {
'slot_a': {'1': [], '2': [], '4': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0', '4': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info()
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
def test_normal_channel_with_multipath(self):
test_map_dict = {
'slot_a': {'1': [], '2': [], '4': []},
'slot_b': {'1': [], '2': [], '4': []},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0', '4': '0'},
'slot_b': {'1': '1', '2': '1', '4': '1'},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info(multipath=True)
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
def test_specific_channel(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '2, 4'
test_map_dict = {
'slot_a': {'2': [], '4': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'2': '0', '4': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info()
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
def test_update_mcs_dict(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
test_mcs_dict = {
'slot_a': {'1': ['1', '2'], '2': ['4']},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info()
self.assertDictMatch(self.driver.mcs_dict, test_mcs_dict)
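# The two MCS mapping tests below imply the selection rule used by
# _get_mapping_info_with_mcs(): it appears to prefer the MCS group spanning
# the most channels, break ties in favour of the group with the most LUNs
# still free on every channel, and map the volume to the lowest LUN that is
# free across the whole group, returning that group's MCS id.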
def test_mapping_info_with_mcs(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
fake_mcs_dict = {
'slot_a': {'0': ['1', '2'], '2': ['4']},
'slot_b': {},
}
lun_list = list(range(0, 127))
fake_map_dict = {
'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]},
'slot_b': {},
}
test_map_chl = {
'slot_a': ['1', '2'],
}
test_map_lun = ['2']
test_mcs_id = '0'
self.driver = self._get_driver(configuration)
self.driver.mcs_dict = fake_mcs_dict
self.driver.map_dict = fake_map_dict
map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs()
self.assertDictMatch(map_chl, test_map_chl)
self.assertEqual(test_map_lun, map_lun)
self.assertEqual(test_mcs_id, mcs_id)
def test_mapping_info_with_mcs_multi_group(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
fake_mcs_dict = {
'slot_a': {'0': ['1', '2'], '1': ['3', '4'], '2': ['5']},
'slot_b': {},
}
lun_list = list(range(0, 127))
fake_map_dict = {
'slot_a': {
'1': lun_list[2:],
'2': lun_list[:],
'3': lun_list[:],
'4': lun_list[1:],
'5': lun_list[:],
},
'slot_b': {},
}
test_map_chl = {
'slot_a': ['3', '4'],
}
test_map_lun = ['1']
test_mcs_id = '1'
self.driver = self._get_driver(configuration)
self.driver.mcs_dict = fake_mcs_dict
self.driver.map_dict = fake_map_dict
map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs()
self.assertDictMatch(map_chl, test_map_chl)
self.assertEqual(test_map_lun, map_lun)
self.assertEqual(test_mcs_id, mcs_id)
def test_specific_channel_with_multipath(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '1,2'
test_map_dict = {
'slot_a': {'1': [], '2': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info(multipath=True)
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
def test_specific_channel_with_multipath_r_model(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '1,2'
configuration.infortrend_slots_b_channels_id = '1'
test_map_dict = {
'slot_a': {'1': [], '2': []},
'slot_b': {'1': []},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0'},
'slot_b': {'1': '1'},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info(multipath=True)
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
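# provider_location, asserted throughout the volume tests, is encoded as
# 'system_id^<id>@partition_id^<id>', where the system id is the controller's
# hex serial converted to decimal (hence the int(fake_system_id[0], 16)).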
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume(self, log_info):
test_volume = self.cli_data.test_volume
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
self.cli_data.fake_partition_id[0]),
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_volume(test_volume)
self.assertDictMatch(model_update, test_model_update)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_volume_with_create_fail(self):
test_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': FAKE_ERROR_RETURN,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_volume,
test_volume)
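# delete_volume is expected to tear things down in order: look the partition
# up with 'ShowPartition -l', remove any replication pairs, delete the
# partition's snapshots, drop its host mappings, and only then delete the
# partition itself, as the expect_cli_cmd list below spells out.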
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_volume(self, log_info):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_snapshot_id = self.cli_data.fake_snapshot_id
test_pair_id = self.cli_data.fake_pair_id
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
expect_cli_cmd = [
mock.call('ShowPartition', '-l'),
mock.call('ShowReplica', '-l'),
mock.call('DeleteReplica', test_pair_id[0], '-y'),
mock.call('ShowSnapshot', 'part=%s' % test_partition_id),
mock.call('DeleteSnapshot', test_snapshot_id[0], '-y'),
mock.call('DeleteSnapshot', test_snapshot_id[1], '-y'),
mock.call('ShowMap', 'part=%s' % test_partition_id),
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('DeletePartition', test_partition_id, '-y'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'warning', mock.Mock())
def test_delete_volume_with_sync_pair(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_sync_pair(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeDriverException,
self.driver.delete_volume,
test_volume)
def test_delete_volume_with_delete_fail(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.delete_volume,
test_volume)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_volume_with_partiton_not_found(self, log_warning):
test_volume = self.cli_data.test_volume
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_empty_list(),
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_volume_without_provider(self, log_info):
test_system_id = self.cli_data.fake_system_id[0]
test_volume = copy.deepcopy(self.cli_data.test_volume)
test_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(test_system_id, 16), 'None')
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
self.assertEqual(1, log_info.call_count)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_cloned_volume(self, log_info):
fake_partition_id = self.cli_data.fake_partition_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_src_volume = self.cli_data.test_volume
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
self.cli_data.fake_partition_id[1]),
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
fake_partition_id, test_dst_part_id, test_dst_volume_id),
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_cloned_volume(
test_dst_volume, test_src_volume)
self.assertDictMatch(model_update, test_model_update)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_cloned_volume_with_create_replica_fail(self):
test_dst_volume = self.cli_data.test_dst_volume
test_src_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': FAKE_ERROR_RETURN,
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_cloned_volume,
test_dst_volume,
test_src_volume)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_export(self):
test_volume = self.cli_data.test_volume
test_model_update = {
'provider_location': test_volume['provider_location'],
}
self.driver = self._get_driver(self.configuration)
model_update = self.driver.create_export(None, test_volume)
self.assertDictMatch(model_update, test_model_update)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_get_volume_stats(self):
test_volume_states = self.cli_data.test_volume_states
mock_commands = {
'ShowLicense': self.cli_data.get_test_show_license(),
'ShowLV': self.cli_data.get_test_show_lv(),
'ShowPartition': self.cli_data.get_test_show_partition_detail(),
}
self._driver_setup(mock_commands)
self.driver.VERSION = '99.99'
volume_states = self.driver.get_volume_stats(True)
self.assertDictMatch(volume_states, test_volume_states)
def test_get_volume_stats_fail(self):
mock_commands = {
'ShowLicense': self.cli_data.get_test_show_license(),
'ShowLV': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.get_volume_stats)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_snapshot(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
model_update = self.driver.create_snapshot(self.cli_data.test_snapshot)
self.assertEqual(fake_snapshot_id, model_update['provider_location'])
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_snapshot_without_partition_id(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
def test_create_snapshot_with_create_fail(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': FAKE_ERROR_RETURN,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
def test_create_snapshot_with_show_fail(self):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': FAKE_ERROR_RETURN,
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_snapshot(self, log_info):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteSnapshot': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.delete_snapshot(test_snapshot)
self.assertEqual(1, log_info.call_count)
def test_delete_snapshot_without_provider_location(self):
test_snapshot = self.cli_data.test_snapshot
self.driver = self._get_driver(self.configuration)
self.driver._get_raid_snapshot_id = mock.Mock(return_value=None)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.delete_snapshot,
test_snapshot)
def test_delete_snapshot_with_fail(self):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteSnapshot': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.delete_snapshot,
test_snapshot)
@mock.patch.object(common_cli.LOG, 'warning', mock.Mock())
def test_delete_snapshot_with_sync_pair(self):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_si_sync_pair(),
'DeleteSnapshot': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeDriverException,
self.driver.delete_snapshot,
test_snapshot)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume_from_snapshot(self, log_info):
test_snapshot = self.cli_data.test_snapshot
test_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
self.cli_data.fake_partition_id[1]),
}
mock_commands = {
'ShowSnapshot':
self.cli_data.get_test_show_snapshot_detail_filled_block(),
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_snapshot_id, test_dst_part_id, test_dst_volume_id),
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_volume_from_snapshot(
test_dst_volume, test_snapshot)
self.assertDictMatch(model_update, test_model_update)
self.assertEqual(1, log_info.call_count)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume_from_snapshot_without_filled_block(self, log_info):
test_snapshot = self.cli_data.test_snapshot
test_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_src_part_id = self.cli_data.fake_partition_id[0]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
self.cli_data.fake_partition_id[1]),
}
mock_commands = {
'ShowSnapshot': self.cli_data.get_test_show_snapshot_detail(),
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica': [
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_dst_volume_id),
self.cli_data.get_test_show_replica_detail_for_migrate(
test_snapshot_id, test_dst_part_id, test_dst_volume_id),
],
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_volume_from_snapshot(
test_dst_volume, test_snapshot)
self.assertDictMatch(model_update, test_model_update)
self.assertEqual(1, log_info.call_count)
def test_create_volume_from_snapshot_without_provider_location(
self):
test_snapshot = self.cli_data.test_snapshot
test_dst_volume = self.cli_data.test_dst_volume
self.driver = self._get_driver(self.configuration)
self.driver._get_raid_snapshot_id = mock.Mock(return_value=None)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
test_dst_volume,
test_snapshot)
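# The CreateMap assertions in the connection tests follow the CLI argument
# order used by these fixtures: ('CreateMap', 'part', <partition id>,
# <channel>, <target id>, <lun>, 'iqn=...' or 'wwn=...').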
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties
test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(properties, test_iscsi_properties)
expect_cli_cmd = [
mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0',
'iqn=%s' % test_connector['initiator']),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_iqn_not_exist(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_initiator = copy.deepcopy(self.cli_data.fake_initiator_iqn[1])
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties
test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
test_connector['initiator'] = test_initiator
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateIQN': SUCCEED,
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(properties, test_iscsi_properties)
expect_cli_cmd = [
mock.call('CreateIQN', test_initiator, test_initiator[-16:]),
mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0',
'iqn=%s' % test_connector['initiator']),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_empty_map(self):
test_volume = self.cli_data.test_volume
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties_empty_map
test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_empty_list(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(
properties, self.cli_data.test_iscsi_properties_empty_map)
def test_initialize_connection_with_create_map_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': FAKE_ERROR_RETURN,
'ShowNet': SUCCEED,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.initialize_connection,
test_volume,
test_connector)
def test_initialize_connection_with_get_ip_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.initialize_connection,
test_volume,
test_connector)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_mcs(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties_with_mcs
test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands, configuration)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(properties, test_iscsi_properties)
expect_cli_cmd = [
mock.call('CreateMap', 'part', test_partition_id, '1', '0', '2',
'iqn=%s' % test_connector['initiator']),
]
self._assert_cli_has_calls(expect_cli_cmd)
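# extend_volume sizing: growth by a whole number of GB is sent to the CLI as
# 'size=<n>GB', while a fractional size difference is converted to megabytes
# and sent as 'size=<n>MB', which the next two tests cover.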
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_extend_volume(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_new_size = 10
test_expand_size = test_new_size - test_volume['size']
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.extend_volume(test_volume, test_new_size)
expect_cli_cmd = [
mock.call('SetPartition', 'expand', test_partition_id,
'size=%sGB' % test_expand_size),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_extend_volume_mb(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_new_size = 5.5
test_expand_size = round((test_new_size - test_volume['size']) * 1024)
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.extend_volume(test_volume, test_new_size)
expect_cli_cmd = [
mock.call('SetPartition', 'expand', test_partition_id,
'size=%sMB' % test_expand_size),
]
self._assert_cli_has_calls(expect_cli_cmd)
def test_extend_volume_fail(self):
test_volume = self.cli_data.test_volume
test_new_size = 10
mock_commands = {
'SetPartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.extend_volume,
test_volume,
test_new_size)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'DeleteMap': SUCCEED,
'DeleteIQN': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
}
self._driver_setup(mock_commands)
self.driver.terminate_connection(test_volume, test_connector)
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('DeleteIQN', test_connector['initiator'][-16:]),
mock.call('ShowMap'),
]
self._assert_cli_has_calls(expect_cli_cmd)
def test_terminate_connection_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'DeleteMap': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.terminate_connection,
test_volume,
test_connector)
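# The migration tests model the full flow: create a partition in the
# destination pool, mirror the data with 'CreateReplica ... type=mirror',
# poll the pair via the (zero-interval, for test speed) looping call, then
# delete the replica pair, the source mappings and the source partition.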
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
def test_migrate_volume(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_src_part_id = self.cli_data.fake_partition_id[0]
test_dst_part_id = self.cli_data.fake_partition_id[2]
test_pair_id = self.cli_data.fake_pair_id[0]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
test_dst_part_id),
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume_id, fake_pool['pool_id']),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_volume_id),
'DeleteReplica': SUCCEED,
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
rc, model_update = self.driver.migrate_volume(test_volume, test_host)
expect_cli_cmd = [
mock.call('CreatePartition',
fake_pool['pool_id'],
test_volume['id'].replace('-', ''),
'size=%s' % (test_volume['size'] * 1024),
''),
mock.call('ShowPartition'),
mock.call('CreateReplica',
'Cinder-Migrate',
'part', test_src_part_id,
'part', test_dst_part_id,
'type=mirror'),
mock.call('ShowReplica', '-l'),
mock.call('DeleteReplica', test_pair_id, '-y'),
mock.call('DeleteMap', 'part', test_src_part_id, '-y'),
mock.call('DeletePartition', test_src_part_id, '-y'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertTrue(rc)
self.assertDictMatch(model_update, test_model_update)
@mock.patch.object(common_cli.LOG, 'warning')
def test_migrate_volume_with_invalid_storage(self, log_warning):
fake_host = self.cli_data.fake_host
test_volume = self.cli_data.test_volume
mock_commands = {
'ShowLV': self._mock_show_lv_for_migrate,
}
self._driver_setup(mock_commands)
rc, model_update = self.driver.migrate_volume(test_volume, fake_host)
self.assertFalse(rc)
self.assertIsNone(model_update)
self.assertEqual(1, log_warning.call_count)
def test_migrate_volume_with_get_part_id_fail(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
test_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'DeleteMap': SUCCEED,
'CreateReplica': SUCCEED,
'CreateMap': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeDriverException,
self.driver.migrate_volume,
test_volume,
test_host)
def test_migrate_volume_with_create_replica_fail(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume['id'].replace('-', ''), fake_pool['pool_id']),
'DeleteMap': SUCCEED,
'CreateReplica': FAKE_ERROR_RETURN,
'CreateMap': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.migrate_volume,
test_volume,
test_host)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
def test_migrate_volume_timeout(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_src_part_id = self.cli_data.fake_partition_id[0]
test_dst_part_id = self.cli_data.fake_partition_id[2]
configuration = copy.copy(self.configuration)
configuration.infortrend_cli_timeout = 0
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume_id, fake_pool['pool_id']),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_volume_id,
'Copy'),
}
self._driver_setup(mock_commands, configuration)
self.assertRaises(
exception.VolumeDriverException,
self.driver.migrate_volume,
test_volume,
test_host)
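# manage_existing naming convention exercised below: a previously unmanaged
# partition is expected to carry the name 'cinder-unmanaged-<truncated id>',
# manage_existing() renames it to the Cinder volume id with dashes stripped,
# and unmanage() (further down) applies the reverse rename.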
def test_manage_existing_get_size(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'ShowMap': SUCCEED,
}
self._driver_setup(mock_commands)
size = self.driver.manage_existing_get_size(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('ShowMap', 'part=%s' % test_partition_id),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, size)
def test_manage_existing_get_size_with_import(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume_with_import
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
test_ref_volume['source-name'], test_pool),
'ShowMap': SUCCEED,
}
self._driver_setup(mock_commands)
size = self.driver.manage_existing_get_size(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('ShowMap', 'part=%s' % test_partition_id),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, size)
def test_manage_existing_get_size_in_use(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'ShowMap': self.cli_data.get_test_show_map(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
def test_manage_existing_get_size_no_source_id(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_dst_volume
self.driver = self._get_driver(self.configuration)
self.assertRaises(
exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
def test_manage_existing_get_size_show_part_fail(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
mock_commands = {
'ShowPartition': FAKE_ERROR_RETURN,
'ShowMap': SUCCEED,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
def test_manage_existing_get_size_show_map_fail(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'ShowMap': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
@mock.patch.object(common_cli.LOG, 'info')
def test_manage_existing(self, log_info):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
test_partition_id),
}
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'SetPartition': SUCCEED,
'ShowDevice': self.cli_data.get_test_show_device(),
}
self._driver_setup(mock_commands)
model_update = self.driver.manage_existing(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('SetPartition', test_partition_id,
'name=%s' % test_volume['id'].replace('-', '')),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
self.assertDictMatch(model_update, test_model_update)
def test_manage_existing_rename_fail(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'SetPartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.manage_existing,
test_volume,
test_ref_volume)
def test_manage_existing_with_part_not_found(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail(),
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.ManageExistingInvalidReference,
self.driver.manage_existing,
test_volume,
test_ref_volume)
@mock.patch.object(common_cli.LOG, 'info')
def test_manage_existing_with_import(self, log_info):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume_with_import
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
test_partition_id),
}
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
test_ref_volume['source-name'], test_pool),
'SetPartition': SUCCEED,
'ShowDevice': self.cli_data.get_test_show_device(),
}
self._driver_setup(mock_commands)
model_update = self.driver.manage_existing(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('SetPartition', test_partition_id,
'name=%s' % test_volume['id'].replace('-', '')),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
self.assertDictMatch(model_update, test_model_update)
@mock.patch.object(common_cli.LOG, 'info')
def test_unmanage(self, log_info):
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.unmanage(test_volume)
expect_cli_cmd = [
mock.call(
'SetPartition',
test_partition_id,
'name=cinder-unmanaged-%s' % test_volume_id[:-17]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'info')
def test_retype_without_change(self, log_info):
test_volume = self.cli_data.test_volume
test_new_type = self.cli_data.test_new_type
test_diff = {'extra_specs': {}}
test_host = self.cli_data.test_migrate_host_2
self.driver = self._get_driver(self.configuration)
rc = self.driver.retype(
None, test_volume, test_new_type, test_diff, test_host)
self.assertTrue(rc)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_retype_with_change_provision(self, log_warning):
test_volume = self.cli_data.test_volume
test_new_type = self.cli_data.test_new_type
test_diff = self.cli_data.test_diff
test_host = self.cli_data.test_migrate_host_2
self.driver = self._get_driver(self.configuration)
rc = self.driver.retype(
None, test_volume, test_new_type, test_diff, test_host)
self.assertFalse(rc)
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_retype_with_migrate(self):
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_new_type = self.cli_data.test_new_type
test_diff = self.cli_data.test_diff
test_src_part_id = self.cli_data.fake_partition_id[0]
test_dst_part_id = self.cli_data.fake_partition_id[2]
test_pair_id = self.cli_data.fake_pair_id[0]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
test_dst_part_id),
}
mock_commands = {
'ShowSnapshot': SUCCEED,
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume_id, fake_pool['pool_id']),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_volume_id),
'DeleteReplica': SUCCEED,
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
rc, model_update = self.driver.retype(
None, test_volume, test_new_type, test_diff, test_host)
expect_cli_cmd = [
mock.call('ShowSnapshot', 'part=%s' % test_src_part_id),
mock.call(
'CreatePartition',
fake_pool['pool_id'],
test_volume['id'].replace('-', ''),
'size=%s' % (test_volume['size'] * 1024),
'init=disable min=%sMB' % (
int(test_volume['size'] * 1024 * 0.2))
),
mock.call('ShowPartition'),
mock.call(
'CreateReplica',
'Cinder-Migrate',
'part', test_src_part_id,
'part', test_dst_part_id,
'type=mirror'
),
mock.call('ShowReplica', '-l'),
mock.call('DeleteReplica', test_pair_id, '-y'),
mock.call('DeleteMap', 'part', test_src_part_id, '-y'),
mock.call('DeletePartition', test_src_part_id, '-y'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertTrue(rc)
self.assertDictMatch(model_update, test_model_update)
@mock.patch.object(common_cli.LOG, 'debug', mock.Mock())
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_update_migrated_volume(self):
src_volume = self.cli_data.test_volume
dst_volume = copy.deepcopy(self.cli_data.test_dst_volume)
test_dst_part_id = self.cli_data.fake_partition_id[1]
dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
test_model_update = {
'_name_id': None,
'provider_location': dst_volume['provider_location'],
}
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.update_migrated_volume(
None, src_volume, dst_volume, 'available')
expect_cli_cmd = [
mock.call('SetPartition', test_dst_part_id,
'name=%s' % src_volume['id'].replace('-', '')),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(test_model_update, model_update)
@mock.patch.object(common_cli.LOG, 'debug', mock.Mock())
def test_update_migrated_volume_rename_fail(self):
src_volume = self.cli_data.test_volume
        dst_volume = copy.deepcopy(self.cli_data.test_dst_volume)
dst_volume['_name_id'] = 'fake_name_id'
test_dst_part_id = self.cli_data.fake_partition_id[1]
dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
mock_commands = {
'SetPartition': FAKE_ERROR_RETURN
}
self._driver_setup(mock_commands)
model_update = self.driver.update_migrated_volume(
None, src_volume, dst_volume, 'available')
self.assertEqual({'_name_id': 'fake_name_id'}, model_update)
| apache-2.0 |
slorg1/heroku-buildpack-python | vendor/pip-pop/pip/_vendor/progress/helpers.py | 404 | 2894 | # Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
from __future__ import unicode_literals
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WriteMixin, self).__init__(**kwargs)
self._width = 0
if message:
self.message = message
if self.file.isatty():
if self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
print(self.message, end='', file=self.file)
self.file.flush()
def write(self, s):
if self.file.isatty():
b = '\b' * self._width
c = s.ljust(self._width)
print(b + c, end='', file=self.file)
self._width = max(self._width, len(s))
self.file.flush()
def finish(self):
if self.file.isatty() and self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WritelnMixin, self).__init__(**kwargs)
if message:
self.message = message
if self.file.isatty() and self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
def clearln(self):
if self.file.isatty():
print('\r\x1b[K', end='', file=self.file)
def writeln(self, line):
if self.file.isatty():
self.clearln()
print(line, end='', file=self.file)
self.file.flush()
def finish(self):
if self.file.isatty():
print(file=self.file)
if self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
"""Registers a signal handler that calls finish on SIGINT"""
def __init__(self, *args, **kwargs):
super(SigIntMixin, self).__init__(*args, **kwargs)
signal(SIGINT, self._sigint_handler)
def _sigint_handler(self, signum, frame):
self.finish()
exit(0)
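# --- Usage sketch (illustrative only; not part of the vendored module) ---
# These mixins are not used on their own: the concrete progress classes in
# this package combine them with the counter classes, e.g. Bar(WritelnMixin,
# Progress) and Spinner(WriteMixin, Infinite).  A typical call site looks
# roughly like:
#
#   from pip._vendor.progress.bar import Bar
#
#   bar = Bar('Processing', max=20)
#   for _ in range(20):
#       bar.next()
#   bar.finish()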
| mit |
40223148/2015cda_g5 | static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/base.py | 603 | 4652 | #!/usr/bin/env python
## https://bitbucket.org/pygame/pygame/raw/2383b8ab0e2273bc83c545ab9c18fee1f3459c64/pygame/base.py
'''Pygame core routines
Contains the core routines that are used by the rest of the
pygame modules. Its routines are merged directly into the pygame
namespace. This mainly includes the auto-initialization `init` and
`quit` routines.
There is a small module named `locals` that also gets merged into
this namespace. This contains all the constants needed by pygame.
Object constructors also get placed into this namespace, you can
call functions like `Rect` and `Surface` to create objects of
that type. As a convenience, you can import the members of
pygame.locals directly into your module's namespace with::
from pygame.locals import *
Most of the pygame examples do this if you'd like to take a look.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import sys
#import SDL
_quitfunctions = []
class error(RuntimeError):
pass
def init():
'''Autoinitialize all imported pygame modules.
Initialize all imported pygame modules. Includes pygame modules
that are not part of the base modules (like font and image).
It does not raise exceptions, but instead silently counts which
modules have failed to init. The return argument contains a count
of the number of modules initialized, and the number of modules
that failed to initialize.
You can always initialize the modules you want by hand. The
modules that need it have an `init` and `quit` routine built in,
which you can call directly. They also have a `get_init` routine
which you can use to doublecheck the initialization. Note that
the manual `init` routines will raise an exception on error. Be
aware that most platforms require the display module to be
initialized before others. This `init` will handle that for you,
but if you initialize by hand, be aware of this constraint.
As with the manual `init` routines. It is safe to call this
`init` as often as you like.
:rtype: int, int
:return: (count_passed, count_failed)
'''
success = 0
fail = 0
#SDL.SDL_Init(SDL.SDL_INIT_EVENTTHREAD | SDL.SDL_INIT_TIMER)
if _video_autoinit():
success += 1
else:
fail += 1
for mod in sys.modules.values():
if hasattr(mod, '__PYGAMEinit__') and callable(mod.__PYGAMEinit__):
try:
mod.__PYGAMEinit__()
success += 1
except:
fail += 1
return success, fail
def register_quit(func):
'''Routine to call when pygame quits.
The given callback routine will be called when pygame is
quitting. Quit callbacks are served on a 'last in, first out'
basis.
'''
_quitfunctions.append(func)
def _video_autoquit():
if SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
SDL.SDL_QuitSubSystem(SDL.SDL_INIT_VIDEO)
def _video_autoinit():
return 1
#if not SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
# SDL.SDL_InitSubSystem(SDL.SDL_INIT_VIDEO)
# SDL.SDL_EnableUNICODE(1)
#return 1
def _atexit_quit():
while _quitfunctions:
func = _quitfunctions.pop()
func()
_video_autoquit()
#SDL.SDL_Quit()
def get_sdl_version():
'''Get the version of the linked SDL runtime.
:rtype: int, int, int
:return: major, minor, patch
'''
#v = SDL.SDL_Linked_Version()
#return v.major, v.minor, v.patch
return None, None, None
def quit():
'''Uninitialize all pygame modules.
Uninitialize all pygame modules that have been initialized. Even
if you initialized the module by hand, this `quit` will
uninitialize it for you.
All the pygame modules are uninitialized automatically when your
    program exits, so you will usually not need this routine. If your
    program plans to keep running after it is done with pygame, then this
    would be a good time to make this call.
'''
_atexit_quit()
def get_error():
'''Get current error message.
SDL maintains an internal current error message. This message is
usually given to you when an SDL related exception occurs, but
sometimes you may want to call this directly yourself.
:rtype: str
'''
#return SDL.SDL_GetError()
return ''
def _rgba_from_obj(obj):
if not type(obj) in (tuple, list):
return None
if len(obj) == 1:
return _rgba_from_obj(obj[0])
elif len(obj) == 3:
return (int(obj[0]), int(obj[1]), int(obj[2]), 255)
elif len(obj) == 4:
return obj
else:
return None
atexit.register(_atexit_quit)
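# --- Usage sketch (illustrative only; not part of the original module) ---
# The init/quit pair documented above is normally driven like this:
#
#   import pygame
#   passed, failed = pygame.init()   # returns (count_passed, count_failed)
#   ...                              # use the initialized modules
#   pygame.quit()                    # optional; _atexit_quit also runs at exit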
| gpl-3.0 |
nmrao/robotframework | src/robot/libdoc.py | 17 | 8412 | #!/usr/bin/env python
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for the `Libdoc` tool.
This module can be executed from the command line using the following
approaches::
python -m robot.libdoc
python path/to/robot/libdoc.py
Instead of ``python`` it is possible to use also other Python interpreters.
This module also provides :func:`libdoc` and :func:`libdoc_cli` functions
that can be used programmatically. Other code is for internal usage.
Libdoc itself is implemented in the :mod:`~robot.libdocpkg` package.
"""
USAGE = """robot.libdoc -- Robot Framework library documentation generator
Version: <VERSION>
Usage: python -m robot.libdoc [options] library output_file
or: python -m robot.libdoc [options] library list|show|version [names]
Libdoc tool can generate keyword documentation in HTML and XML formats both
for test libraries and resource files. HTML format is suitable for humans and
XML specs for RIDE and other tools. Libdoc also has a few special commands to
show library or resource information on the console.
Libdoc supports all library and resource types, and earlier generated XML
specs can also be used as input. If a library needs arguments, they must be given
as part of the library name and separated by two colons, for example, like
`LibraryName::arg1::arg2`.
Options
=======
-f --format HTML|XML Specifies whether to generate HTML or XML output.
                          If this option is not used, the format is got
from the extension of the output file.
-F --docformat ROBOT|HTML|TEXT|REST
Specifies the source documentation format. Possible
values are Robot Framework's documentation format,
HTML, plain text, and reStructuredText. The default
value can be specified in test library source code
and the initial default value is `ROBOT`.
New in Robot Framework 2.7.5.
-n --name newname Sets the name of the documented library or resource.
-v --version newversion Sets the version of the documented library or
resource.
-P --pythonpath path * Additional locations where to search for libraries
and resources.
-E --escape what:with * Escapes characters which are problematic in console.
'what' is the name of the character to escape and
'with' is the string to escape it with.
<-------------------ESCAPES------------------------>
-h -? --help Print this help.
Creating documentation
======================
When creating documentation in HTML or XML format, the output file must
be specified as a second argument after the library/resource name or path.
Output format is got automatically from the extension but can also be set
with `--format` option.
Examples:
python -m robot.libdoc src/MyLib.py doc/MyLib.html
jython -m robot.libdoc MyJavaLibrary.java MyJavaLibrary.html
python -m robot.libdoc --name MyLib Remote::10.0.0.42:8270 MyLib.xml
Viewing information on console
==============================
Libdoc has three special commands to show information on the console. These
commands are used instead of the name of the output file, and they can also
take additional arguments.
list: List names of the keywords the library/resource contains. Can be
limited to show only certain keywords by passing optional patterns as
arguments. Keyword is listed if its name contains any given pattern.
show: Show library/resource documentation. Can be limited to show only
certain keywords by passing names as arguments. Keyword is shown if
its name matches any given name. Special argument `intro` will show
the library introduction and importing sections.
version: Show library version
Optional patterns given to `list` and `show` are case and space insensitive.
Both also accept `*` and `?` as wildcards.
Examples:
python -m robot.libdoc Dialogs list
python -m robot.libdoc Selenium2Library list browser
python -m robot.libdoc Remote::10.0.0.42:8270 show
python -m robot.libdoc Dialogs show PauseExecution execute*
python -m robot.libdoc Selenium2Library show intro
python -m robot.libdoc Selenium2Library version
Alternative execution
=====================
Libdoc works with all interpreters supported by Robot Framework (Python,
Jython and IronPython). In the examples above Libdoc is executed as an
installed module, but it can also be executed as a script like
`python path/robot/libdoc.py`.
For more information about Libdoc and other built-in tools, see
http://robotframework.org/robotframework/#built-in-tools.
"""
import sys
import os
# Allows running as a script. __name__ check needed with multiprocessing:
# http://code.google.com/p/robotframework/issues/detail?id=1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot.utils import Application, seq2str
from robot.errors import DataError
from robot.libdocpkg import LibraryDocumentation, ConsoleViewer
class LibDoc(Application):
def __init__(self):
Application.__init__(self, USAGE, arg_limits=(2,), auto_version=False)
def validate(self, options, arguments):
if ConsoleViewer.handles(arguments[1]):
ConsoleViewer.validate_command(arguments[1], arguments[2:])
elif len(arguments) > 2:
raise DataError('Only two arguments allowed when writing output.')
return options, arguments
def main(self, args, name='', version='', format=None, docformat=None):
lib_or_res, output = args[:2]
libdoc = LibraryDocumentation(lib_or_res, name, version,
self._get_doc_format(docformat))
if ConsoleViewer.handles(output):
ConsoleViewer(libdoc).view(output, *args[2:])
else:
libdoc.save(output, self._get_output_format(format, output))
self.console(os.path.abspath(output))
def _get_doc_format(self, format):
if not format:
return None
return self._verify_format('Doc format', format,
['ROBOT', 'TEXT', 'HTML', 'REST'])
def _get_output_format(self, format, output):
default = os.path.splitext(output)[1][1:]
return self._verify_format('Format', format or default, ['HTML', 'XML'])
def _verify_format(self, type, format, valid):
format = format.upper()
if format not in valid:
raise DataError("%s must be %s, got '%s'."
% (type, seq2str(valid, lastsep=' or '), format))
return format
def libdoc_cli(arguments):
"""Executes Libdoc similarly as from the command line.
:param arguments: Command line arguments as a list of strings.
For programmatic usage the :func:`libdoc` function is typically better. It
has a better API for that usage and does not call :func:`sys.exit` like
this function.
Example::
from robot.libdoc import libdoc_cli
libdoc_cli(['--version', '1.0', 'MyLibrary.py', 'MyLibraryDoc.html'])
"""
LibDoc().execute_cli(arguments)
def libdoc(library_or_resource, outfile, name='', version='', format=None):
"""Executes libdoc.
Arguments have same semantics as Libdoc command line options with
same names.
Example::
from robot.libdoc import libdoc
libdoc('MyLibrary.py', 'MyLibraryDoc.html', version='1.0')
"""
LibDoc().execute(library_or_resource, outfile, name=name, version=version,
format=format)
if __name__ == '__main__':
libdoc_cli(sys.argv[1:])
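# Illustrative note (not part of the original module): the console commands
# described in USAGE can also be reached through the programmatic entry point,
# e.g. the following is expected to print the keyword names of the BuiltIn
# library to stdout instead of writing an output file:
#
#   from robot.libdoc import libdoc
#   libdoc('BuiltIn', 'list')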
| apache-2.0 |
Leoniela/nipype | nipype/interfaces/spm/tests/test_auto_Normalize.py | 9 | 2347 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.spm.preprocess import Normalize
def test_Normalize_inputs():
input_map = dict(DCT_period_cutoff=dict(field='eoptions.cutoff',
),
affine_regularization_type=dict(field='eoptions.regtype',
),
apply_to_files=dict(copyfile=True,
field='subj.resample',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
jobtype=dict(usedefault=True,
),
matlab_cmd=dict(),
mfile=dict(usedefault=True,
),
nonlinear_iterations=dict(field='eoptions.nits',
),
nonlinear_regularization=dict(field='eoptions.reg',
),
out_prefix=dict(field='roptions.prefix',
usedefault=True,
),
parameter_file=dict(copyfile=False,
field='subj.matname',
mandatory=True,
xor=['source', 'template'],
),
paths=dict(),
source=dict(copyfile=True,
field='subj.source',
mandatory=True,
xor=['parameter_file'],
),
source_image_smoothing=dict(field='eoptions.smosrc',
),
source_weight=dict(copyfile=False,
field='subj.wtsrc',
),
template=dict(copyfile=False,
field='eoptions.template',
mandatory=True,
xor=['parameter_file'],
),
template_image_smoothing=dict(field='eoptions.smoref',
),
template_weight=dict(copyfile=False,
field='eoptions.weight',
),
use_mcr=dict(),
use_v8struct=dict(min_ver='8',
usedefault=True,
),
write_bounding_box=dict(field='roptions.bb',
),
write_interp=dict(field='roptions.interp',
),
write_preserve=dict(field='roptions.preserve',
),
write_voxel_sizes=dict(field='roptions.vox',
),
write_wrap=dict(field='roptions.wrap',
),
)
inputs = Normalize.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Normalize_outputs():
output_map = dict(normalization_parameters=dict(),
normalized_files=dict(),
normalized_source=dict(),
)
outputs = Normalize.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
kyleabeauchamp/EnsemblePaper | code/model_building/fit_model.py | 1 | 1807 | import numpy as np
from fitensemble import belt, ensemble_fitter
import experiment_loader
import sys
import ALA3
belt.ne.set_num_threads(1)
def run(ff, prior, regularization_strength, bayesian_bootstrap_run):
pymc_filename = ALA3.data_directory + "/models/model_%s_%s_reg-%.1f-BB%d.h5" % (ff, prior, regularization_strength, bayesian_bootstrap_run)
populations_filename = ALA3.data_directory + "/frame_populations/pops_%s_%s_reg-%.1f-BB%d.dat" % (ff, prior, regularization_strength, bayesian_bootstrap_run)
predictions, measurements, uncertainties = experiment_loader.load(ff)
num_frames, num_measurements = predictions.shape
bootstrap_index_list = np.array_split(np.arange(num_frames), ALA3.num_blocks)
if bayesian_bootstrap_run == 0:
prior_pops = None
else:
prior_pops = ensemble_fitter.sample_prior_pops(num_frames, bootstrap_index_list)
if prior == "maxent":
model = belt.MaxEntBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength, prior_pops=prior_pops)
elif prior == "dirichlet":
model = belt.DirichletBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength, prior_pops=prior_pops)
elif prior == "MVN":
model = belt.MVNBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength, prior_pops=prior_pops)
model.sample(ALA3.num_samples, thin=ALA3.thin, burn=ALA3.burn, filename=pymc_filename)
p = model.accumulate_populations()
np.savetxt(populations_filename, p)
if __name__ == "__main__":
ff = sys.argv[1]
prior = sys.argv[2]
regularization_strength = float(sys.argv[3])
bayesian_bootstrap_run = int(sys.argv[4])
run(ff, prior, regularization_strength, bayesian_bootstrap_run)
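# Illustrative invocation (sketch only; the argument values are placeholders):
#   python fit_model.py <forcefield> maxent 10.0 0
# i.e. force field name, prior ("maxent", "dirichlet" or "MVN"),
# regularization strength and the Bayesian-bootstrap run index.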
| gpl-3.0 |
harshaneelhg/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale
bit-depth, 512 x 512 sized image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
dancingdan/tensorflow | tensorflow/python/autograph/utils/context_managers.py | 64 | 1708 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various context managers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import ops
from tensorflow.python.ops import tensor_array_ops
def control_dependency_on_returns(return_value):
"""Create a TF control dependency on the return values of a function.
If the function had no return value, a no-op context is returned.
Args:
return_value: The return value to set as control dependency.
Returns:
A context manager.
"""
def control_dependency_handle(t):
if isinstance(t, tensor_array_ops.TensorArray):
return t.flow
return t
if return_value is None:
return contextlib.contextmanager(lambda: (yield))()
# TODO(mdan): Filter to tensor objects.
if not isinstance(return_value, (list, tuple)):
return_value = (return_value,)
return_value = tuple(control_dependency_handle(t) for t in return_value)
return ops.control_dependencies(return_value)
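# --- Usage sketch (illustrative only; not part of the original module) ---
# In TF 1.x graph mode the returned context manager makes follow-up ops wait
# for the wrapped return values, roughly like:
#
#   retval = tf.identity(x)
#   with control_dependency_on_returns(retval):
#     counter_update = counter.assign_add(1)  # hypothetical side-effecting op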
| apache-2.0 |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/language_processing/opennmt_py_test.py | 2 | 77277 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# REF [site] >>
# http://opennmt.net/
# https://github.com/OpenNMT/OpenNMT-py
import argparse, time
import torch
import torchtext
import onmt
import onmt.translate
import onmt.utils.parse
def save_model(model_filepath, model, generator):
#torch.save(model.state_dict(), model_filepath)
#torch.save({'state_dict': model.state_dict()}, model_filepath)
torch.save({'model': model.state_dict(), 'generator': generator.state_dict()}, model_filepath)
#torch.save({'model': model.state_dict(), 'generator': generator.state_dict(), 'optim': optim.state_dict()}, model_filepath)
print('Saved a model to {}.'.format(model_filepath))
def load_model(model_filepath, model, generator, device='cpu'):
"""
loaded_data = torch.load(model_filepath, map_location=device)
#model.load_state_dict(loaded_data)
model.load_state_dict(loaded_data['state_dict'])
print('Loaded a model from {}.'.format(model_filepath))
return model
"""
checkpoint = torch.load(model_filepath, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['model'])
generator.load_state_dict(checkpoint['generator'])
#optim.load_state_dict(checkpoint['optim'])
#opt = checkpoint['opt']
#vocab = checkpoint['vocab']
#epoch = checkpoint['epoch']
print('Loaded a model from {}.'.format(model_filepath))
return model, generator
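# Illustrative usage of the two helpers above (sketch only; the file path is a
# placeholder):
#	save_model('./onmt_checkpoint.pt', model, model.generator)
#	model, generator = load_model('./onmt_checkpoint.pt', model, model.generator, device='cpu')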
#--------------------------------------------------------------------
# REF [file] >> ${OpenNMT-py_HOME}/onmt/bin/preprocess.py
def preprocess_test():
# REF [site] >> https://opennmt.net/OpenNMT-py/options/preprocess.html
if False:
parser = onmt.utils.parse.ArgumentParser(description='preprocess_test')
onmt.opts.config_opts(parser)
onmt.opts.preprocess_opts(parser)
opt = parser.parse_args()
else:
opt = argparse.Namespace()
opt.config = None # Config file path (default: None).
opt.save_config = None # Config file save path (default: None).
# Data.
opt.data_type = 'img' # Type of the source input. Options are [text|img|audio|vec]. (default: text).
opt.train_src = ['data/im2text/src-train.txt'] # Path(s) to the training source data (default: None).
opt.train_tgt = ['data/im2text/tgt-train.txt'] # Path(s) to the training target data (default: None).
opt.train_align = [None] # Path(s) to the training src-tgt alignment (default: [None]).
opt.train_ids = [None] # IDs to name training shards, used for corpus weighting (default: [None]).
opt.valid_src = 'data/im2text/src-val.txt' # Path to the validation source data (default: None).
opt.valid_tgt = 'data/im2text/tgt-val.txt' # Path to the validation target data (default: None).
opt.valid_align = None # Path(s) to the validation src-tgt alignment (default: None).
opt.src_dir = 'data/im2text/images/' # Source directory for image or audio files. (default: ).
opt.save_data = 'data/im2text/demo' # Output file for the prepared data (default: None).
opt.max_shard_size = 0 # Deprecated use shard_size instead (default: 0).
opt.shard_size = 500 # Divide src_corpus and tgt_corpus into smaller multiple src_copus and tgt corpus files, then build shards, each shard will have opt.shard_size samples except last shard. shard_size=0 means no segmentation shard_size>0 means segment dataset into multiple shards, each shard has shard_size samples (default: 1000000)
opt.num_threads = 1 # Number of shards to build in parallel. (default: 1).
opt.overwrite = False # Overwrite existing shards if any. (default: False).
# Vocab.
opt.src_vocab = '' # Path to an existing source vocabulary. Format: one word per line. (default: ).
opt.tgt_vocab = '' # Path to an existing target vocabulary. Format: one word per line. (default: ).
opt.features_vocabs_prefix = '' # Path prefix to existing features vocabularies (default: ).
opt.src_vocab_size = 50000 # Size of the source vocabulary (default: 50000).
opt.tgt_vocab_size = 50000 # Size of the target vocabulary (default: 50000).
opt.vocab_size_multiple = 1 # Make the vocabulary size a multiple of this value (default: 1).
opt.src_words_min_frequency = 0
opt.tgt_words_min_frequency = 2
opt.dynamic_dict = False # Create dynamic dictionaries (default: False).
opt.share_vocab = False # Share source and target vocabulary (default: False).
# Pruning.
opt.src_seq_length = 50 # Maximum source sequence length (default: 50).
opt.src_seq_length_trunc = None # Truncate source sequence length. (default: None).
opt.tgt_seq_length = 150 # Maximum target sequence length to keep. (default: 50).
opt.tgt_seq_length_trunc = None # Truncate target sequence length. (default: None).
opt.lower = False # Lowercase data (default: False).
opt.filter_valid = False # Filter validation data by src and/or tgt length (default: False).
# Random.
opt.shuffle = 0 # Shuffle data (default: 0).
opt.seed = 3435 # Random seed (default: 3435).
# Logging.
opt.report_every = 100000 # Report status every this many sentences (default: 100000).
opt.log_file = '' # Output logs to a file under this path. (default: ).
opt.log_file_level = '0' # {CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET, 50, 40, 30, 20, 10, 0}.
# Speech.
opt.sample_rate = 16000 # Sample rate. (default: 16000).
opt.window_size = 0.02 # Window size for spectrogram in seconds. (default: 0.02).
opt.window_stride = 0.01 # Window stride for spectrogram in seconds. (default: 0.01).
opt.window = 'hamming' # Window type for spectrogram generation. (default: hamming).
# Image.
opt.image_channel_size = 1 # Using grayscale image can training model faster and smaller {3, 1} (default: 3).
# Noise.
opt.subword_prefix = '_' # Subword prefix to build wordstart mask (default: _).
opt.subword_prefix_is_joiner = False # mask will need to be inverted if prefix is joiner (default: False).
print('Preprocess options:\n{}'.format(opt))
#------------------------------------------------------------
#onmt.bin.preprocess.preprocess(opt)
#------------------------------------------------------------
# REF [function] >> preprocess() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/bin/preprocess.py.
onmt.utils.parse.ArgumentParser.validate_preprocess_args(opt)
# REF [file] >> ${OpenNMT-py_HOME}/onmt/bin/train.py
def train_test():
# REF [site] >> https://opennmt.net/OpenNMT-py/options/train.html
if True:
parser = onmt.utils.parse.ArgumentParser(description='train_test')
onmt.opts.config_opts(parser)
onmt.opts.model_opts(parser)
onmt.opts.train_opts(parser)
opt = parser.parse_args()
else:
opt = argparse.Namespace()
opt.config = None # Config file path (default: None).
opt.save_config = None # Config file save path (default: None).
# Model-Embeddings.
opt.src_word_vec_size = 80 # Word embedding size for src. (default: 500).
opt.tgt_word_vec_size = 80 # Word embedding size for tgt. (default: 500).
opt.word_vec_size = 80 # Word embedding size for src and tgt. (default: -1).
opt.share_decoder_embeddings = False # Use a shared weight matrix for the input and output word embeddings in the decoder. (default: False).
opt.share_embeddings = False # Share the word embeddings between encoder and decoder. Need to use shared dictionary for this option. (default: False).
opt.position_encoding = False # Use a sin to mark relative words positions. Necessary for non-RNN style models. (default: False).
# Model: Embedding Features.
opt.feat_merge = 'concat' # Merge action for incorporating features embeddings. Options [concat|sum|mlp]. (default: concat).
opt.feat_vec_size = -1 # If specified, feature embedding sizes will be set to this. Otherwise, feat_vec_exponent will be used. (default: -1).
opt.feat_vec_exponent = 0.7 # If -feat_merge_size is not set, feature embedding sizes will be set to N^feat_vec_exponent where N is the number of values the feature takes. (default: 0.7).
# Model: Encoder-Decoder.
opt.model_type = 'img' # Type of source model to use. Allows the system to incorporate non-text inputs. Options are [text|img|audio|vec]. (default: text).
opt.model_dtype = 'fp32' # Data type of the model. {fp32, fp16}. (default: fp32).
opt.encoder_type = 'brnn' # Type of encoder layer to use. Non-RNN layers are experimental. Options are [rnn|brnn|mean|transformer|cnn]. (default: rnn).
opt.decoder_type = 'rnn' # Type of decoder layer to use. Non-RNN layers are experimental. Options are [rnn|transformer|cnn]. (default: rnn).
opt.layers = -1 # Number of layers in enc/dec. (default: -1).
opt.enc_layers = 2 # Number of layers in the encoder (default: 2).
opt.dec_layers = 2 # Number of layers in the decoder (default: 2).
opt.rnn_size = -1 # Size of rnn hidden states. Overwrites enc_rnn_size and dec_rnn_size (default: -1).
opt.enc_rnn_size = 500 # Size of encoder rnn hidden states. Must be equal to dec_rnn_size except for speech-to-text. (default: 500).
opt.dec_rnn_size = 500 # Size of decoder rnn hidden states. Must be equal to enc_rnn_size except for speech-to-text. (default: 500).
opt.audio_enc_pooling = '1' # The amount of pooling of audio encoder, either the same amount of pooling across all layers indicated by a single number, or different amounts of pooling per layer separated by comma. (default: 1).
opt.cnn_kernel_width = 3 # Size of windows in the cnn, the kernel_size is (cnn_kernel_width, 1) in conv layer (default: 3).
opt.input_feed = 1 # Feed the context vector at each time step as additional input (via concatenation with the word embeddings) to the decoder. (default: 1).
opt.bridge = False # Have an additional layer between the last encoder state and the first decoder state (default: False).
opt.rnn_type = 'LSTM' # The gate type to use in the RNNs {LSTM, GRU, SRU} (default: LSTM).
opt.brnn = None # Deprecated, use 'encoder_type'. (default: None).
opt.context_gate = None # Type of context gate to use. Do not select for no context gate. {source, target, both} (default: None).
# Model: Attention.
opt.global_attention = 'general' # The attention type to use: dotprod or general (Luong) or MLP (Bahdanau) {dot, general, mlp, none} (default: general).
opt.global_attention_function = 'softmax' # {softmax, sparsemax}.
opt.self_attn_type = 'scaled-dot' # Self attention type in Transformer decoder layer -- currently "scaled-dot" or "average" (default: scaled-dot).
opt.max_relative_positions = 0 # Maximum distance between inputs in relative positions representations. For more detailed information, see: https://arxiv.org/pdf/1803.02155.pdf (default: 0).
opt.heads = 8 # Number of heads for transformer self-attention (default: 8).
opt.transformer_ff = 2048 # Size of hidden transformer feed-forward (default: 2048).
opt.aan_useffn = False # Turn on the FFN layer in the AAN decoder (default: False).
# Model: Alignement.
opt.lambda_align = 0.0 # Lambda value for alignement loss of Garg et al (2019) For more detailed information, see: https://arxiv.org/abs/1909.02074 (default: 0.0).
opt.alignment_layer = -3 # Layer number which has to be supervised. (default: -3).
opt.alignment_heads = 0 # N. of cross attention heads per layer to supervised with (default: 0).
opt.full_context_alignment = False # Whether alignment is conditioned on full target context. (default: False).
# Generator.
opt.copy_attn = False # Train copy attention layer. (default: False).
opt.copy_attn_type = 'general' # The copy attention type to use. Leave as None to use the same as -global_attention. {dot, general, mlp, none} (default: None).
opt.generator_function = 'softmax' # Which function to use for generating probabilities over the target vocabulary (choices: softmax, sparsemax) (default: softmax).
opt.copy_attn_force = False # When available, train to copy. (default: False).
opt.reuse_copy_attn = False # Reuse standard attention for copy (default: False).
opt.copy_loss_by_seqlength = False # Divide copy loss by length of sequence (default: False).
opt.coverage_attn = False # Train a coverage attention layer. (default: False).
opt.lambda_coverage = 0.0 # Lambda value for coverage loss of See et al (2017) (default: 0.0).
opt.loss_scale = 0 # For FP16 training, the static loss scale to use. If not set, the loss scale is dynamically computed. (default: 0).
opt.apex_opt_level = 'O1' # For FP16 training, the opt_level to use. See https://nvidia.github.io/apex/amp.html#opt-levels. {O0, O1, O2, O3} (default: O1).
# General.
opt.data = 'data/im2text/demo' # Path prefix to the ".train.pt" and ".valid.pt" file path from preprocess.py (default: None).
opt.data_ids = [None] # In case there are several corpora. (default: [None]).
opt.data_weights = [1] # Weights of different corpora, should follow the same order as in -data_ids. (default: [1]).
opt.data_to_noise = [] # IDs of datasets on which to apply noise. (default: []).
opt.save_model = 'demo-model' # Model filename (the model will be saved as <save_model>_N.pt where N is the number of steps (default: model).
opt.save_checkpoint_steps = 5000 # Save a checkpoint every X steps (default: 5000).
opt.keep_checkpoint = -1 # Keep X checkpoints (negative: keep all) (default: -1).
opt.gpuid = [] # Deprecated see world_size and gpu_ranks. (default: []).
opt.gpu_ranks = [0] # List of ranks of each process. (default: []).
opt.world_size = 1 # Total number of distributed processes. (default: 1).
opt.gpu_backend = 'nccl' # Type of torch distributed backend (default: nccl).
opt.gpu_verbose_level = 0 # Gives more info on each process per GPU. (default: 0).
opt.master_ip = 'localhost' # IP of master for torch.distributed training. (default: localhost).
opt.master_port = 10000 # Port of master for torch.distributed training. (default: 10000).
opt.queue_size = 40 # Size of queue for each process in producer/consumer (default: 40).
opt.seed = -1 # Random seed used for the experiments reproducibility. (default: -1).
# Initialization.
opt.param_init = 0.1 # Parameters are initialized over uniform distribution with support (-param_init, param_init). Use 0 to not use initialization (default: 0.1).
opt.param_init_glorot = False # Init parameters with xavier_uniform. Required for transformer. (default: False).
opt.train_from = '' # If training from a checkpoint then this is the path to the pretrained model's state_dict. (default: ).
opt.reset_optim = 'none' # Optimization resetter when train_from. {none, all, states, keep_states} (default: none).
opt.pre_word_vecs_enc = None # If a valid path is specified, then this will load pretrained word embeddings on the encoder side. See README for specific formatting instructions. (default: None).
opt.pre_word_vecs_dec = None # If a valid path is specified, then this will load pretrained word embeddings on the decoder side. See README for specific formatting instructions. (default: None)
opt.fix_word_vecs_enc = False # Fix word embeddings on the encoder side. (default: False).
opt.fix_word_vecs_dec = False # Fix word embeddings on the decoder side. (default: False).
# Optimization: Type.
opt.batch_size = 20 # Maximum batch size for training (default: 64).
opt.batch_type = 'sents' # Batch grouping for batch_size. Standard is sents. Tokens will do dynamic batching {sents, tokens} (default: sents).
opt.pool_factor = 8192 # Factor used in data loading and batch creations. It will load the equivalent of 'pool_factor' batches, sort them by the according 'sort_key' to produce homogeneous batches and reduce padding, and yield the produced batches in a shuffled way. Inspired by torchtext's pool mechanism. (default: 8192).
opt.normalization = 'sents' # Normalization method of the gradient. {sents, tokens} (default: sents).
opt.accum_count = [1] # Accumulate gradient this many times. Approximately equivalent to updating batch_size * accum_count batches at once. Recommended for Transformer. (default: [1]).
opt.accum_steps = [0] # Steps at which accum_count values change (default: [0]).
opt.valid_steps = 10000 # Perfom validation every X steps (default: 10000).
opt.valid_batch_size = 32 # Maximum batch size for validation (default: 32).
opt.max_generator_batches = 32 # Maximum batches of words in a sequence to run the generator on in parallel. Higher is faster, but uses more memory. Set to 0 to disable. (default: 32).
opt.train_steps = 100000 # Number of training steps (default: 100000).
opt.single_pass = False # Make a single pass over the training dataset. (default: False).
opt.epochs = 0 # Deprecated epochs see train_steps (default: 0).
opt.early_stopping = 0 # Number of validation steps without improving. (default: 0).
opt.early_stopping_criteria = None # Criteria to use for early stopping. (default: None).
opt.optim = 'sgd' # Optimization method. {sgd, adagrad, adadelta, adam, sparseadam, adafactor, fusedadam} (default: sgd).
opt.adagrad_accumulator_init = 0 # Initializes the accumulator values in adagrad. Mirrors the initial_accumulator_value option in the tensorflow adagrad (use 0.1 for their default). (default: 0).
opt.max_grad_norm = 20.0 # If the norm of the gradient vector exceeds this, renormalize it to have the norm equal to max_grad_norm (default: 5).
opt.dropout = [0.3] # Dropout probability; applied in LSTM stacks. (default: [0.3]).
opt.attention_dropout = [0.1] # Attention Dropout probability. (default: [0.1]).
opt.dropout_steps = [0] # Steps at which dropout changes. (default: [0]).
opt.truncated_decoder = 0 # Truncated bptt. (default: 0).
opt.adam_beta1 = 0.9 # The beta1 parameter used by Adam. Almost without exception a value of 0.9 is used in the literature, seemingly giving good results, so we would discourage changing this value from the default without due consideration. (default: 0.9).
opt.adam_beta2 = 0.999 # The beta2 parameter used by Adam. Typically a value of 0.999 is recommended, as this is the value suggested by the original paper describing Adam, and is also the value adopted in other frameworks such as Tensorflow and Keras, i.e. see: https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer or https://keras.io/optimizers/. Whereas recently the paper "Attention is All You Need" suggested a value of 0.98 for beta2, this parameter may not work well for normal models / default baselines. (default: 0.999)
opt.label_smoothing = 0.0 # Label smoothing value epsilon. Probabilities of all non-true labels will be smoothed by epsilon / (vocab_size - 1). Set to zero to turn off label smoothing. For more detailed information, see: https://arxiv.org/abs/1512.00567 (default: 0.0).
opt.average_decay = 0 # Moving average decay. Set to other than 0 (e.g. 1e-4) to activate. Similar to Marian NMT implementation: http://www.aclweb.org/anthology/P18-4020 For more detail on Exponential Moving Average: https://en.wikipedia.org/wiki/Moving_average (default: 0).
opt.average_every = 1 # Step for moving average. Default is every update, if -average_decay is set. (default: 1).
opt.src_noise = [] # {sen_shuffling, infilling, mask}.
opt.src_noise_prob = [] # Probabilities of src_noise functions (default: []).
# Optimization: Rate.
opt.learning_rate = 0.1 # Starting learning rate. Recommended settings: sgd = 1, adagrad = 0.1, adadelta = 1, adam = 0.001 (default: 1.0).
opt.learning_rate_decay = 0.5 # If update_learning_rate, decay learning rate by this much if steps have gone past start_decay_steps (default: 0.5).
opt.start_decay_steps = 50000 # Start decaying every decay_steps after start_decay_steps (default: 50000).
opt.decay_steps = 10000 # Decay every decay_steps (default: 10000).
opt.decay_method = 'none' # Use a custom decay rate. (default: none).
opt.warmup_steps = 4000 # Number of warmup steps for custom decay. (default: 4000).
# Logging.
opt.report_every = 50 # Print stats at this interval. (default: 50).
opt.log_file = '' # Output logs to a file under this path. (default: ).
opt.log_file_level = '0' # {CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET, 50, 40, 30, 20, 10, 0}.
opt.exp_host = '' # Send logs to this crayon server. (default: ).
opt.exp = '' # Name of the experiment for logging. (default: ).
opt.tensorboard = False # Use tensorboard for visualization during training. Must have the library tensorboard >= 1.14. (default: False).
opt.tensorboard_log_dir = 'runs/onmt' # Log directory for Tensorboard. This is also the name of the run. (default: runs/onmt).
# Speech.
opt.sample_rate = 16000 # Sample rate. (default: 16000).
opt.window_size = 0.02 # Window size for spectrogram in seconds. (default: 0.02).
# Image.
opt.image_channel_size = 1 # Using grayscale image can training model faster and smaller {3, 1} (default: 3).
print('Train options:\n{}'.format(opt))
#------------------------------------------------------------
#onmt.bin.train.train(opt)
#------------------------------------------------------------
# REF [function] >> train() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/bin/train.py.
onmt.utils.parse.ArgumentParser.validate_train_opts(opt)
#onmt.utils.parse.ArgumentParser.update_model_opts(opt)
#onmt.utils.parse.ArgumentParser.validate_model_opts(opt)
# REF [function] >> main() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/train_single.py.
if opt.train_from:
onmt.utils.logging.logger.info('Loading checkpoint from {}.'.format(opt.train_from))
checkpoint = torch.load(opt.train_from, map_location=lambda storage, loc: storage)
model_opt = onmt.utils.parse.ArgumentParser.ckpt_model_opts(checkpoint['opt'])
onmt.utils.parse.ArgumentParser.update_model_opts(model_opt)
onmt.utils.parse.ArgumentParser.validate_model_opts(model_opt)
onmt.utils.logging.logger.info('Loading vocab from checkpoint at {}.'.format(opt.train_from))
vocab = checkpoint['vocab']
else:
checkpoint = None
model_opt = opt
onmt.utils.parse.ArgumentParser.update_model_opts(model_opt)
onmt.utils.parse.ArgumentParser.validate_model_opts(model_opt)
vocab = torch.load(opt.data + '.vocab.pt')
fields = vocab
device_id = 0
	device = torch.device(('cuda:{}'.format(device_id) if device_id >= 0 else 'cuda') if torch.cuda.is_available() else 'cpu')
print('Device: {}.'.format(device))
#--------------------
# Build a model.
model = onmt.model_builder.build_model(model_opt, opt, fields, checkpoint=None)
generator = None # FIXME [implement] >>
	# NOTE [info] >> The generator is not called automatically, so it has to be called explicitly.
#model.generator = generator
model.add_module('generator', generator)
model = model.to(device)
model.generator = model.generator.to(device)
#--------------------
# Set up an optimizer.
lr = 1.0
torch_optimizer = torch.optim.SGD(model.parameters(), lr=lr)
optimizer = onmt.utils.optimizers.Optimizer(torch_optimizer, learning_rate=lr, learning_rate_decay_fn=None, max_grad_norm=2)
#--------------------
# Train.
model_saver = onmt.models.build_model_saver(model_opt, opt, model, fields, optimizer)
#model_saver = None
trainer = onmt.trainer.build_trainer(opt, device_id, model, fields, optimizer, model_saver=model_saver)
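	# Illustrative follow-up (sketch only; not part of the original script): once a
	# real generator is attached, training would typically be launched with the
	# trainer built above, roughly like
	#   train_iter = onmt.inputters.inputter.build_dataset_iter('train', fields, opt)
	#   valid_iter = onmt.inputters.inputter.build_dataset_iter('valid', fields, opt, is_train=False)
	#   trainer.train(train_iter, opt.train_steps, valid_iter=valid_iter, valid_steps=opt.valid_steps)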
# REF [file] >> ${OpenNMT-py_HOME}/onmt/bin/translate.py
def translate_test():
# REF [site] >> https://opennmt.net/OpenNMT-py/options/translate.html
if True:
parser = onmt.utils.parse.ArgumentParser(description='translate_test')
onmt.opts.config_opts(parser)
onmt.opts.translate_opts(parser)
opt = parser.parse_args()
else:
opt = argparse.Namespace()
opt.config = None # Config file path (default: None).
opt.save_config = None # Config file save path (default: None).
# Model.
opt.model = [] # Path to model .pt file(s). Multiple models can be specified, for ensemble decoding. (default: []).
opt.fp32 = False # Force the model to be in FP32 because FP16 is very slow on GTX1080(ti). (default: False).
opt.avg_raw_probs = False # If this is set, during ensembling scores from different models will be combined by averaging their raw probabilities and then taking the log. Otherwise, the log probabilities will be averaged directly. Necessary for models whose output layers can assign zero probability. (default: False).
# Data.
opt.data_type = 'text' # Type of the source input. Options: [text | img]. (default: text).
opt.src = None # Source sequence to decode (one line per sequence) (default: None).
opt.src_dir = '' # Source directory for image or audio files (default: ).
opt.tgt = None # True target sequence (optional) (default: None).
opt.shard_size = 10000 # Divide src and tgt (if applicable) into smaller multiple src and tgt files, then build shards, each shard will have opt.shard_size samples except last shard. shard_size=0 means no segmentation shard_size>0 means segment dataset into multiple shards, each shard has shard_size samples (default: 10000).
opt.output = 'pred.txt' # Path to output the predictions (each line will be the decoded sequence (default: pred.txt).
opt.report_align = False # Report alignment for each translation. (default: False).
opt.report_time = False # Report some translation time metrics (default: False).
opt.dynamic_dict = False # Create dynamic dictionaries (default: False).
opt.share_vocab = False # Share source and target vocabulary (default: False).
# Random Sampling.
opt.random_sampling_topk = 1 # Set this to -1 to do random sampling from full distribution. Set this to value k>1 to do random sampling restricted to the k most likely next tokens. Set this to 1 to use argmax or for doing beam search. (default: 1).
opt.random_sampling_temp = 1.0 # If doing random sampling, divide the logits by this before computing softmax during decoding. (default: 1.0).
opt.seed = 829 # Random seed (default: 829).
# Beam.
opt.beam_size = 5 # Beam size (default: 5).
opt.min_length = 0 # Minimum prediction length (default: 0).
opt.max_length = 100 # Maximum prediction length. (default: 100).
opt.max_sent_length = None # Deprecated, use '-max_length' instead (default: None).
opt.stepwise_penalty = False # Apply penalty at every decoding step. Helpful for summary penalty. (default: False).
opt.length_penalty = 'none' # Length Penalty to use. {none, wu, avg} (default: none).
opt.ratio = -0.0 # Ratio based beam stop condition (default: -0.0).
opt.coverage_penalty = 'none' # Coverage Penalty to use. {none, wu, summary} (default: none).
opt.alpha = 0.0 # Google NMT length penalty parameter (higher = longer generation) (default: 0.0).
opt.beta = -0.0 # Coverage penalty parameter (default: -0.0).
opt.block_ngram_repeat = 0 # Block repetition of ngrams during decoding. (default: 0).
opt.ignore_when_blocking = [] # Ignore these strings when blocking repeats. You want to block sentence delimiters. (default: []).
opt.replace_unk = False # Replace the generated UNK tokens with the source token that had highest attention weight. If phrase_table is provided, it will look up the identified source token and give the corresponding target token. If it is not provided (or the identified source token does not exist in the table), then it will copy the source token. (default: False).
opt.phrase_table = '' # If phrase_table is provided (with replace_unk), it will look up the identified source token and give the corresponding target token. If it is not provided (or the identified source token does not exist in the table), then it will copy the source token. (default: )
# Logging.
opt.verbose = False # Print scores and predictions for each sentence (default: False).
opt.log_file = '' # Output logs to a file under this path. (default: ).
opt.log_file_level = '0' # {CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET, 50, 40, 30, 20, 10, 0}.
opt.attn_debug = False # Print best attn for each word (default: False).
opt.align_debug = False # Print best align for each word (default: False).
opt.dump_beam = '' # File to dump beam information to. (default: ).
opt.n_best = 1 # If verbose is set, will output the n_best decoded sentences (default: 1).
# Efficiency.
opt.batch_size = 30 # Batch size (default: 30).
opt.batch_type = 'sents' # Batch grouping for batch_size. Standard is sents. Tokens will do dynamic batching {sents, tokens} (default: sents).
opt.gpu = -1 # Device to run on (default: -1).
# Speech.
opt.sample_rate = 16000 # Sample rate. (default: 16000).
opt.window_size = 0.02 # Window size for spectrogram in seconds (default: 0.02).
opt.window_stride = 0.01 # Window stride for spectrogram in seconds (default: 0.01).
opt.window = 'hamming' # Window type for spectrogram generation (default: hamming).
# Image.
opt.image_channel_size = 3 # Using grayscale image can training model faster and smaller {3, 1} (default: 3).
print('Translate options:\n{}'.format(opt))
#------------------------------------------------------------
#onmt.bin.translate.translate(opt)
#------------------------------------------------------------
# REF [function] >> translate() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/bin/translate.py.
onmt.utils.parse.ArgumentParser.validate_translate_opts(opt)
logger = onmt.utils.logging.init_logger(opt.log_file)
translator = onmt.translate.translator.build_translator(opt, report_score=True, logger=None, out_file=None)
src_shards = onmt.utils.misc.split_corpus(opt.src, opt.shard_size)
tgt_shards = onmt.utils.misc.split_corpus(opt.tgt, opt.shard_size)
shard_pairs = zip(src_shards, tgt_shards)
for i, (src_shard, tgt_shard) in enumerate(shard_pairs):
logger.info('Translating shard {}.'.format(i))
translator.translate(
src=src_shard,
tgt=tgt_shard,
src_dir=opt.src_dir,
batch_size=opt.batch_size,
batch_type=opt.batch_type,
attn_debug=opt.attn_debug,
align_debug=opt.align_debug
)
# REF [file] >> ${OpenNMT-py_HOME}/onmt/bin/server.py
def server_test():
raise NotImplementedError
#--------------------------------------------------------------------
# REF [site] >> https://opennmt.net/OpenNMT-py/Library.html
def library_example():
is_trained, is_model_loaded = True, False
preprocessed_data_dir_path = './data'
if is_trained:
model_filepath = './onmt_library_model.pt'
if is_model_loaded:
model_filepath_to_load = None
assert not is_model_loaded or (is_model_loaded and model_filepath_to_load is not None)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
gpu = 0 if torch.cuda.is_available() else -1
#--------------------
# Prepare data.
# Load in the vocabulary for the model of interest.
vocab_fields = torch.load(preprocessed_data_dir_path + '/data.vocab.pt')
train_data_files = [
preprocessed_data_dir_path + '/data.train.0.pt'
]
valid_data_files = [
preprocessed_data_dir_path + '/data.valid.0.pt'
]
src_text_field = vocab_fields['src'].base_field
src_vocab = src_text_field.vocab
src_padding = src_vocab.stoi[src_text_field.pad_token]
tgt_text_field = vocab_fields['tgt'].base_field
tgt_vocab = tgt_text_field.vocab
tgt_padding = tgt_vocab.stoi[tgt_text_field.pad_token]
train_iter = onmt.inputters.inputter.DatasetLazyIter(
dataset_paths=train_data_files, fields=vocab_fields,
batch_size=50, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, is_train=True, repeat=True
)
valid_iter = onmt.inputters.inputter.DatasetLazyIter(
dataset_paths=valid_data_files, fields=vocab_fields,
batch_size=10, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, is_train=False, repeat=False
)
#--------------------
# Build a model.
emb_size = 100
rnn_size = 500
# Specify the core model.
encoder_embeddings = onmt.modules.Embeddings(emb_size, len(src_vocab), word_padding_idx=src_padding)
encoder = onmt.encoders.RNNEncoder(
hidden_size=rnn_size, num_layers=1, bidirectional=True,
rnn_type='LSTM', embeddings=encoder_embeddings
)
decoder_embeddings = onmt.modules.Embeddings(emb_size, len(tgt_vocab), word_padding_idx=tgt_padding)
decoder = onmt.decoders.decoder.InputFeedRNNDecoder(
hidden_size=rnn_size, num_layers=1, bidirectional_encoder=True,
rnn_type='LSTM', embeddings=decoder_embeddings
)
model = onmt.models.model.NMTModel(encoder, decoder)
# Specify the tgt word generator.
model.generator = torch.nn.Sequential(
torch.nn.Linear(rnn_size, len(tgt_vocab)),
torch.nn.LogSoftmax(dim=-1)
)
if is_model_loaded:
model, model.generator = load_model(model_filepath_to_load, model, model.generator, device=device)
model = model.to(device)
model.generator = model.generator.to(device)
#--------------------
if is_trained:
# Specify loss computation module.
loss = onmt.utils.loss.NMTLossCompute(
criterion=torch.nn.NLLLoss(ignore_index=tgt_padding, reduction='sum'),
generator=model.generator
)
# Set up an optimizer.
lr = 1.0
torch_optimizer = torch.optim.SGD(model.parameters(), lr=lr)
optim = onmt.utils.optimizers.Optimizer(torch_optimizer, learning_rate=lr, learning_rate_decay_fn=None, max_grad_norm=2)
#--------------------
# Train.
# Keeping track of the output requires a report manager.
report_manager = onmt.utils.ReportMgr(report_every=50, start_time=None, tensorboard_writer=None)
trainer = onmt.Trainer(
model=model, train_loss=loss, valid_loss=loss,
optim=optim, report_manager=report_manager
)
print('Start training...')
start_time = time.time()
total_stats = trainer.train(
train_iter=train_iter, train_steps=400,
valid_iter=valid_iter, valid_steps=200
)
print('End training: {} secs.'.format(time.time() - start_time))
print('Train: Accuracy = {}, Cross entropy = {}, Perplexity = {}.'.format(total_stats.accuracy(), total_stats.xent(), total_stats.ppl()))
save_model(model_filepath, model, model.generator)
#--------------------
# Load up the translation functions.
src_reader = onmt.inputters.str2reader['text']
tgt_reader = onmt.inputters.str2reader['text']
scorer = onmt.translate.GNMTGlobalScorer(alpha=0.7, beta=0.0, length_penalty='avg', coverage_penalty='none')
# Decoding strategy:
# Greedy search, if beam_size = 1.
# Beam search, otherwise.
translator = onmt.translate.Translator(
model=model, fields=vocab_fields,
src_reader=src_reader(), tgt_reader=tgt_reader(),
global_scorer=scorer, gpu=gpu
)
# Build a word-based translation from the batch output of translator and the underlying dictionaries.
builder = onmt.translate.TranslationBuilder(data=torch.load(valid_data_files[0]), fields=vocab_fields)
for batch in valid_iter:
print('Start translating...')
start_time = time.time()
trans_batch = translator.translate_batch(batch=batch, src_vocabs=[src_vocab], attn_debug=False)
print('End translating: {} secs.'.format(time.time() - start_time))
translations = builder.from_batch(trans_batch)
for trans in translations:
print(trans.log(0))
#--------------------------------------------------------------------
def build_im2latex_model(input_channel, num_classes, word_vec_size):
bidirectional_encoder = False
embedding_dropout = 0.3
encoder_num_layers = 2
encoder_rnn_size = 500
encoder_dropout = 0.3
decoder_rnn_type = 'LSTM'
decoder_num_layers = 2
decoder_hidden_size = 500
decoder_dropout = 0.3
src_embeddings = None
tgt_embeddings = onmt.modules.Embeddings(
word_vec_size=word_vec_size,
word_vocab_size=num_classes,
word_padding_idx=1,
position_encoding=False,
feat_merge='concat',
feat_vec_exponent=0.7,
feat_vec_size=-1,
feat_padding_idx=[],
feat_vocab_sizes=[],
dropout=embedding_dropout,
sparse=False,
fix_word_vecs=False
)
encoder = onmt.encoders.ImageEncoder(
num_layers=encoder_num_layers, bidirectional=bidirectional_encoder,
rnn_size=encoder_rnn_size, dropout=encoder_dropout, image_chanel_size=input_channel
)
decoder = onmt.decoders.InputFeedRNNDecoder(
rnn_type=decoder_rnn_type, bidirectional_encoder=bidirectional_encoder,
num_layers=decoder_num_layers, hidden_size=decoder_hidden_size,
attn_type='general', attn_func='softmax',
coverage_attn=False, context_gate=None,
copy_attn=False, dropout=decoder_dropout, embeddings=tgt_embeddings,
reuse_copy_attn=False, copy_attn_type='general'
)
generator = torch.nn.Sequential(
torch.nn.Linear(in_features=decoder_hidden_size, out_features=num_classes, bias=True),
onmt.modules.util_class.Cast(dtype=torch.float32),
torch.nn.LogSoftmax(dim=-1)
)
model = onmt.models.NMTModel(encoder, decoder)
return model, generator
class MyImageEncoder(onmt.encoders.encoder.EncoderBase):
def __init__(self, image_height, input_channel, hidden_size, num_layers, bidirectional=False):
super().__init__()
assert image_height % 16 == 0, 'image_height has to be a multiple of 16'
self.image_height = image_height
# Build a model.
# This implementation assumes that input size is h x w.
self.cnn = torch.nn.Sequential(
torch.nn.Conv2d(input_channel, 64, 3, 1, 1), torch.nn.ReLU(True), torch.nn.MaxPool2d(2, 2), # 64 x h/2 x w/2.
torch.nn.Conv2d(64, 128, 3, 1, 1), torch.nn.ReLU(True), torch.nn.MaxPool2d(2, 2), # 128 x h/4 x w/4.
torch.nn.Conv2d(128, 256, 3, 1, 1), torch.nn.BatchNorm2d(256), torch.nn.ReLU(True), # 256 x h/4 x w/4.
torch.nn.Conv2d(256, 256, 3, 1, 1), torch.nn.ReLU(True), torch.nn.MaxPool2d((2, 2), (2, 1), (0, 1)), # 256 x h/8 x w/4+1.
torch.nn.Conv2d(256, 512, 3, 1, 1), torch.nn.BatchNorm2d(512), torch.nn.ReLU(True), # 512 x h/8 x w/4+1.
torch.nn.Conv2d(512, 512, 3, 1, 1), torch.nn.ReLU(True), torch.nn.MaxPool2d((2, 2), (2, 1), (0, 1)), # 512 x h/16 x w/4+2.
torch.nn.Conv2d(512, 512, 2, 1, 0), torch.nn.BatchNorm2d(512), torch.nn.ReLU(True) # 512 x h/16-1 x w/4+1.
)
num_features = (image_height // 16 - 1) * 512
#import rare.crnn_lang
#self.rnn = torch.nn.Sequential(
# rare.crnn_lang.BidirectionalLSTM(num_features, hidden_size, hidden_size),
# rare.crnn_lang.BidirectionalLSTM(hidden_size, hidden_size, hidden_size)
#)
self.sequence_rnn = torch.nn.LSTM(num_features, hidden_size, num_layers=num_layers, bidirectional=bidirectional, batch_first=False)
if bidirectional:
self.sequence_projector = torch.nn.Linear(hidden_size * 2, hidden_size * 2)
#self.sequence_projector = torch.nn.Linear(hidden_size * 2, hidden_size)
else:
self.sequence_projector = torch.nn.Linear(hidden_size, hidden_size)
def forward(self, src, lengths=None):
# NOTE [info] >> This resizing is not good.
		#src = torch.nn.functional.interpolate(src, size=(self.image_height, int(src.shape[3] * self.image_height / src.shape[2])), mode='bilinear')
		src = torch.nn.functional.interpolate(src, size=(self.image_height, src.shape[3]), mode='bilinear') # torch.nn.functional.upsample() is deprecated in favor of interpolate().
# Conv features.
conv = self.cnn(src) # [b, c_out, h/16-1, w/4+1].
b, c, h, w = conv.size()
#assert h == 1, 'The height of conv must be 1'
#conv = conv.squeeze(2) # [b, c_out, w/4+1].
conv = conv.reshape(b, -1, w) # [b, c_out * h/16-1, w/4+1].
conv = conv.permute(2, 0, 1) # [w/4+1, b, c_out * h/16-1].
# RNN features.
#enc_outputs, enc_hiddens = self.rnn((conv, None)) # [w/4+1, b, hidden size], ([#directions, b, hidden size], [#directions, b, hidden size]).
enc_outputs, enc_hiddens = self.sequence_rnn(conv) # [w/4+1, b, #directions * hidden size], ([#layers * #directions, b, hidden size], [#layers * #directions, b, hidden size]).
enc_outputs = self.sequence_projector(enc_outputs) # [w/4+1, b, hidden size].
return enc_hiddens, enc_outputs, lengths
def build_my_im2latex_model(image_height, input_channel, num_classes, word_vec_size):
bidirectional_encoder = False
embedding_dropout = 0.3
encoder_num_layers = 2
encoder_rnn_size = 500
encoder_dropout = 0.3
decoder_rnn_type = 'LSTM'
decoder_num_layers = 2
decoder_hidden_size = encoder_rnn_size * 2 if bidirectional_encoder else encoder_rnn_size
decoder_dropout = 0.3
src_embeddings = None
tgt_embeddings = onmt.modules.Embeddings(
word_vec_size=word_vec_size,
word_vocab_size=num_classes,
word_padding_idx=1,
position_encoding=False,
feat_merge='concat',
feat_vec_exponent=0.7,
feat_vec_size=-1,
feat_padding_idx=[],
feat_vocab_sizes=[],
dropout=embedding_dropout,
sparse=False,
fix_word_vecs=False
)
encoder = MyImageEncoder(
image_height, input_channel,
hidden_size=encoder_rnn_size, num_layers=encoder_num_layers, bidirectional=bidirectional_encoder
)
decoder = onmt.decoders.InputFeedRNNDecoder(
rnn_type=decoder_rnn_type, bidirectional_encoder=bidirectional_encoder,
num_layers=decoder_num_layers, hidden_size=decoder_hidden_size,
attn_type='general', attn_func='softmax',
coverage_attn=False, context_gate=None,
copy_attn=False, dropout=decoder_dropout, embeddings=tgt_embeddings,
reuse_copy_attn=False, copy_attn_type='general'
)
generator = torch.nn.Sequential(
torch.nn.Linear(in_features=decoder_hidden_size, out_features=num_classes, bias=True),
onmt.modules.util_class.Cast(dtype=torch.float32),
torch.nn.LogSoftmax(dim=-1)
)
model = onmt.models.NMTModel(encoder, decoder)
return model, generator
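# NOTE [sketch] >> A minimal, hypothetical shape check for MyImageEncoder above. The sizes are assumptions:
#	with image_height=64 and width=256, the CNN comments above predict a 512 x 3 x 65 feature map, i.e. an
#	encoder output sequence of length w/4+1 = 65. Call this function manually if such a check is wanted.
def my_image_encoder_shape_check_sketch():
	encoder = MyImageEncoder(image_height=64, input_channel=3, hidden_size=500, num_layers=2, bidirectional=False)
	dummy_src = torch.rand(2, 3, 64, 256) # [B, C, H, W] in [0, 1].
	enc_hiddens, enc_outputs, lengths = encoder(dummy_src, lengths=None)
	print(enc_outputs.shape) # Expected: torch.Size([65, 2, 500]) = [w/4+1, B, hidden size].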
# REF [function] >> Translator.translate_batch() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/translate/translator.py
def create_greedy_search_strategy(batch_size, random_sampling_topk, random_sampling_temp, min_length, max_length, block_ngram_repeat, bos_index, eos_index, pad_index, exclusion_idxs):
replace_unk = False
#tgt_prefix = False
attn_debug = False
return onmt.translate.greedy_search.GreedySearch(
pad=pad_index, bos=bos_index, eos=eos_index,
batch_size=batch_size,
min_length=min_length, max_length=max_length,
block_ngram_repeat=block_ngram_repeat,
exclusion_tokens=exclusion_idxs,
return_attention=attn_debug or replace_unk,
sampling_temp=random_sampling_temp,
keep_topk=random_sampling_topk
)
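# NOTE [sketch] >> A minimal, hypothetical usage example of create_greedy_search_strategy() above. The token
#	indices are assumptions matching the usual OpenNMT-py text-field specials ('<blank>'=1, '<s>'=2, '</s>'=3);
#	in practice they should be looked up in tgt_vocab.stoi as is done further below in this file.
def greedy_search_strategy_sketch():
	return create_greedy_search_strategy(
		batch_size=1, random_sampling_topk=1, random_sampling_temp=1.0,
		min_length=0, max_length=100, block_ngram_repeat=0,
		bos_index=2, eos_index=3, pad_index=1, exclusion_idxs=set()
	)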
# REF [function] >> Translator.translate_batch() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/translate/translator.py
def create_beam_search_strategy(batch_size, scorer, beam_size, n_best, ratio, min_length, max_length, block_ngram_repeat, bos_index, eos_index, pad_index, exclusion_idxs):
	stepwise_penalty = None
replace_unk = False
#tgt_prefix = False
attn_debug = False
return onmt.translate.beam_search.BeamSearch(
beam_size,
batch_size=batch_size,
pad=pad_index, bos=bos_index, eos=eos_index,
n_best=n_best,
global_scorer=scorer,
min_length=min_length, max_length=max_length,
return_attention=attn_debug or replace_unk,
block_ngram_repeat=block_ngram_repeat,
exclusion_tokens=exclusion_idxs,
stepwise_penalty=stepwise_penalty,
ratio=ratio
)
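# NOTE [sketch] >> A minimal, hypothetical usage example of create_beam_search_strategy() above. The scorer
#	settings mirror the ones used later in this file; the token indices are assumptions and should normally
#	be looked up in tgt_vocab.stoi.
def beam_search_strategy_sketch():
	scorer = onmt.translate.GNMTGlobalScorer(alpha=0.7, beta=0.0, length_penalty='avg', coverage_penalty='none')
	return create_beam_search_strategy(
		batch_size=1, scorer=scorer, beam_size=30, n_best=1, ratio=0.0,
		min_length=0, max_length=100, block_ngram_repeat=0,
		bos_index=2, eos_index=3, pad_index=1, exclusion_idxs=set()
	)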
# REF [function] >> Translator._decode_and_generate() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/translate/translator.py
def decode_and_generate(model, decoder_in, memory_bank, batch, src_vocabs, memory_lengths, beam_size, copy_attn, tgt_vocab, tgt_unk_idx, src_map=None, step=None, batch_offset=None):
if copy_attn:
# Turn any copied words into UNKs.
decoder_in = decoder_in.masked_fill(decoder_in.gt(len(tgt_vocab) - 1), tgt_unk_idx)
# Decoder forward, takes [tgt_len, batch, nfeats] as input
# and [src_len, batch, hidden] as memory_bank
# in case of inference tgt_len = 1, batch = beam times batch_size
# in case of Gold Scoring tgt_len = actual length, batch = 1 batch
dec_out, dec_attn = model.decoder(decoder_in, memory_bank, memory_lengths=memory_lengths, step=step)
# Generator forward.
if not copy_attn:
if 'std' in dec_attn:
attn = dec_attn['std']
else:
attn = None
log_probs = model.generator(dec_out.squeeze(0))
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
else:
attn = dec_attn['copy']
scores = model.generator(dec_out.view(-1, dec_out.size(2)), attn.view(-1, attn.size(2)), src_map)
		# Here we have scores of shape [tgt_len x batch, vocab] or [beam x batch, vocab].
if batch_offset is None:
scores = scores.view(-1, batch.batch_size, scores.size(-1))
scores = scores.transpose(0, 1).contiguous()
else:
scores = scores.view(-1, beam_size, scores.size(-1))
scores = onmt.modules.copy_generator.collapse_copy_scores(
scores,
batch,
tgt_vocab,
src_vocabs,
batch_dim=0,
batch_offset=batch_offset
)
scores = scores.view(decoder_in.size(0), -1, scores.size(-1))
log_probs = scores.squeeze(0).log()
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
return log_probs, attn
# REF [function] >> Translator._translate_batch_with_strategy() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/translate/translator.py
# _translate_batch_with_strategy()
# _run_encoder()
# _gold_score()
# _score_target()
# _decode_and_generate()
# _decode_and_generate()
# _align_forward()
# _run_encoder()
def translate_batch_with_strategy(model, decode_strategy, src, batch_size, beam_size, unk_index, tgt_vocab, src_vocabs=[]):
copy_attn = False # Fixed.
report_align = False # Fixed.
parallel_paths = decode_strategy.parallel_paths # beam_size.
enc_states, memory_bank, src_lengths = model.encoder(src, lengths=None)
if src_lengths is None:
src_lengths = torch.Tensor(batch_size).type_as(memory_bank).long().fill_(memory_bank.size(0))
model.decoder.init_state(src, memory_bank, enc_states)
src_map, target_prefix = None, None
fn_map_state, memory_bank, memory_lengths, src_map = decode_strategy.initialize(memory_bank, src_lengths, src_map, target_prefix)
if fn_map_state is not None:
model.decoder.map_state(fn_map_state)
for step in range(decode_strategy.max_length):
decoder_input = decode_strategy.current_predictions.view(1, -1, 1)
log_probs, attn = decode_and_generate(
model,
decoder_input,
memory_bank,
			batch=None, # NOTE [caution] >> batch is only used in the copy-attention branch of decode_and_generate(), and copy_attn is False here, so passing None is safe.
src_vocabs=src_vocabs,
memory_lengths=memory_lengths,
beam_size=beam_size, copy_attn=copy_attn,
tgt_vocab=tgt_vocab, tgt_unk_idx=unk_index,
src_map=src_map,
step=step,
batch_offset=decode_strategy.batch_offset
)
decode_strategy.advance(log_probs, attn)
any_finished = decode_strategy.is_finished.any()
if any_finished:
decode_strategy.update_finished()
if decode_strategy.done:
break
select_indices = decode_strategy.select_indices
if any_finished:
# Reorder states.
if isinstance(memory_bank, tuple):
memory_bank = tuple(x.index_select(1, select_indices) for x in memory_bank)
else:
memory_bank = memory_bank.index_select(1, select_indices)
memory_lengths = memory_lengths.index_select(0, select_indices)
if src_map is not None:
src_map = src_map.index_select(1, select_indices)
if parallel_paths > 1 or any_finished:
model.decoder.map_state(lambda state, dim: state.index_select(dim, select_indices))
results = dict()
results['scores'] = decode_strategy.scores
results['predictions'] = decode_strategy.predictions
results['attention'] = decode_strategy.attention
if report_align:
		# NOTE [info] >> Alignment reporting would require Translator._align_forward(), which this standalone function does not provide.
		raise NotImplementedError('report_align is not supported in translate_batch_with_strategy().')
else:
results['alignment'] = [[] for _ in range(batch_size)]
return results
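# NOTE [sketch] >> A small, hypothetical helper showing how the token IDs in results['predictions'] returned by
#	translate_batch_with_strategy() can be mapped back to text with the target vocabulary; im2latex_example()
#	below does the same thing inline. The '</s>' EOS token is an assumption taken from the fields used below.
def prediction_ids_to_text_sketch(pred_ids, tgt_vocab, eos_token='</s>'):
	tokens = [tgt_vocab.itos[idx] for idx in pred_ids.cpu().numpy() if idx < len(tgt_vocab.itos)]
	if eos_token in tokens: # A decoded sequence may or may not end with the EOS token.
		tokens = tokens[:tokens.index(eos_token)]
	return ' '.join(tokens)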
def im2latex_example():
src_data_type, tgt_data_type = 'img', 'text'
input_channel = 3
num_classes = 466
word_vec_size = 500
batch_size = 32
train_steps, valid_steps, save_checkpoint_steps = 400, 200, 200
#train_steps, valid_steps, save_checkpoint_steps = 10000, 1000, 5000
is_trained, is_model_loaded = True, True
is_small_data_used = True
	is_my_model_used = False # Use the custom RARE-style image encoder (MyImageEncoder) defined in this script.
is_preprocessed_vocab_used, is_preprocessed_data_iterators_used = True, True
image_height = 64 if is_my_model_used else None
if is_small_data_used:
# For im2text_small.
# REF [site] >> http://lstm.seas.harvard.edu/latex/im2text_small.tgz
preprocessed_data_dir_path = './data/im2text_small'
num_train_data_files, num_valid_data_files = 2, 1
else:
# For im2text.
# REF [site] >> http://lstm.seas.harvard.edu/latex/im2text.tgz
preprocessed_data_dir_path = './data/im2text'
num_train_data_files, num_valid_data_files = 153, 17
if is_trained:
if is_my_model_used:
model_filepath = './data/im2latex_my_model.pt'
else:
model_filepath = './data/im2latex_model.pt'
if is_model_loaded:
if is_my_model_used:
model_filepath_to_load = './data/im2latex_my_model.pt'
else:
# Downloaded from http://lstm.seas.harvard.edu/latex/py-model.pt.
model_filepath_to_load = './data/py-model.pt'
#model_filepath_to_load = './data/im2latex_model.pt'
assert not is_model_loaded or (is_model_loaded and model_filepath_to_load is not None)
gpu = 0
device = torch.device(('cuda:{}'.format(gpu) if gpu >= 0 else 'cuda') if torch.cuda.is_available() else 'cpu')
print('Device: {}.'.format(device))
#--------------------
# Prepare data.
def read_lines_from_file(filepath):
try:
with open(filepath, 'r', encoding='utf-8') as fd:
lines = fd.read().splitlines() # A list of strings.
return lines
except UnicodeDecodeError as ex:
print('Unicode decode error in {}: {}.'.format(filepath, ex))
raise
except FileNotFoundError as ex:
print('File not found, {}: {}.'.format(filepath, ex))
raise
# REF [site] >> https://opennmt.net/OpenNMT-py/im2text.html
	# NOTE [info] >> The vocab_fields loaded from the preprocessed data and the vocab_fields built below are different, so the model has to be trained with whichever one is used here.
	#	If not, wrong results will be obtained.
if is_preprocessed_vocab_used:
# NOTE [info] >> When preprocessing data by onmt_preprocess or ${OpenNMT-py_HOME}/onmt/bin/preprocess.py.
# Load in the vocabulary for the model of interest.
vocab_fields = torch.load(preprocessed_data_dir_path + '/demo.vocab.pt')
else:
#UNKNOWN_TOKEN, PAD_TOKEN, SOS_TOKEN, EOS_TOKEN = '<UNK>', '<PAD>', '<SOS>', '<EOS>'
UNKNOWN_TOKEN, PAD_TOKEN, SOS_TOKEN, EOS_TOKEN = '<unk>', '<blank>', '<s>', '</s>'
def preprocess(x):
return x
def postprocess(batch, vocab):
if len(batch) == 1: return batch[0].unsqueeze(dim=0)
max_height, max_width = max([tt.shape[1] for tt in batch]), max([tt.shape[2] for tt in batch])
batch_resized = torch.zeros((len(batch), 3, max_height, max_width), dtype=batch[0].dtype)
for idx, tt in enumerate(batch):
batch_resized[idx, :, :tt.shape[1], :tt.shape[2]] = tt
return batch_resized
# REF [function] >> image_fields() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/image_dataset.py.
src_field = torchtext.data.Field(
sequential=False, use_vocab=False, init_token=None, eos_token=None, fix_length=None,
#dtype=torch.float32, preprocessing=preprocess, postprocessing=postprocess, lower=False,
dtype=torch.float32, preprocessing=None, postprocessing=postprocess, lower=False,
tokenize=None, tokenizer_language='en',
include_lengths=False, batch_first=False, pad_token=None, pad_first=False, unk_token=UNKNOWN_TOKEN,
truncate_first=False, stop_words=None, is_target=False
)
tgt_field = torchtext.data.Field(
sequential=True, use_vocab=True, init_token=SOS_TOKEN, eos_token=EOS_TOKEN, fix_length=None,
dtype=torch.int64, preprocessing=None, postprocessing=None, lower=False,
tokenize=None, tokenizer_language='en',
#tokenize=functools.partial(onmt.inputters.inputter._feature_tokenize, layer=0, feat_delim=None, truncate=None), tokenizer_language='en',
include_lengths=False, batch_first=False, pad_token=PAD_TOKEN, pad_first=False, unk_token=UNKNOWN_TOKEN,
truncate_first=False, stop_words=None, is_target=False
)
indices_field = torchtext.data.Field(
sequential=False, use_vocab=False, init_token=None, eos_token=None, fix_length=None,
dtype=torch.int64, preprocessing=None, postprocessing=None, lower=False,
tokenize=None, tokenizer_language='en',
include_lengths=False, batch_first=False, pad_token=None, pad_first=False, unk_token=UNKNOWN_TOKEN,
truncate_first=False, stop_words=None, is_target=False
)
corpus_id_field = torchtext.data.Field(
sequential=False, use_vocab=True, init_token=None, eos_token=None, fix_length=None,
dtype=torch.int64, preprocessing=None, postprocessing=None, lower=False,
tokenize=None, tokenizer_language='en',
include_lengths=False, batch_first=False, pad_token=None, pad_first=False, unk_token=UNKNOWN_TOKEN,
truncate_first=False, stop_words=None, is_target=False
)
# NOTE [info] >> It is better to build a vocabulary from corpora.
# TODO [choose] >>
if True:
tgt_train_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-train.txt')
tgt_valid_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-val.txt')
tgt_test_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-test.txt')
texts = [txt.split() for txt in tgt_train_texts] + [txt.split() for txt in tgt_valid_texts] + [txt.split() for txt in tgt_test_texts]
tgt_field.build_vocab(texts) # Sort vocabulary + add special tokens, <unknown>, <pad>, <bos>, and <eos>.
else:
vocab = read_lines_from_file(preprocessed_data_dir_path + '/vocab.txt')
#tgt_field.vocab = vocab # AttributeError: 'list' object has no attribute 'stoi'.
tgt_field.build_vocab([vocab]) # Sort vocabulary + add special tokens, <unknown>, <pad>, <bos>, and <eos>.
corpus_id_field.build_vocab(['train'])
# REF [function] >> build_vocab() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/inputter.py.
vocab_fields = {
'src': src_field,
'tgt': onmt.inputters.text_dataset.TextMultiField('tgt', tgt_field, feats_fields=[]),
'indices': indices_field,
'corpus_id': corpus_id_field,
}
"""
src_text_field = vocab_fields['src'].base_field # Error: AttributeError: 'Field' object has no attribute 'base_field'.
src_vocab = src_text_field.vocab
src_padding = src_vocab.stoi[src_text_field.pad_token]
#src_unk = src_vocab.stoi[src_text_field.unk_token]
#src_bos = src_vocab.stoi[src_text_field.init_token]
#src_eos = src_vocab.stoi[src_text_field.eos_token]
"""
tgt_text_field = vocab_fields['tgt'].base_field
tgt_vocab = tgt_text_field.vocab
tgt_padding = tgt_vocab.stoi[tgt_text_field.pad_token]
#tgt_unk = tgt_vocab.stoi[tgt_text_field.unk_token]
#tgt_bos = tgt_vocab.stoi[tgt_text_field.init_token]
#tgt_eos = tgt_vocab.stoi[tgt_text_field.eos_token]
src_reader = onmt.inputters.str2reader[src_data_type]
tgt_reader = onmt.inputters.str2reader[tgt_data_type]
if src_data_type == 'img':
src_reader_obj = src_reader(truncate=None, channel_size=input_channel)
elif src_data_type == 'audio':
src_reader_obj = src_reader(sample_rate=0, window_size=0, window_stride=0, window=None, normalize_audio=True, truncate=None)
else:
src_reader_obj = src_reader()
if tgt_data_type == 'img':
tgt_reader_obj = tgt_reader(truncate=None, channel_size=input_channel)
elif tgt_data_type == 'audio':
tgt_reader_obj = tgt_reader(sample_rate=0, window_size=0, window_stride=0, window=None, normalize_audio=True, truncate=None)
else:
tgt_reader_obj = tgt_reader()
if is_preprocessed_data_iterators_used:
# NOTE [info] >> When preprocessing data by onmt_preprocess or ${OpenNMT-py_HOME}/onmt/bin/preprocess.py.
train_data_files = list()
for idx in range(num_train_data_files):
train_data_files.append(preprocessed_data_dir_path + '/demo.train.{}.pt'.format(idx))
valid_data_files = list()
for idx in range(num_valid_data_files):
valid_data_files.append(preprocessed_data_dir_path + '/demo.valid.{}.pt'.format(idx))
# REF [function] >> build_dataset_iter() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/inputter.py.
train_iter = onmt.inputters.inputter.DatasetLazyIter(
dataset_paths=train_data_files, fields=vocab_fields,
batch_size=batch_size, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, is_train=True, repeat=True,
num_batches_multiple=1, yield_raw_example=False
)
valid_iter = onmt.inputters.inputter.DatasetLazyIter(
dataset_paths=valid_data_files, fields=vocab_fields,
batch_size=batch_size, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, is_train=False, repeat=False,
num_batches_multiple=1, yield_raw_example=False
)
else:
sortkey = onmt.inputters.str2sortkey[tgt_data_type]
src_dir_path = preprocessed_data_dir_path + '/images'
src_train_filepaths = read_lines_from_file(preprocessed_data_dir_path + '/src-train.txt')
src_train_filepaths = [bytes(fpath, encoding='utf-8') for fpath in src_train_filepaths]
tgt_train_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-train.txt')
src_valid_filepaths = read_lines_from_file(preprocessed_data_dir_path + '/src-val.txt')
src_valid_filepaths = [bytes(fpath, encoding='utf-8') for fpath in src_valid_filepaths]
tgt_valid_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-val.txt')
# REF [function] >> translate() in https://github.com/OpenNMT/OpenNMT-py/tree/master/onmt/translate/translator.py.
train_src_data = {'reader': src_reader_obj, 'data': src_train_filepaths, 'dir': src_dir_path}
train_tgt_data = {'reader': tgt_reader_obj, 'data': tgt_train_texts, 'dir': None}
train_readers, train_data, train_dirs = onmt.inputters.Dataset.config([('src', train_src_data), ('tgt', train_tgt_data)])
train_dataset = onmt.inputters.Dataset(
fields=vocab_fields, readers=train_readers, data=train_data, dirs=train_dirs, sort_key=sortkey,
filter_pred=None, corpus_id=None
)
valid_src_data = {'reader': src_reader_obj, 'data': src_valid_filepaths, 'dir': src_dir_path}
valid_tgt_data = {'reader': tgt_reader_obj, 'data': tgt_valid_texts, 'dir': None}
valid_readers, valid_data, valid_dirs = onmt.inputters.Dataset.config([('src', valid_src_data), ('tgt', valid_tgt_data)])
valid_dataset = onmt.inputters.Dataset(
fields=vocab_fields, readers=valid_readers, data=valid_data, dirs=valid_dirs, sort_key=sortkey,
filter_pred=None, corpus_id=None
)
train_iter = onmt.inputters.inputter.OrderedIterator(
dataset=train_dataset,
batch_size=batch_size, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, train=True, repeat=True,
sort=False, sort_within_batch=True,
yield_raw_example=False
)
#train_iter.create_batches()
valid_iter = onmt.inputters.inputter.OrderedIterator(
dataset=valid_dataset,
batch_size=batch_size, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, train=False, repeat=False,
sort=False, sort_within_batch=True,
yield_raw_example=False
)
#valid_iter.create_batches()
if False:
# Information on inputs.
# Refer to "Information on outputs".
tgt_padding = tgt_vocab.stoi[tgt_text_field.pad_token]
tgt_unk = tgt_vocab.stoi[tgt_text_field.unk_token]
tgt_bos = tgt_vocab.stoi[tgt_text_field.init_token]
tgt_eos = tgt_vocab.stoi[tgt_text_field.eos_token]
print('<UNK> = {}, <PAD> = {}, <BOS> = {}, <EOS> = {}.'.format(tgt_unk, tgt_padding, tgt_bos, tgt_eos))
for idx, batch in enumerate(train_iter):
# Source: [B, C, H, W] & [0, 1].
# Target: [T, B, 1]. No one-hot encoding.
print('Source #{}: {}, {}, ({}, {}).'.format(idx, batch.src.shape, batch.src.dtype, torch.min(batch.src), torch.max(batch.src)))
print('Target #{}: {}, {}.'.format(idx, batch.tgt.shape, batch.tgt.dtype))
#print('Target #{}: {}.'.format(idx, batch.tgt.transpose(0, 1).squeeze(dim=-1)))
if idx >= 4: break
#--------------------
# Build a model.
if is_my_model_used:
model, generator = build_my_im2latex_model(image_height, input_channel, num_classes, word_vec_size)
else:
model, generator = build_im2latex_model(input_channel, num_classes, word_vec_size)
#if model: print('Model:\n{}'.format(model))
	# TODO [check] >> It is unclear why the generator has to be attached at a different point depending on which model is used.
if is_my_model_used:
		# NOTE [info] >> The generator is not called inside the model's forward pass, so it has to be called explicitly.
#model.generator = generator
model.add_module('generator', generator)
if is_model_loaded:
model, generator = load_model(model_filepath_to_load, model, generator, device=device)
if not is_my_model_used:
			# NOTE [info] >> The generator is not called inside the model's forward pass, so it has to be called explicitly.
#model.generator = generator
model.add_module('generator', generator)
model = model.to(device)
model.generator = model.generator.to(device)
#--------------------
if is_trained:
# Specify loss computation module.
loss = onmt.utils.loss.NMTLossCompute(
criterion=torch.nn.NLLLoss(ignore_index=tgt_padding, reduction='sum'),
generator=model.generator
)
# Set up an optimizer.
lr = 1.0
torch_optimizer = torch.optim.SGD(model.parameters(), lr=lr)
optim = onmt.utils.optimizers.Optimizer(torch_optimizer, learning_rate=lr, learning_rate_decay_fn=None, max_grad_norm=2)
#--------------------
# Train.
# Keeping track of the output requires a report manager.
#model_saver = onmt.models.ModelSaver('./data/model_ckpt', model, model_opt, fields=vocab_fields, optim=optim, keep_checkpoint=-1)
model_saver = None
report_manager = onmt.utils.ReportMgr(report_every=50, start_time=None, tensorboard_writer=None)
trainer = onmt.Trainer(
model=model, train_loss=loss, valid_loss=loss, optim=optim,
model_saver=model_saver,
report_manager=report_manager
)
print('Start training...')
start_time = time.time()
total_stats = trainer.train(
train_iter=train_iter, train_steps=train_steps,
valid_iter=valid_iter, valid_steps=valid_steps,
save_checkpoint_steps=save_checkpoint_steps
)
print('End training: {} secs.'.format(time.time() - start_time))
print('Train: Accuracy = {}, Cross entropy = {}, Perplexity = {}.'.format(total_stats.accuracy(), total_stats.xent(), total_stats.ppl()))
print('Start evaluating...')
start_time = time.time()
stats = trainer.validate(valid_iter=valid_iter, moving_average=None)
print('End evaluating: {} secs.'.format(time.time() - start_time))
print('Evaluation: Accuracy = {}, Cross entropy = {}, Perplexity = {}.'.format(stats.accuracy(), stats.xent(), stats.ppl()))
save_model(model_filepath, model, model.generator)
#--------------------
# Load up the translation functions.
scorer = onmt.translate.GNMTGlobalScorer(alpha=0.7, beta=0.0, length_penalty='avg', coverage_penalty='none')
if True:
# Use a customized onmt.translate.Translator._translate_batch_with_strategy().
# Use images as the input to a model.
import os, torchvision, cv2
tgt_unk = tgt_vocab.stoi[tgt_text_field.unk_token]
tgt_bos = tgt_vocab.stoi[tgt_text_field.init_token]
tgt_eos = tgt_vocab.stoi[tgt_text_field.eos_token]
src_filepaths = read_lines_from_file(preprocessed_data_dir_path + '/src-test.txt')
tgt_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-test.txt')
transform = torchvision.transforms.ToTensor()
src_batches = list()
for fpath in src_filepaths:
img_fpath = os.path.join(preprocessed_data_dir_path, 'images', fpath)
img = cv2.imread(img_fpath, cv2.IMREAD_COLOR)
if img is None:
print('Failed to load an image: {}.'.format(img_fpath))
continue
img = transform(img)
img = torch.unsqueeze(img, dim=0) # Batch: Tensor, [B, C, H, W].
img = img.to(device)
src_batches.append(img)
tgt_batches = list()
for txt in tgt_texts:
tgt_batches.append([txt])
assert len(src_batches) == len(tgt_batches)
is_beam_search_used = True
if is_beam_search_used:
beam_size = 30
n_best = 1
ratio = 0.0
else:
beam_size = 1
random_sampling_topk, random_sampling_temp = 1, 1
n_best = 1 # Fixed. For handling translation results.
min_length, max_length = 0, 100
block_ngram_repeat = 0
#ignore_when_blocking = frozenset()
#exclusion_idxs = {tgt_vocab.stoi[t] for t in ignore_when_blocking}
exclusion_idxs = set()
model.eval()
with torch.no_grad():
for src_batch, tgt_batch in zip(src_batches, tgt_batches):
#batch_size = len(src_batch)
batch_size = 1
if is_beam_search_used:
decode_strategy = create_beam_search_strategy(batch_size, scorer, beam_size, n_best, ratio, min_length, max_length, block_ngram_repeat, tgt_bos, tgt_eos, tgt_padding, exclusion_idxs)
else:
decode_strategy = create_greedy_search_strategy(batch_size, random_sampling_topk, random_sampling_temp, min_length, max_length, block_ngram_repeat, tgt_bos, tgt_eos, tgt_padding, exclusion_idxs)
print('Start translating...')
start_time = time.time()
trans_batch = translate_batch_with_strategy(model, decode_strategy, src_batch, batch_size, beam_size, tgt_unk, tgt_vocab, src_vocabs=[])
print('End translating: {} secs.'.format(time.time() - start_time))
# Information on outputs.
# Refer to "Information on inputs".
#trans_batch['predictions'] # [batch size (list)][#bests (list)][decoded token ID sequence (tensor)]. Each decoded output has <EOS> (not always) but no <SOS>.
#trans_batch['scores'] # [batch size (list)][#bests (list)][scalar (tensor)].
#trans_batch['attention'] # [batch size (list)][#bests (list)][?].
#trans_batch['alignment'] # [batch size (list)][?].
for idx, (gt, pred, score, attn, alignment) in enumerate(zip(tgt_batch, trans_batch['predictions'], trans_batch['scores'], trans_batch['attention'], trans_batch['alignment'])):
print('ID #{}:'.format(idx))
print('\tG/T = {}.'.format(gt))
for rank_id in range(n_best):
try:
print('\tPrediction (rank {}) = {}.'.format(rank_id, ' '.join([tgt_vocab.itos[elem] for elem in pred[0].cpu().numpy() if elem < len(tgt_vocab.itos)])))
except IndexError as ex:
print('\tDecoding error (rank {}): {}.'.format(rank_id, pred[rank_id]))
print('\tScore (rank {}) = {}.'.format(rank_id, score[rank_id].cpu().item()))
#print('\tAttention (rank {}) = {}.'.format(rank_id, attn[rank_id].cpu().numpy()))
#print('\tAlignment (rank {}) = {}.'.format(rank_id, alignment[rank_id].cpu().item())) # Empty.
elif False:
		# NOTE [error] >> This does not work when the sources are not text.
# onmt.translate.Translator.translate() uses an instance of onmt.translate.TranslationBuilder.
# onmt.translate.TranslationBuilder builds a word-based translation from the batch output of translator and the underlying dictionaries.
# NOTE [info] >> When using input files.
try:
import tempfile
with tempfile.TemporaryFile(mode='w') as fd:
# Decoding strategy:
# Greedy search, if beam_size = 1.
# Beam search, otherwise.
translator = onmt.translate.Translator(
model=model, fields=vocab_fields,
src_reader=src_reader_obj, tgt_reader=tgt_reader_obj,
n_best=1, min_length=0, max_length=100,
beam_size=30, random_sampling_topk=1, random_sampling_temp=1,
data_type=src_data_type,
global_scorer=scorer,
copy_attn=False, report_align=False, report_score=True, out_file=fd,
gpu=gpu
)
src_filepaths = read_lines_from_file(preprocessed_data_dir_path + '/src-test.txt')
src_filepaths = [bytes(fpath, encoding='utf-8') for fpath in src_filepaths]
tgt_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-test.txt')
try:
print('Start translating...')
start_time = time.time()
scores, predictions = translator.translate(src=src_filepaths, tgt=None, src_dir=preprocessed_data_dir_path + '/images', batch_size=batch_size, batch_type='tokens', attn_debug=False, align_debug=False, phrase_table='')
#scores, predictions = translator.translate(src=src_filepaths, tgt=tgt_texts, src_dir=preprocessed_data_dir_path + '/images', batch_size=batch_size, batch_type='tokens', attn_debug=False, align_debug=False, phrase_table='')
print('End translating: {} secs.'.format(time.time() - start_time))
for idx, (score, pred, gt) in enumerate(zip(scores, predictions, tgt_texts)):
print('ID #{}:'.format(idx))
print('\tG/T = {}.'.format(gt))
print('\tPrediction = {}.'.format(pred[0]))
print('\tScore = {}.'.format(score[0].cpu().item()))
except (RuntimeError, Exception) as ex:
print('Error: {}.'.format(ex))
except UnicodeDecodeError as ex:
print('Unicode decode error: {}.'.format(ex))
except FileNotFoundError as ex:
print('File not found: {}.'.format(ex))
else:
# Decoding strategy:
# Greedy search, if beam_size = 1.
# Beam search, otherwise.
translator = onmt.translate.Translator(
model=model, fields=vocab_fields,
src_reader=src_reader_obj, tgt_reader=tgt_reader_obj,
n_best=1, min_length=0, max_length=100,
beam_size=30, random_sampling_topk=1, random_sampling_temp=1,
data_type=src_data_type,
global_scorer=scorer,
copy_attn=False, report_align=False, report_score=True,
gpu=gpu
)
for batch in valid_iter:
print('Start translating...')
start_time = time.time()
trans_batch = translator.translate_batch(batch=batch, src_vocabs=[], attn_debug=False)
print('End translating: {} secs.'.format(time.time() - start_time))
#print('\tBatch source = {}.'.format(trans_batch['batch'].src.cpu().numpy()))
#print('\tBatch target = {}.'.format(trans_batch['batch'].tgt.cpu().numpy()))
#print('\tBatch indices = {}.'.format(trans_batch['batch'].indices.cpu().numpy()))
#print('\tBatch corpus ID = {}.'.format(trans_batch['batch'].corpus_id.cpu().numpy()))
for idx, (pred, score, attn, gold_score, alignment) in enumerate(zip(trans_batch['predictions'], trans_batch['scores'], trans_batch['attention'], trans_batch['gold_score'], trans_batch['alignment'])):
print('ID #{}:'.format(idx))
try:
print('\tPrediction = {}.'.format(' '.join([tgt_vocab.itos[elem] for elem in pred[0].cpu().numpy() if elem < len(tgt_vocab.itos)])))
except IndexError as ex:
print('\tDecoding error: {}.'.format(pred[0]))
print('\tScore = {}.'.format(score[0].cpu().item()))
#print('\tAttention = {}.'.format(attn[0].cpu().numpy()))
print('\tGold score = {}.'.format(gold_score.cpu().numpy()))
#print('\tAlignment = {}.'.format(alignment[0].cpu().item()))
#--------------------------------------------------------------------
"""
NMTModel(
(encoder): ImageEncoder(
(layer1): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(layer2): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(layer3): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(layer4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(layer5): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(layer6): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(batch_norm1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(batch_norm2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(batch_norm3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(rnn): LSTM(512, 250, num_layers=2, dropout=0.3, bidirectional=True)
(pos_lut): Embedding(1000, 512)
)
(decoder): InputFeedRNNDecoder(
(embeddings): Embeddings(
(make_embedding): Sequential(
(emb_luts): Elementwise(
(0): Embedding(1798, 80, padding_idx=1)
)
)
)
(dropout): Dropout(p=0.3, inplace=False)
(rnn): StackedLSTM(
(dropout): Dropout(p=0.3, inplace=False)
(layers): ModuleList(
(0): LSTMCell(580, 500)
(1): LSTMCell(500, 500)
)
)
(attn): GlobalAttention(
(linear_in): Linear(in_features=500, out_features=500, bias=False)
(linear_out): Linear(in_features=1000, out_features=500, bias=False)
)
)
(generator): Sequential(
(0): Linear(in_features=500, out_features=1798, bias=True)
(1): Cast()
(2): LogSoftmax()
)
)
"""
def build_submodels(input_channel, num_classes, word_vec_size):
bidirectional_encoder = True
embedding_dropout = 0.3
encoder_num_layers = 2
encoder_rnn_size = 500
encoder_dropout = 0.3
decoder_rnn_type = 'LSTM'
decoder_num_layers = 2
decoder_hidden_size = encoder_rnn_size
decoder_dropout = 0.3
src_embeddings = None
tgt_embeddings = onmt.modules.Embeddings(
word_vec_size=word_vec_size,
word_vocab_size=num_classes,
word_padding_idx=1,
position_encoding=False,
feat_merge='concat',
feat_vec_exponent=0.7,
feat_vec_size=-1,
feat_padding_idx=[],
feat_vocab_sizes=[],
dropout=embedding_dropout,
sparse=False,
fix_word_vecs=False
)
encoder = onmt.encoders.ImageEncoder(
num_layers=encoder_num_layers, bidirectional=bidirectional_encoder,
rnn_size=encoder_rnn_size, dropout=encoder_dropout, image_chanel_size=input_channel
)
decoder = onmt.decoders.InputFeedRNNDecoder(
rnn_type=decoder_rnn_type, bidirectional_encoder=bidirectional_encoder,
num_layers=decoder_num_layers, hidden_size=decoder_hidden_size,
attn_type='general', attn_func='softmax',
coverage_attn=False, context_gate=None,
copy_attn=False, dropout=decoder_dropout, embeddings=tgt_embeddings,
reuse_copy_attn=False, copy_attn_type='general'
)
generator = torch.nn.Sequential(
torch.nn.Linear(in_features=decoder_hidden_size, out_features=num_classes, bias=True),
onmt.modules.util_class.Cast(dtype=torch.float32),
torch.nn.LogSoftmax(dim=-1)
)
return encoder, decoder, generator
class MyModel(torch.nn.Module):
def __init__(self, encoder, decoder, generator=None):
super().__init__()
self.encoder, self.decoder, self._generator = encoder, decoder, generator
# REF [function] >> NMTModel.forward() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/models/model.py
def forward(self, src, tgt, lengths, bptt=False, with_align=False):
# TODO [check] >> This function is not tested.
enc_state, memory_bank, lengths = self.encoder(src, lengths=lengths)
if bptt is False:
self.decoder.init_state(src, memory_bank, enc_state)
dec_in = tgt[:-1] # Exclude last target from inputs.
dec_outs, attns = self.decoder(dec_in, memory_bank, memory_lengths=lengths, with_align=with_align)
if self._generator: dec_outs = self._generator(dec_outs)
return dec_outs, attns
# REF [site] >> https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/model_builder.py
def build_my_simple_model(use_NMTModel, input_channel, num_classes, word_vec_size):
encoder, decoder, generator = build_submodels(input_channel, num_classes, word_vec_size)
if use_NMTModel:
model = onmt.models.NMTModel(encoder, decoder)
else:
model = MyModel(encoder, decoder, generator=None)
return model, generator
def simple_example():
use_NMTModel = False
input_channel = 3
num_classes = 1798
word_vec_size = 80
batch_size = 64
max_time_steps = 10
gpu = 0
device = torch.device(('cuda:{}'.format(gpu) if gpu >= 0 else 'cuda') if torch.cuda.is_available() else 'cpu')
print('Device: {}.'.format(device))
#--------------------
# Build a model.
model, generator = build_my_simple_model(use_NMTModel, input_channel, num_classes, word_vec_size)
#if model: print('Model:\n{}'.format(model))
	# NOTE [info] >> The generator is not called inside the model's forward pass, so it has to be called explicitly.
#model.generator = generator
model.add_module('generator', generator)
model = model.to(device)
model.generator = model.generator.to(device)
#--------------------
# For checking.
if False:
# Information on inputs.
inputs = torch.rand(batch_size, input_channel, 300, 300) # [B, C, H, W]. [0, 1].
outputs = torch.randint(num_classes, (max_time_steps, batch_size, 1)) # [T, B, 1]. No one-hot encoding.
output_lens = torch.randint(1, max_time_steps + 1, (batch_size,)) # [B].
with torch.no_grad():
# Information on outputs.
model_outputs, attentions = model(inputs.to(device), outputs.to(device), output_lens.to(device)) # [T-1, B, hidden size] & [T-1, B, ???].
model_outputs = model.generator(model_outputs) # [T-1, B, #classes].
print('Source: {}, {}, ({}, {}).'.format(inputs.shape, inputs.dtype, torch.min(inputs), torch.max(inputs)))
print('Target: {}, {}.'.format(outputs.shape, outputs.dtype))
print('Model output: {}, {}.'.format(model_outputs.shape, model_outputs.dtype))
#model_outputs = model_outputs.transpose(0, 1) # [T-1, B, #classes] -> [B, T-1, #classes] where T-1 is for one-step look-ahead.
#_, model_outputs = torch.max(model_outputs, dim=-1)
model_outputs = model_outputs.cpu().numpy()
attentions = attentions['std'].cpu().numpy()
#attentions = attentions['copy'].cpu().numpy() # If copy_attn = True.
#attentions = attentions['coverage'].cpu().numpy() # If coverage_attn = True.
print("Model outputs' shape = {}.".format(model_outputs.shape))
print("Attentions' shape = {}.".format(attentions.shape))
#--------------------
# Train and evaluate.
#--------------------
# Infer.
# FIXME [implement] >> How to infer?
def main():
#preprocess_test() # Not yet completed.
#train_test() # Not yet completed.
#translate_test() # Not yet completed.
#server_test() # Not yet implemented.
#--------------------
#library_example()
im2latex_example()
#simple_example() # Not yet completed.
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
erkike/daw | angular/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py | 960 | 45344 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
generator_default_variables = {
'OS': 'android',
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.so',
'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
'LIB_DIR': '$(obj).$(TOOLSET)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(RULE_SOURCES)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
# Boolean to declare that this target does not want its name mangled.
'android_unmangled_name',
# Map of android build system variables to set.
'aosp_build_settings',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
ALL_MODULES_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Map gyp target types to Android module classes.
MODULE_CLASSES = {
'static_library': 'STATIC_LIBRARIES',
'shared_library': 'SHARED_LIBRARIES',
'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx'
def Sourceify(path):
"""Convert a path to its source directory form. The Android backend does not
support options.generator_output, so this function is a noop."""
return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
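# Example (illustrative only): after writing a static library target 'foo' defined in chrome/foo.gyp,
# these maps could hold entries along the lines of
#   target_outputs['chrome/foo.gyp:foo#target'] = ('path', '<path to the generated output>')
#   target_link_deps['chrome/foo.gyp:foo#target'] = ('static', '<android module name of foo>')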
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, android_top_dir):
self.android_top_dir = android_top_dir
def Write(self, qualified_target, relative_target, base_path, output_filename,
spec, configs, part_of_all, write_alias_target, sdk_version):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
relative_target: qualified target name relative to the root
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for
this target
sdk_version: what to emit for LOCAL_SDK_VERSION in output
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.relative_target = relative_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
self.android_module = self.ComputeAndroidModule(spec)
(self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
self.output = self.output_binary = self.ComputeOutput(spec)
# Standard header.
self.WriteLn('include $(CLEAR_VARS)\n')
# Module class and name.
self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
self.WriteLn('LOCAL_MODULE := ' + self.android_module)
# Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
# The library module classes fail if the stem is set. ComputeOutputParts
# makes sure that stem == modulename in these cases.
if self.android_stem != self.android_module:
self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
if self.toolset == 'host':
self.WriteLn('LOCAL_IS_HOST_MODULE := true')
self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)')
elif sdk_version > 0:
self.WriteLn('LOCAL_MODULE_TARGET_ARCH := '
'$(TARGET_$(GYP_VAR_PREFIX)ARCH)')
self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version)
# Grab output directories; needed for Actions and Rules.
if self.toolset == 'host':
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))')
else:
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))')
self.WriteLn('gyp_shared_intermediate_dir := '
'$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))')
self.WriteLn()
# List files this target depends on so that actions/rules/copies/sources
# can depend on the list.
# TODO: doesn't pull in things through transitive link deps; needed?
target_dependencies = [x[1] for x in deps if x[0] == 'path']
self.WriteLn('# Make sure our deps are built first.')
self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
local_pathify=True)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs)
# GYP generated outputs.
self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
# Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
# on both our dependency targets and our generated files.
self.WriteLn('# Make sure our deps and generated files are built first.')
self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
'$(GYP_GENERATED_OUTPUTS)')
self.WriteLn()
# Sources.
if spec.get('sources', []) or extra_sources:
self.WriteSources(spec, configs, extra_sources)
self.WriteTarget(spec, configs, deps, link_deps, part_of_all,
write_alias_target)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = ('path', self.output_binary)
# Update global list of link dependencies.
if self.type == 'static_library':
target_link_deps[qualified_target] = ('static', self.android_module)
elif self.type == 'shared_library':
target_link_deps[qualified_target] = ('shared', self.android_module)
self.fp.close()
return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
"""
for action in actions:
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Action for target "%s" writes output to local path '
'"%s".' % (self.target, out))
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
# Prepare the actual command.
command = gyp.common.EncodePOSIXShellList(action['action'])
if 'message' in action:
quiet_cmd = 'Gyp action: %s ($@)' % action['message']
else:
quiet_cmd = 'Gyp action: %s ($@)' % name
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the gyp_*
# variables for the action rule with an absolute version so that the
# output goes in the right place.
# Only write the gyp_* rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# Android's envsetup.sh adds a number of directories to the path including
# the built host binary directory. This causes actions/rules invoked by
# gyp to sometimes use these instead of system versions, e.g. bison.
# The built host binaries may not be suitable, and can cause errors.
# So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
# set by envsetup.
self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
% main_output)
# Don't allow spaces in input/output filenames, but make an exception for
# filenames which start with '$(' since it's okay for there to be spaces
# inside of make function/macro invocations.
for input in inputs:
if not input.startswith('$(') and ' ' in input:
raise gyp.common.GypError(
'Action input filename "%s" in target %s contains a space' %
(input, self.target))
for output in outputs:
if not output.startswith('$(') and ' ' in output:
raise gyp.common.GypError(
'Action output filename "%s" in target %s contains a space' %
(output, self.target))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, ' '.join(map(self.LocalPathify, inputs))))
self.WriteLn('\t@echo "%s"' % quiet_cmd)
self.WriteLn('\t$(hide)%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))
extra_outputs += outputs
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
"""
if len(rules) == 0:
return
for rule in rules:
if len(rule.get('rule_sources', [])) == 0:
continue
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
rule['rule_name']))
self.WriteLn('\n### Generated for rule "%s":' % name)
self.WriteLn('# "%s":' % rule)
inputs = rule.get('inputs')
for rule_source in rule.get('rule_sources', []):
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Rule for target %s writes output to local path %s'
% (self.target, out))
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
extra_outputs += outputs
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.extend(outputs)
components = []
for component in rule['action']:
component = self.ExpandInputRoot(component, rule_source_root,
rule_source_dirname)
if '$(RULE_SOURCES)' in component:
component = component.replace('$(RULE_SOURCES)',
rule_source)
components.append(component)
command = gyp.common.EncodePOSIXShellList(components)
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
if dirs:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
# We set up a rule to build the first output, and then set up
# a rule for each additional output to depend on the first.
outputs = map(self.LocalPathify, outputs)
main_output = outputs[0]
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# See explanation in WriteActions.
self.WriteLn('%s: export PATH := '
'$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)
main_output_deps = self.LocalPathify(rule_source)
if inputs:
main_output_deps += ' '
main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, main_output_deps))
self.WriteLn('\t%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (output, main_output))
self.WriteLn()
self.WriteLn()
def WriteCopies(self, copies, extra_outputs):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
"""
self.WriteLn('### Generated for copy rule.')
variable = make.StringToMakefileVariable(self.relative_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# The Android build system does not allow generation of files into the
# source tree. The destination should start with a variable, which will
# typically be $(gyp_intermediate_dir) or
# $(gyp_shared_intermediate_dir). Note that we can't use an assertion
# because some of the gyp tests depend on this.
if not copy['destination'].startswith('$'):
print ('WARNING: Copy rule for target %s writes output to '
'local path %s' % (self.target, copy['destination']))
# LocalPathify() calls normpath, stripping trailing slashes.
path = Sourceify(self.LocalPathify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
filename)))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
(output, path))
self.WriteLn('\t@echo Copying: $@')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
self.WriteLn()
outputs.append(output)
self.WriteLn('%s = %s' % (variable,
' '.join(map(make.QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteSourceFlags(self, spec, configs):
"""Write out the flags and include paths used to compile source files for
the current target.
Args:
spec, configs: input from gyp.
"""
for configname, config in sorted(configs.iteritems()):
extracted_includes = []
self.WriteLn('\n# Flags passed to both C and C++ files.')
cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
config.get('cflags', []) + config.get('cflags_c', []))
extracted_includes.extend(includes_from_cflags)
self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)
self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
prefix='-D', quoter=make.EscapeCppDefine)
self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
includes = list(config.get('include_dirs', []))
includes.extend(extracted_includes)
includes = map(Sourceify, map(self.LocalPathify, includes))
includes = self.NormalizeIncludePaths(includes)
self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)
self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)
self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
'$(MY_DEFS_$(GYP_CONFIGURATION))')
# Undefine ANDROID for host modules
# TODO: the source code should not use macro ANDROID to tell if it's host
# or target module.
if self.toolset == 'host':
self.WriteLn('# Undefine ANDROID for host modules')
self.WriteLn('LOCAL_CFLAGS += -UANDROID')
self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
'$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
# Android uses separate flags for assembly file invocations, but gyp expects
# the same CFLAGS to be applied:
self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)')
def WriteSources(self, spec, configs, extra_sources):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
We need to handle shared_intermediate directory source files as
a special case by copying them to the intermediate directory and
    treating them as generated sources. Otherwise the Android build
rules won't pick them up.
Args:
spec, configs: input from gyp.
extra_sources: Sources generated from Actions or Rules.
"""
sources = filter(make.Compilable, spec.get('sources', []))
generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
extra_sources = filter(make.Compilable, extra_sources)
# Determine and output the C++ extension used by these sources.
# We simply find the first C++ file and use that extension.
all_sources = sources + extra_sources
local_cpp_extension = '.cpp'
for source in all_sources:
(root, ext) = os.path.splitext(source)
if IsCPPExtension(ext):
local_cpp_extension = ext
break
if local_cpp_extension != '.cpp':
self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)
# We need to move any non-generated sources that are coming from the
# shared intermediate directory out of LOCAL_SRC_FILES and put them
# into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
# that don't match our local_cpp_extension, since Android will only
# generate Makefile rules for a single LOCAL_CPP_EXTENSION.
local_files = []
for source in sources:
(root, ext) = os.path.splitext(source)
if '$(gyp_shared_intermediate_dir)' in source:
extra_sources.append(source)
elif '$(gyp_intermediate_dir)' in source:
extra_sources.append(source)
elif IsCPPExtension(ext) and ext != local_cpp_extension:
extra_sources.append(source)
else:
local_files.append(os.path.normpath(os.path.join(self.path, source)))
# For any generated source, if it is coming from the shared intermediate
# directory then we add a Make rule to copy them to the local intermediate
# directory first. This is because the Android LOCAL_GENERATED_SOURCES
# must be in the local module intermediate directory for the compile rules
# to work properly. If the file has the wrong C++ extension, then we add
# a rule to copy that to intermediates and use the new version.
final_generated_sources = []
    # If a source file gets copied, we still need to add the original source
# directory as header search path, for GCC searches headers in the
# directory that contains the source file by default.
origin_src_dirs = []
for source in extra_sources:
local_file = source
if not '$(gyp_intermediate_dir)/' in local_file:
basename = os.path.basename(local_file)
local_file = '$(gyp_intermediate_dir)/' + basename
(root, ext) = os.path.splitext(local_file)
if IsCPPExtension(ext) and ext != local_cpp_extension:
local_file = root + local_cpp_extension
if local_file != source:
self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
self.WriteLn('\tmkdir -p $(@D); cp $< $@')
origin_src_dirs.append(os.path.dirname(source))
final_generated_sources.append(local_file)
# We add back in all of the non-compilable stuff to make sure that the
# make rules have dependencies on them.
final_generated_sources.extend(generated_not_sources)
self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')
origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')
self.WriteList(local_files, 'LOCAL_SRC_FILES')
# Write out the flags used to compile the source; this must be done last
# so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if int(spec.get('android_unmangled_name', 0)):
assert self.type != 'shared_library' or self.target.startswith('lib')
return self.target
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
else:
suffix = '_gyp'
if self.path:
middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
else:
middle = make.StringToMakefileVariable(self.target)
return ''.join([prefix, middle, suffix])
def ComputeOutputParts(self, spec):
"""Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
"""
assert self.type != 'loadable_module' # TODO: not supported?
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.a'
elif self.type == 'shared_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.so'
elif self.type == 'none':
target_ext = '.stamp'
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
if self.type != 'static_library' and self.type != 'shared_library':
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
target_stem = target_prefix + target
return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
return ''.join(self.ComputeOutputParts(spec))
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
if self.type == 'executable':
# We install host executables into shared_intermediate_dir so they can be
# run by gyp rules that refer to PRODUCT_DIR.
path = '$(gyp_shared_intermediate_dir)'
elif self.type == 'shared_library':
if self.toolset == 'host':
path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
else:
path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
else:
# Other targets just get built into their intermediate dir.
if self.toolset == 'host':
path = ('$(call intermediates-dir-for,%s,%s,true,,'
'$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
self.android_module))
else:
path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
% (self.android_class, self.android_module))
assert spec.get('product_dir') is None # TODO: not supported?
return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
normalized.append(path)
return normalized
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
      A tuple of lists: (clean_cflags, include_paths). "-I.." is trimmed.
"""
clean_cflags = []
include_paths = []
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
def FilterLibraries(self, libraries):
"""Filter the 'libraries' key to separate things that shouldn't be ldflags.
Library entries that look like filenames should be converted to android
module names instead of being passed to the linker as flags.
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules, ldflags)
"""
static_lib_modules = []
dynamic_lib_modules = []
ldflags = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
if lib.startswith('-l'):
ldflags.append(lib)
return (static_lib_modules, dynamic_lib_modules, ldflags)
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
"""Write Makefile code to specify the link flags and library dependencies.
spec, configs: input from gyp.
link_deps: link dependency list; see ComputeDeps()
"""
# Libraries (i.e. -lfoo)
# These must be included even for static libraries as some of them provide
# implicit include paths through the build system.
libraries = gyp.common.uniquer(spec.get('libraries', []))
static_libs, dynamic_libs, ldflags_libs = self.FilterLibraries(libraries)
if self.type != 'static_library':
for configname, config in sorted(configs.iteritems()):
ldflags = list(config.get('ldflags', []))
self.WriteLn('')
self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
self.WriteList(ldflags_libs, 'LOCAL_GYP_LIBS')
self.WriteLn('LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) '
'$(LOCAL_GYP_LIBS)')
# Link dependencies (i.e. other gyp targets this target depends on)
# These need not be included for static libraries as within the gyp build
# we do not use the implicit include path mechanism.
if self.type != 'static_library':
static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
else:
static_link_deps = []
shared_link_deps = []
# Only write the lists if they are non-empty.
if static_libs or static_link_deps:
self.WriteLn('')
self.WriteList(static_libs + static_link_deps,
'LOCAL_STATIC_LIBRARIES')
self.WriteLn('# Enable grouping to fix circular references')
self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
if dynamic_libs or shared_link_deps:
self.WriteLn('')
self.WriteList(dynamic_libs + shared_link_deps,
'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
write_alias_target):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for this
target
"""
self.WriteLn('### Rules for final target.')
if self.type != 'none':
self.WriteTargetFlags(spec, configs, link_deps)
settings = spec.get('aosp_build_settings', {})
if settings:
self.WriteLn('### Set directly by aosp_build_settings.')
for k, v in settings.iteritems():
if isinstance(v, list):
self.WriteList(v, k)
else:
self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v)))
self.WriteLn('')
# Add to the set of targets which represent the gyp 'all' target. We use the
# name 'gyp_all_modules' as the Android build system doesn't allow the use
# of the Make target 'all' and because 'all_modules' is the equivalent of
# the Make target 'all' on Android.
if part_of_all and write_alias_target:
self.WriteLn('# Add target alias to "gyp_all_modules" target.')
self.WriteLn('.PHONY: gyp_all_modules')
self.WriteLn('gyp_all_modules: %s' % self.android_module)
self.WriteLn('')
# Add an alias from the gyp target name to the Android module name. This
# simplifies manual builds of the target, and is required by the test
# framework.
if self.target != self.android_module and write_alias_target:
self.WriteLn('# Alias gyp target name.')
self.WriteLn('.PHONY: %s' % self.target)
self.WriteLn('%s: %s' % (self.target, self.android_module))
self.WriteLn('')
# Add the command to trigger build of the target type depending
# on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
# NOTE: This has to come last!
modifier = ''
if self.toolset == 'host':
modifier = 'HOST_'
if self.type == 'static_library':
self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
elif self.type == 'shared_library':
self.WriteLn('LOCAL_PRELINK_MODULE := false')
self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
elif self.type == 'executable':
self.WriteLn('LOCAL_CXX_STL := libc++_static')
# Executables are for build and test purposes only, so they're installed
# to a directory that doesn't get included in the system image.
self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
else:
self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
if self.toolset == 'target':
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
else:
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')
self.WriteLn()
self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
self.WriteLn()
self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) touch $@')
self.WriteLn()
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=')
def WriteList(self, value_list, variable=None, prefix='',
quoter=make.QuoteIfNecessary, local_pathify=False):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
if local_pathify:
value_list = [self.LocalPathify(l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def LocalPathify(self, path):
"""Convert a subdirectory-relative path into a normalized path which starts
with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
Absolute paths, or paths that contain variables, are just normalized."""
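    # Illustrative example (hypothetical path): with self.path == 'base/bar',
    # LocalPathify('src/a.c') returns '$(LOCAL_PATH)/base/bar/src/a.c';
    # '$(gyp_intermediate_dir)/a.c' and '/abs/a.c' are only normpath'd.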
if '$(' in path or os.path.isabs(path):
# path is not a file in the project tree in this case, but calling
# normpath is still important for trimming trailing slashes.
return os.path.normpath(path)
local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
local_path = os.path.normpath(local_path)
# Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
# - i.e. that the resulting path is still inside the project tree. The
# path may legitimately have ended up containing just $(LOCAL_PATH), though,
# so we don't look for a slash.
assert local_path.startswith('$(LOCAL_PATH)'), (
'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
return local_path
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return os.path.normpath(path)
def PerformBuild(data, configurations, params):
# The android backend only supports the default configuration.
options = params['options']
makefile = os.path.abspath(os.path.join(options.toplevel_dir,
'GypAndroid.mk'))
env = dict(os.environ)
env['ONE_SHOT_MAKEFILE'] = makefile
arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
print 'Building: %s' % arguments
subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
limit_to_target_all = generator_flags.get('limit_to_target_all', False)
write_alias_targets = generator_flags.get('write_alias_targets', True)
sdk_version = generator_flags.get('aosp_sdk_version', 0)
android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'GypAndroid' + options.suffix + '.mk'
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(header)
# We set LOCAL_PATH just once, here, to the top of the project tree. This
# allows all the other paths we use to be relative to the Android.mk file,
# as the Android build system expects.
root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
android_modules = {}
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
relative_build_file = gyp.common.RelativePath(build_file,
options.toplevel_dir)
build_files.add(relative_build_file)
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
part_of_all = qualified_target in needed_targets
if limit_to_target_all and not part_of_all:
continue
relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
toolset)
writer = AndroidMkWriter(android_top_dir)
android_module = writer.Write(qualified_target, relative_target, base_path,
output_file, spec, configs,
part_of_all=part_of_all,
write_alias_target=write_alias_targets,
sdk_version=sdk_version)
if android_module in android_modules:
print ('ERROR: Android module names must be unique. The following '
'targets both generate Android module name %s.\n %s\n %s' %
(android_module, android_modules[android_module],
qualified_target))
return
android_modules[android_module] = qualified_target
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)
root_makefile.write('GYP_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_MULTILIB ?= first\n')
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
root_makefile.write('\n')
if write_alias_targets:
root_makefile.write(ALL_MODULES_FOOTER)
root_makefile.close()
| mit |
xtr4nge/FruityProxy | plugins/plugin.py | 1 | 1262 | #!/usr/bin/env python
# Copyright (C) 2015-2016 xtr4nge [_AT_] gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
try:
from mitmproxy import controller, proxy # mitmproxy 0.17
from mitmproxy.proxy.server import ProxyServer # mitmproxy 0.17
except:
from libmproxy import controller, proxy # mitmproxy 0.15
from libmproxy.proxy.server import ProxyServer # mitmproxy 0.15
import logging
from configobj import ConfigObj
class Plugin(object):
name = "Plugin"
version = "1.0"
config = ConfigObj("fruityproxy.conf")
def request(self, flow):
pass
def response(self, flow):
pass
| lgpl-3.0 |
achals/servo | tests/wpt/css-tests/css21_dev/xhtml1print/reference/support/fonts/makegsubfonts.py | 1616 | 14125 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
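# Illustrative input (hypothetical contents): each non-comment line of
# gsubtest-features.txt is expected to be tab-separated, e.g. "liga\t...",
# and only the leading tag ("liga") is kept in mapping.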
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
| mpl-2.0 |
byterom/android_external_chromium_org | third_party/protobuf/python/google/protobuf/internal/type_checkers.py | 527 | 12163 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides type checking routines.
This module defines type checking utilities in the forms of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field typed and their
coresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
__author__ = '[email protected] (Will Robinson)'
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def GetTypeChecker(cpp_type, field_type):
"""Returns a type checker for a message field of the specified types.
Args:
cpp_type: C++ type of the field (see descriptor.py).
field_type: Protocol message field type (see descriptor.py).
Returns:
An instance of TypeChecker which can be used to verify the types
of values assigned to a field of the specified type.
"""
if (cpp_type == _FieldDescriptor.CPPTYPE_STRING and
field_type == _FieldDescriptor.TYPE_STRING):
return UnicodeValueChecker()
return _VALUE_CHECKERS[cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):
"""Type checker used to catch type errors as early as possible
when the client is setting scalar fields in protocol messages.
"""
def __init__(self, *acceptable_types):
self._acceptable_types = acceptable_types
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, self._acceptable_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), self._acceptable_types))
raise TypeError(message)
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):
"""Checker used for integer fields. Performs type-check and range check."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (int, long)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int, long)))
raise TypeError(message)
if not self._MIN <= proposed_value <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
class UnicodeValueChecker(object):
"""Checker used for string fields."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (str, unicode)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (str, unicode)))
raise TypeError(message)
# If the value is of type 'str' make sure that it is in 7-bit ASCII
# encoding.
if isinstance(proposed_value, str):
try:
unicode(proposed_value, 'ascii')
except UnicodeDecodeError:
raise ValueError('%.1024r has type str, but isn\'t in 7-bit ASCII '
'encoding. Non-ASCII strings must be converted to '
'unicode objects before being added.' %
(proposed_value))
class Int32ValueChecker(IntValueChecker):
# We're sure to use ints instead of longs here since comparison may be more
# efficient.
_MIN = -2147483648
_MAX = 2147483647
class Uint32ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
_MIN = -(1 << 63)
_MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 64) - 1
# Type-checkers for all scalar CPPTYPEs.
_VALUE_CHECKERS = {
_FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
_FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
_FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_STRING: TypeChecker(str),
}
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type. This
# byte size includes tag information and any other additional space
# associated with serializing "value".
TYPE_TO_BYTE_SIZE_FN = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
}
# Maps from field types to encoder constructors.
TYPE_TO_ENCODER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
}
# Maps from field types to sizer constructors.
TYPE_TO_SIZER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
}
# Maps from field type to a decoder constructor.
TYPE_TO_DECODER = {
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
}
# Maps from field type to expected wiretype.
FIELD_TYPE_TO_WIRE_TYPE = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_STRING:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
_FieldDescriptor.TYPE_MESSAGE:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_BYTES:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
}
| bsd-3-clause |
Luffin/powerline | powerline/renderers/vim.py | 32 | 5785 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import vim
from powerline.bindings.vim import vim_get_func, vim_getoption, environ, current_tabpage, get_vim_encoding
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
from powerline.theme import Theme
from powerline.lib.unicode import unichr, register_strwidth_error
vim_mode = vim_get_func('mode', rettype='unicode')
if int(vim.eval('v:version')) >= 702:
_vim_mode = vim_mode
vim_mode = lambda: _vim_mode(1)
mode_translations = {
unichr(ord('V') - 0x40): '^V',
unichr(ord('S') - 0x40): '^S',
}
class VimRenderer(Renderer):
'''Powerline vim segment renderer.'''
character_translations = Renderer.character_translations.copy()
character_translations[ord('%')] = '%%'
segment_info = Renderer.segment_info.copy()
segment_info.update(environ=environ)
def __init__(self, *args, **kwargs):
if not hasattr(vim, 'strwidth'):
# Hope nobody want to change this at runtime
if vim.eval('&ambiwidth') == 'double':
kwargs = dict(**kwargs)
kwargs['ambigious'] = 2
super(VimRenderer, self).__init__(*args, **kwargs)
self.hl_groups = {}
self.prev_highlight = None
self.strwidth_error_name = register_strwidth_error(self.strwidth)
self.encoding = get_vim_encoding()
def shutdown(self):
self.theme.shutdown()
for match in self.local_themes.values():
if 'theme' in match:
match['theme'].shutdown()
def add_local_theme(self, matcher, theme):
if matcher in self.local_themes:
raise KeyError('There is already a local theme with given matcher')
self.local_themes[matcher] = theme
def get_matched_theme(self, match):
try:
return match['theme']
except KeyError:
match['theme'] = Theme(theme_config=match['config'], main_theme_config=self.theme_config, **self.theme_kwargs)
return match['theme']
def get_theme(self, matcher_info):
if matcher_info is None:
return self.get_matched_theme(self.local_themes[None])
for matcher in self.local_themes.keys():
if matcher and matcher(matcher_info):
return self.get_matched_theme(self.local_themes[matcher])
else:
return self.theme
if hasattr(vim, 'strwidth'):
if sys.version_info < (3,):
def strwidth(self, string):
# Does not work with tabs, but neither is strwidth from default
# renderer
return vim.strwidth(string.encode(self.encoding, 'replace'))
else:
@staticmethod
def strwidth(string):
return vim.strwidth(string)
def get_segment_info(self, segment_info, mode):
return segment_info or self.segment_info
def render(self, window=None, window_id=None, winnr=None, is_tabline=False):
'''Render all segments.'''
segment_info = self.segment_info.copy()
if window is vim.current.window:
mode = vim_mode()
mode = mode_translations.get(mode, mode)
else:
mode = 'nc'
segment_info.update(
window=window,
mode=mode,
window_id=window_id,
winnr=winnr,
buffer=window.buffer,
tabpage=current_tabpage(),
encoding=self.encoding,
)
segment_info['tabnr'] = segment_info['tabpage'].number
segment_info['bufnr'] = segment_info['buffer'].number
if is_tabline:
winwidth = int(vim_getoption('columns'))
else:
winwidth = segment_info['window'].width
statusline = super(VimRenderer, self).render(
mode=mode,
width=winwidth,
segment_info=segment_info,
matcher_info=(None if is_tabline else segment_info),
)
statusline = statusline.encode(self.encoding, self.strwidth_error_name)
return statusline
def reset_highlight(self):
self.hl_groups.clear()
def hlstyle(self, fg=None, bg=None, attrs=None):
'''Highlight a segment.
If an argument is None, the argument is ignored. If an argument is
False, the argument is reset to the terminal defaults. If an argument
is a valid color or attribute, it’s added to the vim highlight group.
'''
# In order not to hit E541, two consecutive identical highlighting
# specifiers may be squashed into one.
attrs = attrs or 0 # Normalize `attrs`
if (fg, bg, attrs) == self.prev_highlight:
return ''
self.prev_highlight = (fg, bg, attrs)
# We don’t need to explicitly reset attributes in vim, so skip those
# calls
if not attrs and not bg and not fg:
return ''
if not (fg, bg, attrs) in self.hl_groups:
hl_group = {
'ctermfg': 'NONE',
'guifg': None,
'ctermbg': 'NONE',
'guibg': None,
'attrs': ['NONE'],
'name': '',
}
if fg is not None and fg is not False:
hl_group['ctermfg'] = fg[0]
hl_group['guifg'] = fg[1]
if bg is not None and bg is not False:
hl_group['ctermbg'] = bg[0]
hl_group['guibg'] = bg[1]
if attrs:
hl_group['attrs'] = []
if attrs & ATTR_BOLD:
hl_group['attrs'].append('bold')
if attrs & ATTR_ITALIC:
hl_group['attrs'].append('italic')
if attrs & ATTR_UNDERLINE:
hl_group['attrs'].append('underline')
hl_group['name'] = (
'Pl_'
+ str(hl_group['ctermfg']) + '_'
+ str(hl_group['guifg']) + '_'
+ str(hl_group['ctermbg']) + '_'
+ str(hl_group['guibg']) + '_'
+ ''.join(hl_group['attrs'])
)
self.hl_groups[(fg, bg, attrs)] = hl_group
vim.command('hi {group} ctermfg={ctermfg} guifg={guifg} guibg={guibg} ctermbg={ctermbg} cterm={attrs} gui={attrs}'.format(
group=hl_group['name'],
ctermfg=hl_group['ctermfg'],
guifg='#{0:06x}'.format(hl_group['guifg']) if hl_group['guifg'] is not None else 'NONE',
ctermbg=hl_group['ctermbg'],
guibg='#{0:06x}'.format(hl_group['guibg']) if hl_group['guibg'] is not None else 'NONE',
attrs=','.join(hl_group['attrs']),
))
return '%#' + self.hl_groups[(fg, bg, attrs)]['name'] + '#'
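# Illustrative example (values made up): calling hlstyle(fg=(231, 0xffffff),
# bg=(236, 0x303030)) defines a group named 'Pl_231_16777215_236_3158064_NONE'
# and returns the statusline chunk '%#Pl_231_16777215_236_3158064_NONE#'.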
renderer = VimRenderer
| mit |
2014c2g2/teamwork | exts/w2/static/Brython2.0.0-20140209-164925/Lib/browser/indexed_db.py | 100 | 2966 | class EventListener:
def __init__(self, events=[]):
self._events=events
def append(self, event):
self._events.append(event)
def fire(self, e):
for _event in self._events:
_event(e)
class IndexedDB:
def __init__(self):
if not __BRYTHON__.has_indexedDB:
raise NotImplementedError("Your browser doesn't support indexedDB")
return
self._indexedDB=__BRYTHON__.indexedDB()
self._db=None
self._version=None
def _onsuccess(self, event):
self._db=event.target.result
def open(self, name, onsuccess, version=1.0, onerror=None,
onupgradeneeded=None):
self._version=version
_result=self._indexedDB.open(name, version)
_success=EventListener([self._onsuccess, onsuccess])
_result.onsuccess=_success.fire
_result.onupgradeneeded=onupgradeneeded
#if onerror is None:
def onerror(e):
print("onerror: %s:%s" % (e.type, e.target.result))
def onblocked(e):
print("blocked: %s:%s" % (e.type, e.result))
_result.onerror=onerror
_result.onblocked=onblocked
def transaction(self, entities, mode='read'):
return Transaction(self._db.transaction(entities, mode))
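# Minimal usage sketch (hypothetical store/handler names, runs only under
# Brython, and assumes the 'books' store was created in onupgradeneeded):
#
#   db = IndexedDB()
#
#   def on_open(event):
#       store = db.transaction(['books'], 'readwrite').objectStore('books')
#       store.put({'title': 'Brython'}, 1)
#
#   db.open('library', on_open)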
class Transaction:
def __init__(self, transaction):
self._transaction=transaction
def objectStore(self, name):
return ObjectStore(self._transaction.objectStore(name))
class ObjectStore:
def __init__(self, objectStore):
self._objectStore=objectStore
self._data=[]
def clear(self, onsuccess=None, onerror=None):
_result=self._objectStore.clear()
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def _helper(self, func, object, onsuccess=None, onerror=None):
_result=func(object)
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def put(self, obj, key=None, onsuccess=None, onerror=None):
_r = self._objectStore.put(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
def add(self, obj, key, onsuccess=None, onerror=None):
_r = self._objectStore.add(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
#self._helper(self._objectStore.add, object, onsuccess, onerror)
def delete(self, index, onsuccess=None, onerror=None):
self._helper(self._objectStore.delete, index, onsuccess, onerror)
def query(self, *args):
self._data=[]
def onsuccess(event):
cursor=event.target.result
if cursor is not None:
self._data.append(cursor.value)
getattr(cursor, 'continue')()  # 'continue' is a Python keyword, so call the cursor method via getattr
self._objectStore.openCursor(args).onsuccess=onsuccess
def fetchall(self):
yield self._data
def get(self, key, onsuccess=None, onerror=None):
self._helper(self._objectStore.get, key, onsuccess, onerror)
| gpl-2.0 |
phlax/translate | translate/convert/po2prop.py | 3 | 11398 | # -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert Gettext PO localization files to Java/Mozilla .properties files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/prop2po.html
for examples and usage instructions.
"""
import six
import warnings
from translate.convert import accesskey, convert
from translate.misc import quote
from translate.storage import po, properties
eol = u"\n"
def applytranslation(key, propunit, inunit, mixedkeys):
"""applies the translation for key in the po unit to the prop unit"""
# this converts the po-style string to a prop-style string
value = inunit.target
# handle mixed keys
for labelsuffix in properties.labelsuffixes:
if key.endswith(labelsuffix):
if key in mixedkeys:
value, akey = accesskey.extract(value)
break
else:
for akeysuffix in properties.accesskeysuffixes:
if key.endswith(akeysuffix):
if key in mixedkeys:
label, value = accesskey.extract(value)
if not value:
warnings.warn("Could not find accesskey for %s" % key)
# Use the source language accesskey
label, value = accesskey.extract(inunit.source)
else:
original = propunit.source
# For the sake of diffs we keep the case of the
# accesskey the same if we know the translation didn't
# change. Casing matters in XUL.
if value == propunit.source and original.lower() == value.lower():
if original.isupper():
value = value.upper()
elif original.islower():
value = value.lower()
return value
class reprop(object):
def __init__(self, templatefile, inputstore, personality, encoding=None,
remove_untranslated=False):
self.templatefile = templatefile
self.inputstore = inputstore
self.personality = properties.get_dialect(personality)
self.encoding = encoding
if self.encoding is None:
self.encoding = self.personality.default_encoding
self.remove_untranslated = remove_untranslated
self.mixer = accesskey.UnitMixer(properties.labelsuffixes,
properties.accesskeysuffixes)
def convertstore(self, includefuzzy=False):
self.includefuzzy = includefuzzy
self.inmultilinemsgid = False
self.inecho = False
self.inputstore.makeindex()
if self.personality.name == "gaia":
self._explode_gaia_plurals()
outputlines = []
# readlines() doesn't work for UTF-16, so we read() and splitlines(True) instead
content = self.templatefile.read().decode(self.encoding)
for line in content.splitlines(True):
outputstr = self.convertline(line)
outputlines.append(outputstr)
return u"".join(outputlines).encode(self.encoding)
def _handle_accesskeys(self, inunit, currkey):
value = inunit.target
if self.personality.name == "mozilla":
keys = inunit.getlocations()
mixedkeys = self.mixer.match_entities(keys)
for key in keys:
if key == currkey and key in self.inputstore.locationindex:
propunit = self.inputstore.locationindex[key] # find the prop
value = applytranslation(key, propunit, inunit, mixedkeys)
break
return value
def _explode_gaia_plurals(self):
"""Explode the gaia plurals."""
from translate.lang import data
for unit in self.inputstore.units:
if not unit.hasplural():
continue
if unit.isfuzzy() and not self.includefuzzy or not unit.istranslated():
continue
names = data.cldr_plural_categories
location = unit.getlocations()[0]
for category, text in zip(names, unit.target.strings):
# TODO: for now we assume all forms are present. We need to
# fill in the rest after mapping things to the proper CLDR names.
if category == 'zero':
# [zero] cases are translated as separate units
continue
new_unit = self.inputstore.addsourceunit(u"fish") # not used
new_location = '%s[%s]' % (location, category)
new_unit.addlocation(new_location)
new_unit.target = text
self.inputstore.locationindex[new_location] = new_unit
# We don't want the plural marker to be translated:
del self.inputstore.locationindex[location]
def convertline(self, line):
returnline = u""
# handle multiline msgid if we're in one
if self.inmultilinemsgid:
msgid = quote.rstripeol(line).strip()
# see if there's more
self.inmultilinemsgid = (msgid[-1:] == '\\')
# if we're echoing...
if self.inecho:
returnline = line
# otherwise, this could be a comment
elif line.strip()[:1] == '#':
returnline = quote.rstripeol(line) + eol
else:
line = quote.rstripeol(line)
delimiter_char, delimiter_pos = self.personality.find_delimiter(line)
if quote.rstripeol(line)[-1:] == '\\':
self.inmultilinemsgid = True
if delimiter_pos == -1:
key = self.personality.key_strip(line)
delimiter = " %s " % self.personality.delimiters[0]
else:
key = self.personality.key_strip(line[:delimiter_pos])
# Calculate space around the equal sign
prespace = line[line.find(' ', len(key)):delimiter_pos]
postspacestart = len(line[delimiter_pos+1:])
postspaceend = len(line[delimiter_pos+1:].lstrip())
postspace = line[delimiter_pos+1:delimiter_pos+(postspacestart-postspaceend)+1]
delimiter = prespace + delimiter_char + postspace
if key in self.inputstore.locationindex:
unit = self.inputstore.locationindex[key]
if unit is None or not unit.istranslated() and bool(unit.source) and self.remove_untranslated:
returnline = u""
self.inecho = False
else:
if unit.isfuzzy() and not self.includefuzzy or len(unit.target) == 0:
value = unit.source
else:
value = self._handle_accesskeys(unit, key)
self.inecho = False
assert isinstance(value, six.text_type)
returnline = "%(key)s%(del)s%(value)s%(term)s%(eol)s" % {
"key": "%s%s%s" % (self.personality.key_wrap_char,
key,
self.personality.key_wrap_char),
"del": delimiter,
"value": "%s%s%s" % (self.personality.value_wrap_char,
self.personality.encode(value),
self.personality.value_wrap_char),
"term": self.personality.pair_terminator,
"eol": eol,
}
else:
self.inecho = True
returnline = line + eol
assert isinstance(returnline, six.text_type)
return returnline
def convertstrings(inputfile, outputfile, templatefile, personality="strings",
includefuzzy=False, encoding=None, outputthreshold=None,
remove_untranslated=False):
""".strings specific convertor function"""
return convertprop(inputfile, outputfile, templatefile,
personality="strings", includefuzzy=includefuzzy,
encoding=encoding, outputthreshold=outputthreshold,
remove_untranslated=remove_untranslated)
def convertmozillaprop(inputfile, outputfile, templatefile,
includefuzzy=False, remove_untranslated=False,
outputthreshold=None):
"""Mozilla specific convertor function"""
return convertprop(inputfile, outputfile, templatefile,
personality="mozilla", includefuzzy=includefuzzy,
remove_untranslated=remove_untranslated,
outputthreshold=outputthreshold)
def convertprop(inputfile, outputfile, templatefile, personality="java",
includefuzzy=False, encoding=None, remove_untranslated=False,
outputthreshold=None):
inputstore = po.pofile(inputfile)
if not convert.should_output_store(inputstore, outputthreshold):
return False
if templatefile is None:
raise ValueError("must have template file for properties files")
# convertor = po2prop()
else:
convertor = reprop(templatefile, inputstore, personality, encoding,
remove_untranslated)
outputprop = convertor.convertstore(includefuzzy)
outputfile.write(outputprop)
return True
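# Rough usage sketch (hypothetical file names; a template .properties file is
# mandatory):
#
#   with open('messages.po', 'rb') as po_file, \
#           open('template.properties', 'rb') as template_file, \
#           open('messages.properties', 'wb') as out_file:
#       convertprop(po_file, out_file, template_file, personality='java')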
formats = {
("po", "properties"): ("properties", convertprop),
("po", "lang"): ("lang", convertprop),
("po", "strings"): ("strings", convertstrings),
}
def main(argv=None):
# handle command line options
parser = convert.ConvertOptionParser(formats, usetemplates=True,
description=__doc__)
parser.add_option(
"", "--personality", dest="personality",
default=properties.default_dialect, type="choice",
choices=list(properties.dialects.keys()),
help="override the input file format: %s (for .properties files, default: %s)" % (
", ".join(six.iterkeys(properties.dialects)), properties.default_dialect),
metavar="TYPE")
parser.add_option(
"", "--encoding", dest="encoding", default=None,
help="override the encoding set by the personality",
metavar="ENCODING")
parser.add_threshold_option()
parser.add_fuzzy_option()
parser.add_remove_untranslated_option()
parser.passthrough.append("personality")
parser.passthrough.append("encoding")
parser.run(argv)
if __name__ == '__main__':
main()
| gpl-2.0 |
mordred-descriptor/mordred | mordred/_util.py | 1 | 2230 | from __future__ import print_function
import os
import sys
import numpy as np
def parse_enum(enum, v):
if isinstance(v, enum):
return v
else:
return enum[v]
def atoms_to_numpy(f, mol, dtype="float"):
return np.fromiter((f(a) for a in mol.GetAtoms()), dtype, mol.GetNumAtoms())
def conformer_to_numpy(conf):
return np.array([list(conf.GetAtomPosition(i)) for i in range(conf.GetNumAtoms())])
class Capture(object):
def __init__(self, target="stderr"):
self.target = target
self.orig = getattr(sys, target)
self.result = []
def write(self, text):
self.result.append(text)
def flush(self):
pass
def __enter__(self):
setattr(sys, self.target, self)
return self
def __exit__(self, *args):
setattr(sys, self.target, self.orig)
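# Capture is meant to be used as a context manager, e.g. (illustrative only,
# noisy_call is a hypothetical function that writes to stderr):
#
#   with Capture('stderr') as cap:
#       noisy_call()
#   messages = ''.join(cap.result)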
class DummyBar(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
return self
def __exit__(self, *args, **kws):
pass
def update(self, *args, **kws):
pass
@classmethod
def write(cls, s, file=sys.stdout, end="\n"):
print(s, file=file, end=end) # noqa: T003
class NotebookWrapper(object):
def __init__(self, **kwargs):
from tqdm import tqdm_notebook
self.bar = tqdm_notebook(**kwargs)
def __enter__(self):
return self
def __exit__(self, *args):
pass
def update(self, *args, **kwargs):
self.bar.update(*args, **kwargs)
def write(self, *args, **kwargs):
self.bar.update(*args, **kwargs)
def PathType(string):
if not os.path.isfile(string):
raise ValueError("file not exists: {}".format(string))
return string
def module_prog(pkg):
return "{} -m {}".format(os.path.basename(sys.executable), pkg)
def to_ordinal(n):
r"""Int to ordinal string.
>>> to_ordinal(1)
'first'
>>> to_ordinal(2)
'second'
>>> to_ordinal(3)
'third'
>>> to_ordinal(4)
'4-th'
>>> to_ordinal(104)
'104-th'
"""
if n == 1:
return "first"
elif n == 2:
return "second"
elif n == 3:
return "third"
else:
return "{}-th".format(n)
| bsd-3-clause |
thinkopensolutions/geraldo | site/newsite/django_1_0/django/utils/datastructures.py | 10 | 13100 | class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def items(self):
item_list = []
for dict_ in self.dicts:
item_list.extend(dict_.items())
return item_list
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
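# Illustrative behaviour: lookups fall through the wrapped dicts in order, so
# the first dict that defines a key wins.
#
#   >>> md = MergeDict({'a': 1}, {'a': 2, 'b': 3})
#   >>> md['a'], md['b']
#   (1, 3)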
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __init__(self, data=None):
if data is None:
data = {}
super(SortedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
for key, value in data:
if key not in self.keyOrder:
self.keyOrder.append(key)
def __deepcopy__(self, memo):
from copy import deepcopy
return self.__class__([(key, deepcopy(value, memo))
for key, value in self.iteritems()])
def __setitem__(self, key, value):
super(SortedDict, self).__setitem__(key, value)
if key not in self.keyOrder:
self.keyOrder.append(key)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
for k in self.keyOrder:
yield k
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, super(SortedDict, self).__getitem__(key)
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return [super(SortedDict, self).__getitem__(k) for k in self.keyOrder]
def itervalues(self):
for key in self.keyOrder:
yield super(SortedDict, self).__getitem__(key)
def update(self, dict_):
for k, v in dict_.items():
self.__setitem__(k, v)
def setdefault(self, key, default):
if key not in self.keyOrder:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
obj = self.__class__(self)
obj.keyOrder = self.keyOrder[:]
return obj
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
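# Illustrative behaviour: iteration and keys() follow insertion order.
#
#   >>> sd = SortedDict()
#   >>> sd['b'] = 1
#   >>> sd['a'] = 2
#   >>> sd.keys()
#   ['b', 'a']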
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError, "Key %r not found in %r" % (key, self)
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__(super(MultiValueDict, self).items())
def __deepcopy__(self, memo=None):
import copy
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"""
Returns the list of values for the passed key. If key doesn't exist,
then an empty list is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
return []
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def setlistdefault(self, key, default_list=()):
if key not in self:
self.setlist(key, default_list)
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key, [])
super(MultiValueDict, self).__setitem__(key, self.getlist(key) + [value])
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def lists(self):
"""Returns a list of (key, list) pairs."""
return super(MultiValueDict, self).items()
def values(self):
"""Returns a list of the last value on every key list."""
return [self[key] for key in self.keys()]
def copy(self):
"""Returns a copy of this object."""
return self.__deepcopy__()
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError, "update expected at most 1 arguments, got %d" % len(args)
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key, []).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key, []).append(value)
except TypeError:
raise ValueError, "MultiValueDict.update() takes either a MultiValueDict or dictionary"
for key, value in kwargs.iteritems():
self.setlistdefault(key, []).append(value)
class DotExpandedDict(dict):
"""
A special dictionary constructor that takes a dictionary in which the keys
may contain dots to specify inner dictionaries. It's confusing, but this
example should make sense.
>>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
'person.1.lastname': ['Willison'], \
'person.2.firstname': ['Adrian'], \
'person.2.lastname': ['Holovaty']})
>>> d
{'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}}
>>> d['person']
{'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}
>>> d['person']['1']
{'lastname': ['Willison'], 'firstname': ['Simon']}
# Gotcha: Results are unpredictable if the dots are "uneven":
>>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1})
{'c': 1}
"""
def __init__(self, key_to_list_mapping):
for k, v in key_to_list_mapping.items():
current = self
bits = k.split('.')
for bit in bits[:-1]:
current = current.setdefault(bit, {})
# Now assign value to current position
try:
current[bits[-1]] = v
except TypeError: # Special-case if current isn't a dict.
current = {bits[-1]: v}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError, self.warning
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wraps accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
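# Illustrative example (not part of Django itself): keys carrying the prefix
# are passed through func, all other keys are returned untouched.
#
#   >>> d = DictWrapper({'name': 'x'}, lambda v: '"%s"' % v, 'quoted_')
#   >>> d['name'], d['quoted_name']
#   ('x', '"x"')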
| lgpl-3.0 |
mixturemodel-flow/tensorflow | tensorflow/tools/compatibility/ast_edits.py | 47 | 18961 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts according to an API change specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import os
import shutil
import sys
import tempfile
import traceback
class APIChangeSpec(object):
"""This class defines the transformations that need to happen.
This class must provide the following fields:
* `function_keyword_renames`: maps function names to a map of old -> new
argument names
* `function_renames`: maps function names to new function names
* `change_to_function`: a set of function names that have changed (for
notifications)
* `function_reorders`: maps functions whose argument order has changed to the
list of arguments in the new order
* `function_handle`: maps function names to custom handlers for the function
For an example, see `TFAPIChangeSpec`.
"""
class _FileEditTuple(collections.namedtuple(
"_FileEditTuple", ["comment", "line", "start", "old", "new"])):
"""Each edit that is recorded by a _FileEditRecorder.
Fields:
comment: A description of the edit and why it was made.
line: The line number in the file where the edit occurs (1-indexed).
start: The line number in the file where the edit occurs (0-indexed).
old: text string to remove (this must match what was in file).
new: text string to add in place of `old`.
"""
__slots__ = ()
class _FileEditRecorder(object):
"""Record changes that need to be done to the file."""
def __init__(self, filename):
# all edits are lists of chars
self._filename = filename
self._line_to_edit = collections.defaultdict(list)
self._errors = []
def process(self, text):
"""Process a list of strings, each corresponding to the recorded changes.
Args:
text: A list of lines of text (assumed to contain newlines)
Returns:
A tuple of the modified text and a textual description of what is done.
Raises:
ValueError: if substitution source location does not have expected text.
"""
change_report = ""
# Iterate of each line
for line, edits in self._line_to_edit.items():
offset = 0
# sort by column so that edits are processed in order, which makes
# indexing adjustments cumulative for changes that change the string
# length
edits.sort(key=lambda x: x.start)
# Extract each line to a list of characters, because mutable lists
# are editable, unlike immutable strings.
char_array = list(text[line - 1])
# Record a description of the change
change_report += "%r Line %d\n" % (self._filename, line)
change_report += "-" * 80 + "\n\n"
for e in edits:
change_report += "%s\n" % e.comment
change_report += "\n Old: %s" % (text[line - 1])
# Make underscore buffers for underlining where in the line the edit was
change_list = [" "] * len(text[line - 1])
change_list_new = [" "] * len(text[line - 1])
# Iterate for each edit
for e in edits:
# Create effective start, end by accounting for change in length due
# to previous edits
start_eff = e.start + offset
end_eff = start_eff + len(e.old)
# Make sure the edit is changing what it should be changing
old_actual = "".join(char_array[start_eff:end_eff])
if old_actual != e.old:
raise ValueError("Expected text %r but got %r" %
("".join(e.old), "".join(old_actual)))
# Make the edit
char_array[start_eff:end_eff] = list(e.new)
# Create the underline highlighting of the before and after
change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
change_list_new[start_eff:end_eff] = "~" * len(e.new)
# Keep track of how to generate effective ranges
offset += len(e.new) - len(e.old)
# Finish the report comment
change_report += " %s\n" % "".join(change_list)
text[line - 1] = "".join(char_array)
change_report += " New: %s" % (text[line - 1])
change_report += " %s\n\n" % "".join(change_list_new)
return "".join(text), change_report, self._errors
def add(self, comment, line, start, old, new, error=None):
"""Add a new change that is needed.
Args:
comment: A description of what was changed
line: Line number (1 indexed)
start: Column offset (0 indexed)
old: old text
new: new text
error: this "edit" is something that cannot be fixed automatically
Returns:
None
"""
self._line_to_edit[line].append(
_FileEditTuple(comment, line, start, old, new))
if error:
self._errors.append("%s:%d: %s" % (self._filename, line, error))
class _ASTCallVisitor(ast.NodeVisitor):
"""AST Visitor that processes function calls.
Updates function calls from old API version to new API version using a given
change spec.
"""
def __init__(self, filename, lines, api_change_spec):
self._filename = filename
self._file_edit = _FileEditRecorder(filename)
self._lines = lines
self._api_change_spec = api_change_spec
def process(self, lines):
return self._file_edit.process(lines)
def generic_visit(self, node):
ast.NodeVisitor.generic_visit(self, node)
def _rename_functions(self, node, full_name):
function_renames = self._api_change_spec.function_renames
try:
new_name = function_renames[full_name]
self._file_edit.add("Renamed function %r to %r" % (full_name,
new_name),
node.lineno, node.col_offset, full_name, new_name)
except KeyError:
pass
def _get_attribute_full_path(self, node):
"""Traverse an attribute to generate a full name e.g. tf.foo.bar.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if the tree was not a simple form.
i.e. `(foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _find_true_position(self, node):
"""Return correct line number and column offset for a given node.
This is necessary mainly because ast.ListComp reports the location of
the first token after the '[' that opens the list comprehension.
Args:
node: Node for which we wish to know the lineno and col_offset
"""
import re
find_open = re.compile("^\s*(\\[).*$")
find_string_chars = re.compile("['\"]")
if isinstance(node, ast.ListComp):
# Strangely, ast.ListComp returns the col_offset of the first token
# after the '[' token which appears to be a bug. Workaround by
# explicitly finding the real start of the list comprehension.
line = node.lineno
col = node.col_offset
# loop over lines
while 1:
# Reverse the text and use a regular expression to search for whitespace
text = self._lines[line-1]
reversed_preceding_text = text[:col][::-1]
# First find if a [ can be found with only whitespace between it and
# col.
m = find_open.match(reversed_preceding_text)
if m:
new_col_offset = col - m.start(1) - 1
return line, new_col_offset
else:
if (reversed_preceding_text=="" or
reversed_preceding_text.isspace()):
line = line - 1
prev_line = self._lines[line - 1]
# TODO(aselle):
# this is poor comment detection, but it is good enough for
# cases where the comment does not contain string literal starting/
# ending characters. If ast gave us start and end locations of the
# ast nodes rather than just start, we could use string literal
# node ranges to filter out spurious #'s that appear in string
# literals.
comment_start = prev_line.find("#")
if comment_start == -1:
col = len(prev_line) -1
elif find_string_chars.search(prev_line[comment_start:]) is None:
col = comment_start
else:
return None, None
else:
return None, None
# Most other nodes return proper locations (the `with` statement notably
# does not), but it is not possible to use that in an argument.
return node.lineno, node.col_offset
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
# Find a simple attribute name path e.g. "tf.foo.bar"
full_name = self._get_attribute_full_path(node.func)
# Make sure the func is marked as being part of a call
node.func.is_function_for_call = True
if full_name:
# Call special handlers
function_handles = self._api_change_spec.function_handle
if full_name in function_handles:
function_handles[full_name](self._file_edit, node)
# Examine any non-keyword argument and make it into a keyword argument
# if reordering required.
function_reorders = self._api_change_spec.function_reorders
function_keyword_renames = (
self._api_change_spec.function_keyword_renames)
if full_name in function_reorders:
reordered = function_reorders[full_name]
for idx, arg in enumerate(node.args):
lineno, col_offset = self._find_true_position(arg)
if lineno is None or col_offset is None:
self._file_edit.add(
"Failed to add keyword %r to reordered function %r"
% (reordered[idx], full_name), arg.lineno, arg.col_offset,
"", "",
error="A necessary keyword argument failed to be inserted.")
else:
keyword_arg = reordered[idx]
if (full_name in function_keyword_renames and
keyword_arg in function_keyword_renames[full_name]):
keyword_arg = function_keyword_renames[full_name][keyword_arg]
self._file_edit.add("Added keyword %r to reordered function %r"
% (reordered[idx], full_name), lineno,
col_offset, "", keyword_arg + "=")
# Examine each keyword argument and convert it to the final renamed form
renamed_keywords = ({} if full_name not in function_keyword_renames else
function_keyword_renames[full_name])
for keyword in node.keywords:
argkey = keyword.arg
argval = keyword.value
if argkey in renamed_keywords:
argval_lineno, argval_col_offset = self._find_true_position(argval)
if argval_lineno is not None and argval_col_offset is not None:
# TODO(aselle): We should scan backward to find the start of the
# keyword key. Unfortunately ast does not give you the location of
# keyword keys, so we are forced to infer it from the keyword arg
# value.
key_start = argval_col_offset - len(argkey) - 1
key_end = key_start + len(argkey) + 1
if (self._lines[argval_lineno - 1][key_start:key_end] ==
argkey + "="):
self._file_edit.add("Renamed keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval_lineno,
argval_col_offset - len(argkey) - 1,
argkey + "=", renamed_keywords[argkey] + "=")
continue
self._file_edit.add(
"Failed to rename keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval.lineno,
argval.col_offset - len(argkey) - 1,
"", "",
error="Failed to find keyword lexographically. Fix manually.")
ast.NodeVisitor.generic_visit(self, node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar].
Args:
node: Node that is of type ast.Attribute
"""
full_name = self._get_attribute_full_path(node)
if full_name:
self._rename_functions(node, full_name)
if full_name in self._api_change_spec.change_to_function:
if not hasattr(node, "is_function_for_call"):
new_text = full_name + "()"
self._file_edit.add("Changed %r to %r"%(full_name, new_text),
node.lineno, node.col_offset, full_name, new_text)
ast.NodeVisitor.generic_visit(self, node)
class ASTCodeUpgrader(object):
"""Handles upgrading a set of Python files using a given API change spec."""
def __init__(self, api_change_spec):
if not isinstance(api_change_spec, APIChangeSpec):
raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
type(api_change_spec))
self._api_change_spec = api_change_spec
def process_file(self, in_filename, out_filename):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
Returns:
A tuple representing number of files processed, log of actions, errors
"""
# Write to a temporary file, just in case we are doing an in-place modify.
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(
in_filename, in_file, out_filename, temp_file)
shutil.move(temp_file.name, out_filename)
return ret
# Broad exceptions are required here because ast throws whatever it wants.
# pylint: disable=broad-except
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
process_errors = []
text = "-" * 80 + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += "-" * 80 + "\n\n"
parsed_ast = None
lines = in_file.readlines()
try:
parsed_ast = ast.parse("".join(lines))
except Exception:
text += "Failed to parse %r\n\n" % in_filename
text += traceback.format_exc()
if parsed_ast:
visitor = _ASTCallVisitor(in_filename, lines, self._api_change_spec)
visitor.visit(parsed_ast)
out_text, new_text, process_errors = visitor.process(lines)
text += new_text
if out_file:
out_file.write(out_text)
text += "\n"
return 1, text, process_errors
# pylint: enable=broad-except
def process_tree(self, root_directory, output_root_directory,
copy_other_files):
"""Processes upgrades on an entire tree of python files in place.
Note that only Python files are processed. If you have custom code in
other languages, you will need to manually upgrade those.
Args:
root_directory: Directory to walk and process.
output_root_directory: Directory to use as base.
copy_other_files: Copy files that are not touched by this converter.
Returns:
A tuple of files processed, the report string for all files, and errors
"""
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." % (
output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" % (
root_directory, output_root_directory))
sys.exit(1)
# Collect list of files to process (we do this to correctly handle if the
# user puts the output directory in some sub directory of the input dir)
files_to_process = []
files_to_copy = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if f.endswith(".py")]
copy_files = [f for f in file_list if not f.endswith(".py")]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_process.append((fullpath, fullpath_output))
if copy_other_files:
for filename in copy_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_copy.append((fullpath, fullpath_output))
file_count = 0
tree_errors = []
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors += l_errors
report += l_report
for input_path, output_path in files_to_copy:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
shutil.copy(input_path, output_path)
return file_count, report, tree_errors
| apache-2.0 |
bcoca/ansible | test/units/galaxy/test_collection_install.py | 15 | 43234 | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import json
import os
import pytest
import re
import shutil
import stat
import tarfile
import yaml
from io import BytesIO, StringIO
from units.compat.mock import MagicMock
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.errors import AnsibleError
from ansible.galaxy import collection, api, dependency_resolution
from ansible.galaxy.dependency_resolution.dataclasses import Candidate, Requirement
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils import context_objects as co
from ansible.utils.display import Display
class RequirementCandidates():
def __init__(self):
self.candidates = []
def func_wrapper(self, func):
def run(*args, **kwargs):
self.candidates = func(*args, **kwargs)
return self.candidates
return run
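# Illustrative use in a test (names are hypothetical): wrap a resolver callable
# so the candidate list it returns is captured for later assertions.
#
#   matches = RequirementCandidates()
#   mock_find_matches = MagicMock(
#       side_effect=matches.func_wrapper(real_find_matches_callable))
#   # ... run the install, then inspect matches.candidates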
def call_galaxy_cli(args):
orig = co.GlobalCLIArgs._Singleton__instance
co.GlobalCLIArgs._Singleton__instance = None
try:
GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run()
finally:
co.GlobalCLIArgs._Singleton__instance = orig
def artifact_json(namespace, name, version, dependencies, server):
json_str = json.dumps({
'artifact': {
'filename': '%s-%s-%s.tar.gz' % (namespace, name, version),
'sha256': '2d76f3b8c4bab1072848107fb3914c345f71a12a1722f25c08f5d3f51f4ab5fd',
'size': 1234,
},
'download_url': '%s/download/%s-%s-%s.tar.gz' % (server, namespace, name, version),
'metadata': {
'namespace': namespace,
'name': name,
'dependencies': dependencies,
},
'version': version
})
return to_text(json_str)
def artifact_versions_json(namespace, name, versions, galaxy_api, available_api_versions=None):
results = []
available_api_versions = available_api_versions or {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
for version in versions:
results.append({
'href': '%s/api/%s/%s/%s/versions/%s/' % (galaxy_api.api_server, api_version, namespace, name, version),
'version': version,
})
if api_version == 'v2':
json_str = json.dumps({
'count': len(versions),
'next': None,
'previous': None,
'results': results
})
if api_version == 'v3':
response = {'meta': {'count': len(versions)},
'data': results,
'links': {'first': None,
'last': None,
'next': None,
'previous': None},
}
json_str = json.dumps(response)
return to_text(json_str)
def error_json(galaxy_api, errors_to_return=None, available_api_versions=None):
errors_to_return = errors_to_return or []
available_api_versions = available_api_versions or {}
response = {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
if api_version == 'v2':
assert len(errors_to_return) <= 1
if errors_to_return:
response = errors_to_return[0]
if api_version == 'v3':
response['errors'] = errors_to_return
json_str = json.dumps(response)
return to_text(json_str)
@pytest.fixture(autouse='function')
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(request, tmp_path_factory):
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
namespace = 'ansible_namespace'
collection = 'collection'
skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
collection_path = os.path.join(test_dir, namespace, collection)
call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir,
'--collection-skeleton', skeleton_path])
dependencies = getattr(request, 'param', None)
if dependencies:
galaxy_yml = os.path.join(collection_path, 'galaxy.yml')
with open(galaxy_yml, 'rb+') as galaxy_obj:
existing_yaml = yaml.safe_load(galaxy_obj)
existing_yaml['dependencies'] = dependencies
galaxy_obj.seek(0)
galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml)))
galaxy_obj.truncate()
# Create a file with +x in the collection so we can test the permissions
execute_path = os.path.join(collection_path, 'runme.sh')
with open(execute_path, mode='wb') as fd:
fd.write(b"echo hi")
os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_IEXEC)
call_galaxy_cli(['build', collection_path, '--output-path', test_dir])
collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection))
return to_bytes(collection_path), to_bytes(collection_tar)
@pytest.fixture()
def galaxy_server():
context.CLIARGS._store = {'ignore_certs': False}
galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
return galaxy_api
def test_build_requirement_from_path(collection_artifact):
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.src == collection_artifact[0]
assert actual.ver == u'0.1.0'
@pytest.mark.parametrize('version', ['1.1.1', '1.1.0', '1.0.0'])
def test_build_requirement_from_path_with_manifest(version, collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': version,
'dependencies': {
'ansible_namespace.collection': '*'
}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
# While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.src == collection_artifact[0]
assert actual.ver == to_text(version)
def test_build_requirement_from_path_invalid_manifest(collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(b"not json")
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_artifact_from_path_no_version(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
# a collection artifact should always contain a valid version
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
expected = (
'^Collection metadata file `.*` at `.*` is expected to have a valid SemVer '
'version value but got {empty_unicode_string!r}$'.
format(empty_unicode_string=u'')
)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
# version may be falsey/arbitrary strings for collections in development
manifest_path = os.path.join(collection_artifact[0], b'galaxy.yml')
metadata = {
'authors': ['Ansible'],
'readme': 'README.md',
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {},
}
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(yaml.safe_dump(metadata)))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
# While the folder name suggests a different collection, we treat galaxy.yml as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.src == collection_artifact[0]
assert actual.ver == u'*'
def test_build_requirement_from_tar(collection_artifact):
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_requirement_dict({'name': to_text(collection_artifact[1])}, concrete_artifact_cm)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.src == to_text(collection_artifact[1])
assert actual.ver == u'0.1.0'
def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
test_file = os.path.join(test_dir, b'fake.tar.gz')
with open(test_file, 'wb') as test_obj:
test_obj.write(b"\x00\x01\x02\x03")
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection artifact at '%s' is not a valid tar file." % to_native(test_file)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(test_file)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'files': [],
'format': 1,
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('FILES.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_files(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'collection_info': {},
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
with pytest.raises(KeyError, match='namespace'):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = b"not a json"
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_name(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.1.9', '2.1.10']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_version_metadata = MagicMock(
namespace='namespace', name='collection',
version='2.1.10', artifact_sha256='', dependencies={}
)
monkeypatch.setattr(api.GalaxyAPI, 'get_collection_version_metadata', mock_version_metadata)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
collections = ['namespace.collection']
requirements_file = None
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', collections[0]])
requirements = cli._require_one_of_collections_requirements(
collections, requirements_file, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.ver == u'2.1.10'
assert actual.src == galaxy_server
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1'
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease_explicit(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:2.0.1-beta.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:2.0.1-beta.1'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1-beta.1'
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.0.3', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
broken_server = copy.copy(galaxy_server)
broken_server.api_server = 'https://broken.com/'
mock_version_list = MagicMock()
mock_version_list.return_value = []
monkeypatch.setattr(broken_server, 'get_collection_versions', mock_version_list)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:>1.0.1'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(
requirements, [broken_server, galaxy_server], concrete_artifact_cm, None, True, False, False
)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'1.0.3'
assert mock_version_list.call_count == 1
assert mock_version_list.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_missing(galaxy_server, monkeypatch, tmp_path_factory):
mock_open = MagicMock()
mock_open.return_value = []
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n* namespace.collection:* (direct request)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch, tmp_path_factory):
mock_open = MagicMock()
mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 401, 'msg', {},
StringIO()), "error")
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "error (HTTP Code: 401, Message: msg)"
with pytest.raises(api.GalaxyError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, False, False)
def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
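    # Wrap find_matches so every candidate the resolver considers gets recorded; the ordering is asserted on below.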
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:==2.0.0'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:==2.0.0'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.0'
assert [c.ver for c in matches.candidates] == [u'2.0.0']
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0')
def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>=2.0.1,<2.0.2'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:>=2.0.1,<2.0.2'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1'
assert [c.ver for c in matches.candidates] == [u'2.0.1']
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1')
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
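    # The return_value set below supersedes the 1.0.x list above; the resolver only ever sees the 2.0.x versions.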
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.2'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:!=2.0.2'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.5'
# should be ordered latest to earliest
assert [c.ver for c in matches.candidates] == [u'2.0.5', u'2.0.4', u'2.0.3', u'2.0.1', u'2.0.0']
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.5']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.5'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:!=2.0.5'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
expected += "* namespace.collection:!=2.0.5 (direct request)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_dep_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
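    # parent.collection 2.0.5 pins namespace.collection != 1.0.0, but 1.0.0 is the only version the mocked
    # server offers, so dependency resolution is expected to fail.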
mock_get_info_return = [
api.CollectionVersionMetadata('parent', 'collection', '2.0.5', None, None, {'namespace.collection': '!=1.0.0'}),
api.CollectionVersionMetadata('namespace', 'collection', '1.0.0', None, None, {}),
]
mock_get_info = MagicMock(side_effect=mock_get_info_return)
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock(side_effect=[['2.0.5'], ['1.0.0']])
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'parent.collection:2.0.5'])
requirements = cli._require_one_of_collections_requirements(
['parent.collection:2.0.5'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
expected += "* namespace.collection:!=1.0.0 (dependency of parent.collection:2.0.5)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_install_installed_collection(monkeypatch, tmp_path_factory, galaxy_server):
mock_installed_collections = MagicMock(return_value=[Candidate('namespace.collection', '1.2.3', None, 'dir')])
monkeypatch.setattr(collection, 'find_existing_collections', mock_installed_collections)
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.2.3', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock(return_value=['1.2.3', '1.3.0'])
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
cli.run()
expected = "Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`."
assert mock_display.mock_calls[1][1][0] == expected
def test_install_collection(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection_tar = collection_artifact[1]
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
output_path = os.path.join(os.path.split(collection_tar)[0])
collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
os.makedirs(os.path.join(collection_path, b'delete_me')) # Create a folder to verify the install cleans out the dir
candidate = Candidate('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')
collection.install(candidate, to_text(output_path), concrete_artifact_cm)
# Ensure the temp directory is empty, nothing is left behind
assert os.listdir(temp_path) == []
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'plugins')).st_mode) == 0o0755
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'README.md')).st_mode) == 0o0644
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'runme.sh')).st_mode) == 0o0755
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
def test_install_collection_with_download(galaxy_server, collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
shutil.rmtree(collection_path)
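    # collection_path ends in .../ansible_namespace/collection; dropping the last two path
    # components gives the collections root directory that install() expects.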
collections_dir = ('%s' % os.path.sep).join(to_text(collection_path).split('%s' % os.path.sep)[:-2])
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
mock_download = MagicMock()
mock_download.return_value = collection_tar
monkeypatch.setattr(concrete_artifact_cm, 'get_galaxy_artifact_path', mock_download)
req = Requirement('ansible_namespace.collection', '0.1.0', 'https://downloadme.com', 'galaxy')
collection.install(req, to_text(collections_dir), concrete_artifact_cm)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
assert mock_download.call_count == 1
assert mock_download.mock_calls[0][1][0].src == 'https://downloadme.com'
assert mock_download.mock_calls[0][1][0].type == 'galaxy'
def test_install_collections_from_tar(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
def test_install_collections_existing_without_force(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
assert os.path.isdir(collection_path)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'README.md', b'docs', b'galaxy.yml', b'playbooks', b'plugins', b'roles', b'runme.sh']
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 1
assert display_msgs[0] == 'Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`.'
for msg in display_msgs:
assert 'WARNING' not in msg
def test_install_missing_metadata_warning(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
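    # Remove both metadata files so the collection on disk can no longer be identified,
    # which should trigger the warning asserted below.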
for file in [b'MANIFEST.json', b'galaxy.yml']:
b_path = os.path.join(collection_path, file)
if os.path.isfile(b_path):
os.unlink(b_path)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert 'WARNING' in display_msgs[0]
# Makes sure we don't get stuck in some recursive loop
@pytest.mark.parametrize('collection_artifact', [
{'ansible_namespace.collection': '>=0.0.1'},
], indirect=True)
def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
assert display_msgs[3] == "ansible_namespace.collection:0.1.0 was installed successfully"
| gpl-3.0 |
pleaseproject/python-for-android | python-modules/twisted/twisted/internet/_signals.py | 57 | 6075 | # -*- test-case-name: twisted.test.test_process,twisted.internet.test.test_process -*-
# Copyright (c) 2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides a uniform interface to the several mechanisms which are
possibly available for dealing with signals.
This module is used to integrate child process termination into a
reactor event loop. This is a challenging feature to provide because
most platforms indicate process termination via SIGCHLD and do not
provide a way to wait for that signal and arbitrary I/O events at the
same time. The naive implementation involves installing a Python
SIGCHLD handler; unfortunately this leads to other syscalls being
interrupted (whenever SIGCHLD is received) and failing with EINTR
(which almost no one is prepared to handle). This interruption can be
disabled via siginterrupt(2) (or one of the equivalent mechanisms);
however, if the SIGCHLD is delivered by the platform to a non-main
thread (not a common occurrence, but difficult to prove impossible),
the main thread (waiting on select() or another event notification
API) may not wake up leading to an arbitrary delay before the child
termination is noticed.
The basic solution to all these issues involves enabling SA_RESTART
(ie, disabling system call interruption) and registering a C signal
handler which writes a byte to a pipe. The other end of the pipe is
registered with the event loop, allowing it to wake up shortly after
SIGCHLD is received. See L{twisted.internet.posixbase._SIGCHLDWaker}
for the implementation of the event loop side of this solution. The
use of a pipe this way is known as the U{self-pipe
trick<http://cr.yp.to/docs/selfpipe.html>}.
The actual solution implemented in this module depends on the version
of Python. From version 2.6, C{signal.siginterrupt} and
C{signal.set_wakeup_fd} allow the necessary C signal handler which
writes to the pipe to be registered with C{SA_RESTART}. Prior to 2.6,
the L{twisted.internet._sigchld} extension module provides similar
functionality.
If neither of these is available, a Python signal handler is used
instead. This is essentially the naive solution mentioned above and
has the problems described there.
"""
import os
try:
from signal import set_wakeup_fd, siginterrupt
except ImportError:
set_wakeup_fd = siginterrupt = None
try:
import signal
except ImportError:
signal = None
from twisted.python.log import msg
try:
from twisted.internet._sigchld import installHandler as _extInstallHandler, \
isDefaultHandler as _extIsDefaultHandler
except ImportError:
_extInstallHandler = _extIsDefaultHandler = None
class _Handler(object):
"""
L{_Handler} is a signal handler which writes a byte to a file descriptor
whenever it is invoked.
@ivar fd: The file descriptor to which to write. If this is C{None},
nothing will be written.
"""
def __init__(self, fd):
self.fd = fd
def __call__(self, *args):
"""
L{_Handler.__call__} is the signal handler. It will write a byte to
the wrapped file descriptor, if there is one.
"""
if self.fd is not None:
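            # A signal handler must never raise, so any error from the write (for example a
            # full pipe) is deliberately swallowed.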
try:
os.write(self.fd, '\0')
except:
pass
def _installHandlerUsingSignal(fd):
"""
Install a signal handler which will write a byte to C{fd} when
I{SIGCHLD} is received.
This is implemented by creating an instance of L{_Handler} with C{fd}
and installing it as the signal handler.
@param fd: The file descriptor to which to write when I{SIGCHLD} is
received.
@type fd: C{int}
"""
if fd == -1:
previous = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
else:
previous = signal.signal(signal.SIGCHLD, _Handler(fd))
if isinstance(previous, _Handler):
return previous.fd
return -1
def _installHandlerUsingSetWakeup(fd):
"""
Install a signal handler which will write a byte to C{fd} when
I{SIGCHLD} is received.
This is implemented by installing an instance of L{_Handler} wrapped
around C{None}, setting the I{SIGCHLD} handler as not allowed to
interrupt system calls, and using L{signal.set_wakeup_fd} to do the
actual writing.
@param fd: The file descriptor to which to write when I{SIGCHLD} is
received.
@type fd: C{int}
"""
if fd == -1:
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
else:
signal.signal(signal.SIGCHLD, _Handler(None))
siginterrupt(signal.SIGCHLD, False)
return set_wakeup_fd(fd)
def _isDefaultHandler():
"""
Determine whether the I{SIGCHLD} handler is the default or not.
"""
return signal.getsignal(signal.SIGCHLD) == signal.SIG_DFL
def _cannotInstallHandler(fd):
"""
Fail to install a signal handler for I{SIGCHLD}.
This implementation is used when the supporting code for the other
implementations is unavailable (on Python versions 2.5 and older where
neither the L{twisted.internet._sigchld} extension nor the standard
L{signal} module is available).
@param fd: Ignored; only for compatibility with the other
implementations of this interface.
@raise RuntimeError: Always raised to indicate no I{SIGCHLD} handler can
be installed.
"""
raise RuntimeError("Cannot install a SIGCHLD handler")
def _cannotDetermineDefault():
raise RuntimeError("No usable signal API available")
if set_wakeup_fd is not None:
msg('using set_wakeup_fd')
installHandler = _installHandlerUsingSetWakeup
isDefaultHandler = _isDefaultHandler
elif _extInstallHandler is not None:
msg('using _sigchld')
installHandler = _extInstallHandler
isDefaultHandler = _extIsDefaultHandler
elif signal is not None:
msg('using signal module')
installHandler = _installHandlerUsingSignal
isDefaultHandler = _isDefaultHandler
else:
    msg('nothing available')
installHandler = _cannotInstallHandler
isDefaultHandler = _cannotDetermineDefault
| apache-2.0 |
Akasurde/ansible | test/support/integration/plugins/module_utils/docker/swarm.py | 61 | 10842 | # (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <[email protected]>
# (c) Thierry Bouvet (@tbouvet)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from time import sleep
try:
from docker.errors import APIError, NotFound
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils._text import to_native
from ansible.module_utils.docker.common import (
AnsibleDockerClient,
LooseVersion,
)
class AnsibleDockerSwarmClient(AnsibleDockerClient):
def __init__(self, **kwargs):
super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
def get_swarm_node_id(self):
"""
        Get the 'NodeID' of the Swarm node, or 'None' if the host is not in a Swarm. It returns the NodeID
        of the Docker host the module is executed on
:return:
NodeID of host or 'None' if not part of Swarm
"""
try:
info = self.info()
except APIError as exc:
self.fail("Failed to get node information for %s" % to_native(exc))
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info['Swarm']['NodeID']:
return swarm_info['Swarm']['NodeID']
return None
def check_if_swarm_node(self, node_id=None):
"""
        Checks if the host is part of a Docker Swarm. If 'node_id' is not provided it reads the Docker host
        system information and checks whether the Swarm-specific keys are present in the output. If 'node_id' is
        provided it tries to read that node's information, assuming it is run on a Swarm manager. The
        get_node_inspect() method handles the exception if it is not executed on a Swarm manager
:param node_id: Node identifier
:return:
bool: True if node is part of Swarm, False otherwise
"""
if node_id is None:
try:
info = self.info()
except APIError:
self.fail("Failed to get host information.")
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info['Swarm']['NodeID']:
return True
if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
return True
return False
else:
try:
node_info = self.get_node_inspect(node_id=node_id)
except APIError:
return
if node_info['ID'] is not None:
return True
return False
def check_if_swarm_manager(self):
"""
Checks if node role is set as Manager in Swarm. The node is the docker host on which module action
is performed. The inspect_swarm() will fail if node is not a manager
:return: True if node is Swarm Manager, False otherwise
"""
try:
self.inspect_swarm()
return True
except APIError:
return False
def fail_task_if_not_swarm_manager(self):
"""
If host is not a swarm manager then Ansible task on this host should end with 'failed' state
"""
if not self.check_if_swarm_manager():
self.fail("Error running docker swarm module: must run on swarm manager node")
def check_if_swarm_worker(self):
"""
Checks if node role is set as Worker in Swarm. The node is the docker host on which module action
is performed. Will fail if run on host that is not part of Swarm via check_if_swarm_node()
:return: True if node is Swarm Worker, False otherwise
"""
if self.check_if_swarm_node() and not self.check_if_swarm_manager():
return True
return False
def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
"""
        Checks if the node status on the Swarm manager is 'down'. If node_id is provided it queries the manager
        about the specified node, otherwise it queries the manager about the node the module runs on. If run on a
        Swarm Worker node or a host that is not part of the Swarm it will fail the playbook
:param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
:param node_id: node ID or name, if None then method will try to get node_id of host module run on
:return:
True if node is part of swarm but its state is down, False otherwise
"""
if repeat_check < 1:
repeat_check = 1
if node_id is None:
node_id = self.get_swarm_node_id()
for retry in range(0, repeat_check):
if retry > 0:
sleep(5)
node_info = self.get_node_inspect(node_id=node_id)
if node_info['Status']['State'] == 'down':
return True
return False
def get_node_inspect(self, node_id=None, skip_missing=False):
"""
Returns Swarm node info as in 'docker node inspect' command about single node
:param skip_missing: if True then function will return None instead of failing the task
:param node_id: node ID or name, if None then method will try to get node_id of host module run on
:return:
Single node information structure
"""
if node_id is None:
node_id = self.get_swarm_node_id()
if node_id is None:
self.fail("Failed to get node information.")
try:
node_info = self.inspect_node(node_id=node_id)
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
if exc.status_code == 404:
if skip_missing:
return None
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
if 'ManagerStatus' in node_info:
if node_info['ManagerStatus'].get('Leader'):
# This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
# Check moby/moby#35437 for details
count_colons = node_info['ManagerStatus']['Addr'].count(":")
if count_colons == 1:
swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
else:
swarm_leader_ip = node_info['Status']['Addr']
node_info['Status']['Addr'] = swarm_leader_ip
return node_info
def get_all_nodes_inspect(self):
"""
Returns Swarm node info as in 'docker node inspect' command about all registered nodes
:return:
Structure with information about all nodes
"""
try:
node_info = self.nodes()
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
return node_info
def get_all_nodes_list(self, output='short'):
"""
Returns list of nodes registered in Swarm
:param output: Defines format of returned data
:return:
If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm,
if 'output' is 'long' then returns data is list of dict containing the attributes as in
output of command 'docker node ls'
"""
nodes_list = []
nodes_inspect = self.get_all_nodes_inspect()
if nodes_inspect is None:
return None
if output == 'short':
for node in nodes_inspect:
nodes_list.append(node['Description']['Hostname'])
elif output == 'long':
for node in nodes_inspect:
node_property = {}
node_property.update({'ID': node['ID']})
node_property.update({'Hostname': node['Description']['Hostname']})
node_property.update({'Status': node['Status']['State']})
node_property.update({'Availability': node['Spec']['Availability']})
if 'ManagerStatus' in node:
if node['ManagerStatus']['Leader'] is True:
node_property.update({'Leader': True})
node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
nodes_list.append(node_property)
else:
return None
return nodes_list
def get_node_name_by_id(self, nodeid):
return self.get_node_inspect(nodeid)['Description']['Hostname']
def get_unlock_key(self):
if self.docker_py_version < LooseVersion('2.7.0'):
return None
return super(AnsibleDockerSwarmClient, self).get_unlock_key()
def get_service_inspect(self, service_id, skip_missing=False):
"""
Returns Swarm service info as in 'docker service inspect' command about single service
:param service_id: service ID or name
:param skip_missing: if True then function will return None instead of failing the task
:return:
Single service information structure
"""
try:
service_info = self.inspect_service(service_id)
except NotFound as exc:
if skip_missing is False:
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
else:
return None
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
self.fail("Error inspecting swarm service: %s" % exc)
except Exception as exc:
self.fail("Error inspecting swarm service: %s" % exc)
json_str = json.dumps(service_info, ensure_ascii=False)
service_info = json.loads(json_str)
return service_info
| gpl-3.0 |
shakamunyi/nova | nova/tests/functional/v3/test_fping.py | 19 | 1663 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.plugins.v3 import fping
from nova.tests.functional.v3 import test_servers
from nova.tests.unit.api.openstack.compute.contrib import test_fping
from nova import utils
class FpingSampleJsonTests(test_servers.ServersSampleBase):
extension_name = "os-fping"
def setUp(self):
super(FpingSampleJsonTests, self).setUp()
def fake_check_fping(self):
pass
self.stubs.Set(utils, "execute", test_fping.execute)
self.stubs.Set(fping.FpingController, "check_fping",
fake_check_fping)
def test_get_fping(self):
self._post_server()
response = self._do_get('os-fping')
subs = self._get_regexes()
self._verify_response('fping-get-resp', subs, response, 200)
def test_get_fping_details(self):
uuid = self._post_server()
response = self._do_get('os-fping/%s' % (uuid))
subs = self._get_regexes()
self._verify_response('fping-get-details-resp', subs, response, 200)
| apache-2.0 |
mozilla/zamboni | sites/stage/settings.py | 4 | 9237 | from mkt.settings import * # noqa
import datetime
import logging
import environ
environ.Env.read_env(env_file='/etc/zamboni/settings.env')
env = environ.Env()
ENV = env('ENV')
DOMAIN = env('DOMAIN')
SITE_URL = 'https://{0}'.format(DOMAIN)
CRONJOB_LOCK_PREFIX = DOMAIN
BROWSERID_AUDIENCES = [SITE_URL]
STATIC_URL = env('STATIC_URL')
LOCAL_MIRROR_URL = '%s_files' % STATIC_URL
ADMINS = ()
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS')
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
EMAIL_URL = env.email_url('EMAIL_URL')
EMAIL_HOST = EMAIL_URL['EMAIL_HOST']
EMAIL_PORT = EMAIL_URL['EMAIL_PORT']
EMAIL_BACKEND = EMAIL_URL['EMAIL_BACKEND']
EMAIL_HOST_USER = EMAIL_URL['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = EMAIL_URL['EMAIL_HOST_PASSWORD']
ENGAGE_ROBOTS = False
SERVER_EMAIL = env('SERVER_EMAIL')
SESSION_COOKIE_SECURE = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {}
DATABASES['default'] = env.db('DATABASES_DEFAULT_URL')
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['default']['ATOMIC_REQUESTS'] = True
DATABASES['default']['CONN_MAX_AGE'] = 5 * 60 # 5m for persistent connections.
DATABASES['slave'] = env.db('DATABASES_SLAVE_URL')
DATABASES['slave']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave']['sa_pool_key'] = 'slave'
DATABASES['slave']['ATOMIC_REQUESTS'] = True
DATABASES['slave']['CONN_MAX_AGE'] = 5 * 60 # 5m for persistent connections.
SERVICES_DATABASE = env.db('SERVICES_DATABASE_URL')
SLAVE_DATABASES = ['slave']
CACHE_PREFIX = 'mkt.%s' % ENV
CACHES = {}
CACHES['default'] = env.cache('CACHES_DEFAULT')
CACHES['default']['TIMEOUT'] = 500
CACHES['default']['KEY_PREFIX'] = CACHE_PREFIX
SECRET_KEY = env('SECRET_KEY')
# Celery
BROKER_URL = env('BROKER_URL')
CELERY_ALWAYS_EAGER = False
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
NETAPP_STORAGE = env('NETAPP_STORAGE_ROOT') + '/shared_storage'
GUARDED_ADDONS_PATH = env('NETAPP_STORAGE_ROOT') + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
EXTENSION_ICONS_PATH = UPLOADS_PATH + '/extension_icons'
WEBSITE_ICONS_PATH = UPLOADS_PATH + '/website_icons'
FEATURED_APP_BG_PATH = UPLOADS_PATH + '/featured_app_background'
FEED_COLLECTION_BG_PATH = UPLOADS_PATH + '/feed_collection_background'
FEED_SHELF_BG_PATH = UPLOADS_PATH + '/feed_shelf_background'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
WEBAPP_PROMO_IMG_PATH = UPLOADS_PATH + '/webapp_promo_imgs'
WEBSITE_PROMO_IMG_PATH = UPLOADS_PATH + '/website_promo_imgs'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
EXTENSIONS_PATH = NETAPP_STORAGE + '/extensions'
SIGNED_EXTENSIONS_PATH = NETAPP_STORAGE + '/signed-extensions'
LOGGING['loggers'].update({
'z.task': {'level': logging.DEBUG},
'z.pool': {'level': logging.ERROR},
})
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
ADDONS_PATH = env('NETAPP_STORAGE_ROOT') + '/files'
SPIDERMONKEY = '/usr/bin/tracemonkey'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = env('RESPONSYS_ID')
ES_DEFAULT_NUM_REPLICAS = 2
ES_HOSTS = env('ES_HOSTS')
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_%s' % (v, ENV)) for k, v in ES_INDEXES.items())
STATSD_HOST = env('STATSD_HOST')
STATSD_PORT = env.int('STATSD_PORT', default=8125)
STATSD_PREFIX = 'mkt-{0}'.format(ENV)
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = False
NEW_FEATURES = True
CELERYD_TASK_SOFT_TIME_LIMIT = env.int('CELERYD_TASK_SOFT_TIME_LIMIT',
default=540)
CLEANCSS_BIN = 'cleancss'
LESS_BIN = 'lessc'
STYLUS_BIN = 'stylus'
UGLIFY_BIN = 'uglifyjs'
LESS_PREPROCESS = True
XSENDFILE = True
# False in Prod
ALLOW_SELF_REVIEWS = env.bool('ALLOW_SELF_REVIEWS', default=False)
GOOGLE_ANALYTICS_CREDENTIALS = env.dict('GOOGLE_ANALYTICS_CREDENTIALS')
GOOGLE_ANALYTICS_CREDENTIALS['user_agent'] = None
GOOGLE_ANALYTICS_CREDENTIALS['token_expiry'] = datetime.datetime(2013, 1, 3, 1, 20, 16, 45465) # noqa
GOOGLE_API_CREDENTIALS = env('GOOGLE_API_CREDENTIALS')
GEOIP_URL = env('GEOIP_URL')
RAISE_ON_SIGNAL_ERROR = True
API_THROTTLE = False
NEWRELIC_ENABLE = env.bool('NEWRELIC_ENABLE', default=False)
if NEWRELIC_ENABLE:
NEWRELIC_INI = '/etc/newrelic.d/%s.ini' % DOMAIN
AES_KEYS = env.dict('AES_KEYS')
TASK_USER_ID = env('TASK_USER_ID', default=4757633)
SERVE_TMP_PATH = False
CSP_SCRIPT_SRC = CSP_SCRIPT_SRC + (STATIC_URL.rstrip('/'),)
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN
MEDIA_URL = STATIC_URL + 'media/'
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_PREFIX
SYSLOG_TAG = "http_app_mkt_{0}".format(ENV)
SYSLOG_TAG2 = "http_app_mkt_{0}_timer".format(ENV)
SYSLOG_CSP = "http_app_mkt_{0}csp".format(ENV)
STATSD_PREFIX = 'marketplace-{0}'.format(ENV)
WEBAPPS_RECEIPT_KEY = env('WEBAPPS_RECEIPT_KEY')
WEBAPPS_RECEIPT_URL = env('WEBAPPS_RECEIPT_URL')
WEBAPPS_UNIQUE_BY_DOMAIN = env.bool('WEBAPPS_UNIQUE_BY_DOMAIN', default=True)
SENTRY_DSN = env('SENTRY_DSN')
WEBAPPS_PUBLIC_KEY_DIRECTORY = NETAPP_STORAGE + '/public_keys'
PRODUCT_ICON_PATH = NETAPP_STORAGE + '/product-icons'
DUMPED_APPS_PATH = NETAPP_STORAGE + '/dumped-apps'
DUMPED_USERS_PATH = NETAPP_STORAGE + '/dumped-users'
SOLITUDE_HOSTS = (env('SOLITUDE_HOSTS'),)
SOLITUDE_OAUTH = {'key': env('SOLITUDE_OAUTH_KEY'),
'secret': env('SOLITUDE_OAUTH_SECRET')}
VALIDATOR_TIMEOUT = env.int('VALIDATOR_TIMEOUT', default=180)
VALIDATOR_IAF_URLS = ['https://marketplace.firefox.com',
'https://marketplace.allizom.org',
'https://marketplace-dev.allizom.org']
# Override the limited marketplace ones with these ones from AMO. Because
# the base gets overridden in the mkt.settings file, we'll set them back again.
# Note the addition of the testing locales dbg and rtl here.
AMO_LANGUAGES = AMO_LANGUAGES + ('dbg', 'rtl', 'ln', 'tl')
LANGUAGES = lazy(langs, dict)(AMO_LANGUAGES)
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in AMO_LANGUAGES])
# Bug 748403
SIGNING_SERVER = env('SIGNING_SERVER')
SIGNING_SERVER_ACTIVE = True
SIGNING_VALID_ISSUERS = ['marketplace-cdn.allizom.org']
# Bug 793876
SIGNED_APPS_KEY = env('SIGNED_APPS_KEY')
SIGNED_APPS_SERVER_ACTIVE = True
SIGNED_APPS_SERVER = env('SIGNED_APPS_SERVER')
SIGNED_APPS_REVIEWER_SERVER_ACTIVE = True
SIGNED_APPS_REVIEWER_SERVER = env('SIGNED_APPS_REVIEWER_SERVER')
GOOGLE_ANALYTICS_DOMAIN = 'marketplace.firefox.com'
# See mkt/settings.py for more info.
APP_PURCHASE_KEY = DOMAIN
APP_PURCHASE_AUD = DOMAIN
APP_PURCHASE_TYP = 'mozilla-stage/payments/pay/v1'
APP_PURCHASE_SECRET = env('APP_PURCHASE_SECRET')
MONOLITH_PASSWORD = env('MONOLITH_PASSWORD')
MONOLITH_SERVER = env('MONOLITH_SERVER')
MONOLITH_INDEX = 'mkt{0}-time_*'.format(ENV)
# This is mainly for Marionette tests.
WEBAPP_MANIFEST_NAME = env('WEBAPP_MANIFEST_NAME', default='Marketplace Stage')
ENABLE_API_ERROR_SERVICE = env.bool('ENABLE_API_ERROR_SERVICE', default=True)
# Until Bango can properly do refunds.
BANGO_FAKE_REFUNDS = env.bool('BANGO_FAKE_REFUNDS', default=True)
ES_DEFAULT_NUM_REPLICAS = 2
ES_USE_PLUGINS = True
# Cache timeout on the /search/featured API.
CACHE_SEARCH_FEATURED_API_TIMEOUT = 60 * 5 # 5 min.
ALLOWED_CLIENTS_EMAIL_API = env.list('ALLOWED_CLIENTS_EMAIL_API')
POSTFIX_AUTH_TOKEN = env('POSTFIX_AUTH_TOKEN')
POSTFIX_DOMAIN = DOMAIN
# IARC content ratings.
IARC_ENV = env('IARC_ENV', default='test')
IARC_MOCK = False
IARC_PASSWORD = env('IARC_PASSWORD')
IARC_PLATFORM = env('IARC_PLATFORM', default='Firefox')
IARC_SERVICE_ENDPOINT = 'https://www.globalratings.com/IARCDEMOService/IARCServices.svc' # noqa
IARC_STOREFRONT_ID = env('IARC_STOREFRONT_ID', default=4)
IARC_SUBMISSION_ENDPOINT = 'https://www.globalratings.com/IARCDEMORating/Submission.aspx' # noqa
IARC_ALLOW_CERT_REUSE = True
# IARC V2
IARC_V2_STORE_ID = env('IARC_V2_STORE_ID', default=None)
IARC_V2_STORE_PASSWORD = env('IARC_V2_STORE_PASSWORD', default=None)
IARC_V2_SERVICE_ENDPOINT = env('IARC_V2_SERVICE_ENDPOINT', default=None)
IARC_V2_SUBMISSION_ENDPOINT = 'https://iarc-int.azurewebsites.net/Hosted/Index'
PAYMENT_PROVIDERS = env.list('PAYMENT_PROVIDERS', default=['bango'])
DEFAULT_PAYMENT_PROVIDER = env('DEFAULT_PAYMENT_PROVIDER', default='bango')
PRE_GENERATE_APKS = True
PRE_GENERATE_APK_URL = env('PRE_GENERATE_APK_URL')
FXA_AUTH_DOMAIN = env('FXA_AUTH_DOMAIN')
FXA_OAUTH_URL = env('FXA_OAUTH_URL')
FXA_CLIENT_ID = env('FXA_CLIENT_ID')
FXA_CLIENT_SECRET = env('FXA_CLIENT_SECRET')
FXA_SECRETS[FXA_CLIENT_ID] = FXA_CLIENT_SECRET
QA_APP_ID = 500427
RECOMMENDATIONS_API_URL = env('RECOMMENDATIONS_API_URL')
RECOMMENDATIONS_ENABLED = True
DEV_PAY_PROVIDERS = {
APP_PURCHASE_TYP: SITE_URL + '/mozpay/?req={jwt}',
}
# Bug 1145338
IAF_OVERRIDE_APPS = env.list('IAF_OVERRIDE_APPS')
| bsd-3-clause |
OnRampOrg/onramp | modules/FWC/bin/onramp_run.py | 2 | 1220 | #!/usr/bin/env python
#
# Curriculum Module Run Script
# - Run once per run of the module by a user
# - Run inside job submission. So in an allocation.
# - onramp_runparams.cfg file is available in current working directory
#
import os
import sys
from subprocess import call, check_call, CalledProcessError
from configobj import ConfigObj
#
# Read the configobj values
#
# This will always be the name of the file, so fine to hardcode here
conf_file = "onramp_runparams.cfg"
# Already validated the file in our onramp_preprocess.py script - no need to do it again
config = ConfigObj(conf_file)
#
# Load any modules for compiling
# - need to load mpi module on flux
#
try:
    rtn = check_call("module load mpi", shell=True)
except CalledProcessError as e:
print "Error loading module.\nError: %s" % e
sys.exit(-1)
#
# Run my program
#
os.chdir('src')
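# Launch the FWC binary under mpirun (timed), passing the grid size read from the runparams config.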
call(['time', 'mpirun', '-np', '1', 'FWC-serial', '-h', config['FWC']['grid_height'], '-w', config['FWC']['grid_width']])
# Exit 0 if all is ok
sys.exit(0)
# Exit with a negative value if there was a problem
#sys.exit(-1)
| bsd-3-clause |
dlazz/ansible | lib/ansible/modules/network/panos/panos_cert_gen_ssh.py | 61 | 6225 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_cert_gen_ssh
short_description: generates a self-signed certificate using SSH protocol with SSH key
description:
- This module generates a self-signed certificate that can be used by GlobalProtect client, SSL connector, or
- otherwise. Root certificate must be preset on the system first. This module depends on paramiko for ssh.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- paramiko
notes:
- Checkmode is not supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
key_filename:
description:
- Location of the filename that is used for the auth. Either I(key_filename) or I(password) is required.
required: true
password:
description:
- Password credentials to use for auth. Either I(key_filename) or I(password) is required.
required: true
cert_friendly_name:
description:
- Human friendly certificate name (not CN but just a friendly name).
required: true
cert_cn:
description:
- Certificate CN (common name) embedded in the certificate signature.
required: true
signed_by:
description:
- Undersigning authority (CA) that MUST already be presents on the device.
required: true
rsa_nbits:
description:
- Number of bits used by the RSA algorithm for the certificate generation.
default: "2048"
'''
EXAMPLES = '''
# Generates a new self-signed certificate using ssh
- name: generate self signed certificate
panos_cert_gen_ssh:
ip_address: "192.168.1.1"
password: "paloalto"
cert_cn: "1.1.1.1"
cert_friendly_name: "test123"
signed_by: "root-ca"
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import time
try:
import paramiko
HAS_LIB = True
except ImportError:
HAS_LIB = False
_PROMPTBUFF = 4096
def wait_with_timeout(module, shell, prompt, timeout=60):
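    # Poll the SSH channel until the last non-whitespace character received matches the
    # expected prompt, failing the module after `timeout` seconds.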
now = time.time()
result = ""
while True:
if shell.recv_ready():
result += shell.recv(_PROMPTBUFF)
endresult = result.strip()
if len(endresult) != 0 and endresult[-1] == prompt:
break
if time.time() - now > timeout:
module.fail_json(msg="Timeout waiting for prompt")
return result
def generate_cert(module, ip_address, key_filename, password,
cert_cn, cert_friendly_name, signed_by, rsa_nbits):
stdout = ""
client = paramiko.SSHClient()
# add policy to accept all host keys, I haven't found
# a way to retrieve the instance SSH key fingerprint from AWS
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if not key_filename:
client.connect(ip_address, username="admin", password=password)
else:
client.connect(ip_address, username="admin", key_filename=key_filename)
shell = client.invoke_shell()
# wait for the shell to start
buff = wait_with_timeout(module, shell, ">")
stdout += buff
# generate self-signed certificate
if isinstance(cert_cn, list):
cert_cn = cert_cn[0]
cmd = 'request certificate generate signed-by {0} certificate-name {1} name {2} algorithm RSA rsa-nbits {3}\n'.format(
signed_by, cert_friendly_name, cert_cn, rsa_nbits)
shell.send(cmd)
# wait for the shell to complete
buff = wait_with_timeout(module, shell, ">")
stdout += buff
# exit
shell.send('exit\n')
if 'Success' not in buff:
module.fail_json(msg="Error generating self signed certificate: " + stdout)
client.close()
return stdout
def main():
argument_spec = dict(
ip_address=dict(required=True),
key_filename=dict(),
password=dict(no_log=True),
cert_cn=dict(required=True),
cert_friendly_name=dict(required=True),
rsa_nbits=dict(default='2048'),
signed_by=dict(required=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
required_one_of=[['key_filename', 'password']])
if not HAS_LIB:
module.fail_json(msg='paramiko is required for this module')
ip_address = module.params["ip_address"]
key_filename = module.params["key_filename"]
password = module.params["password"]
cert_cn = module.params["cert_cn"]
cert_friendly_name = module.params["cert_friendly_name"]
signed_by = module.params["signed_by"]
rsa_nbits = module.params["rsa_nbits"]
try:
stdout = generate_cert(module,
ip_address,
key_filename,
password,
cert_cn,
cert_friendly_name,
signed_by,
rsa_nbits)
except Exception as exc:
module.fail_json(msg=to_native(exc))
module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main()
| gpl-3.0 |
daphne-yu/aubio | python.old/aubio/task/cut.py | 13 | 1576 | from task import task
from aubio.aubioclass import *
class taskcut(task):
def __init__(self,input,slicetimes,params=None,output=None):
""" open the input file and initialize arguments
parameters should be set *before* calling this method.
"""
from os.path import basename,splitext
task.__init__(self,input,output=None,params=params)
self.soundoutbase, self.soundoutext = splitext(basename(self.input))
self.newname = "%s%s%09.5f%s%s" % (self.soundoutbase,".",
self.frameread*self.params.step,".",self.soundoutext)
self.fileo = sndfile(self.newname,model=self.filei)
self.myvec = fvec(self.params.hopsize,self.channels)
self.mycopy = fvec(self.params.hopsize,self.channels)
self.slicetimes = slicetimes
def __call__(self):
task.__call__(self)
# write to current file
if len(self.slicetimes) and self.frameread >= self.slicetimes[0][0]:
self.slicetimes.pop(0)
# write up to 1st zero crossing
zerocross = 0
while ( abs( self.myvec.get(zerocross,0) ) > self.params.zerothres ):
zerocross += 1
writesize = self.fileo.write(zerocross,self.myvec)
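            # Copy the rest of the frame, from the zero crossing on, into a scratch buffer; it
            # becomes the start of the next output file, opened below and named after the slice time.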
fromcross = 0
while (zerocross < self.readsize):
for i in range(self.channels):
self.mycopy.set(self.myvec.get(zerocross,i),fromcross,i)
fromcross += 1
zerocross += 1
del self.fileo
self.fileo = sndfile("%s%s%09.5f%s%s" % (self.soundoutbase,".",
self.frameread*self.params.step,".",self.soundoutext),model=self.filei)
writesize = self.fileo.write(fromcross,self.mycopy)
else:
writesize = self.fileo.write(self.readsize,self.myvec)
| gpl-3.0 |
derekjchow/models | research/morph_net/op_regularizers/conv_group_lasso_regularizer_test.py | 2 | 3639 | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for op_regularizers.conv_group_lasso_regularizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from morph_net.op_regularizers import conv_group_lasso_regularizer
layers = tf.contrib.layers
ALIVE_THRESHOLD = 1.0
def assert_not_all_are_alive_or_dead(alive_vector):
assert not all(alive_vector), (
'All activations are alive, test case is trivial. Increase threshold')
assert any(alive_vector), (
'All activations are dead, test case is trivial. Decrease threshold')
class GroupLassoRegularizerTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
tf.reset_default_graph()
tf.set_random_seed(7907)
with tf.contrib.framework.arg_scope(
[layers.conv2d, layers.conv2d_transpose],
weights_initializer=tf.random_normal_initializer):
self.BuildModel()
with self.test_session():
tf.global_variables_initializer().run()
def BuildModel(self):
image = tf.constant(0.0, shape=[1, 17, 19, 3])
conv = layers.conv2d(image, 13, [7, 5], padding='SAME', scope='conv')
layers.conv2d_transpose(conv, 11, [5, 5], scope='convt')
# For Conv2D (Conv2DBackpropInput, aka conv2d transpose), the reduction
# indices for group lasso are (0, 1, 2) ((0, 1, 3)).
@parameterized.named_parameters(
('_regular_conv', 'conv/Conv2D', (0, 1, 2), 0.0),
('_transpose_conv', 'convt/conv2d_transpose', (0, 1, 3), 0.0),
('_regular_conv_l10.5', 'conv/Conv2D', (0, 1, 2), 0.5))
def testOp(self, op_name, axis, l1_fraction):
op = tf.get_default_graph().get_operation_by_name(op_name)
with self.test_session():
weights = op.inputs[1].eval()
l1_reg_vector = np.mean(np.abs(weights), axis=axis)
l2_reg_vector = np.sqrt(np.mean(weights**2, axis=axis))
expected_reg_vector = (
l1_fraction * l1_reg_vector + (1.0 - l1_fraction) * l2_reg_vector)
# We choose the threshold at the expectation value, so that some activations
# end up above threshold and others end up below. The weights are normally
# distributed, so the L2 norm is 1.0, and the L1 norm is sqrt(2/pi).
# With a general l1_fraction, we compute a weighted average of the two:
threshold = (1.0 - l1_fraction) + l1_fraction * np.sqrt(2 / np.pi)
expected_alive = expected_reg_vector > threshold
assert_not_all_are_alive_or_dead(expected_alive)
conv_reg = (
conv_group_lasso_regularizer.ConvGroupLassoRegularizer(
op, threshold=threshold, l1_fraction=l1_fraction))
with self.test_session():
actual_reg_vector = conv_reg.regularization_vector.eval()
actual_alive = conv_reg.alive_vector.eval()
self.assertAllClose(expected_reg_vector, actual_reg_vector)
self.assertAllEqual(expected_alive, actual_alive)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
SerpentCS/odoo | addons/website_report/controllers/main.py | 243 | 1460 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.website.controllers.main import Website
from openerp.http import request, route
class Website(Website):
@route()
def customize_template_get(self, xml_id, full=False):
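        """Extend the customizable templates with any report views stored in the session."""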
res = super(Website, self).customize_template_get(xml_id, full=full)
if full:
for r in request.session.get('report_view_ids', []):
res += super(Website, self).customize_template_get(r.get('xml_id'), full=full)
return res
| agpl-3.0 |
mrhubbs/merge_csv | work.py | 1 | 2511 | """
10-20-15
"""
import tempfile
import csv
def load_csv_as_dict(csv_path):
"""
Loads a CSV into a dictionary.
"""
with open(csv_path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
header_row = reader.next()
dat = [[] for _ in header_row]
row_len = len(header_row)
for row in reader:
row_idx = 0
if len(row) < row_len:
print("row too small, skipping")
continue
while row_idx < row_len:
try:
val = float(row[row_idx])
except (ValueError, SyntaxError):
val = str(row[row_idx])
dat[row_idx].append(val)
row_idx += 1
return {h: d for h, d in zip(header_row, dat)}
def save_dict_as_csv(dat, csv_path):
"""
Saves, in the CSV format, the data in the dict dat to the file
specified by csv_path.
"""
# Create a temporary csv file to write to.
csv_temp = tempfile.TemporaryFile()
writer = csv.writer(csv_temp, delimiter=',')
# Write the header.
writer.writerow(dat.keys())
# Write the rest of the data.
idx = 0
the_data = dat.values()
length = len(the_data[0])
header_range = range(len(dat.keys()))
while idx < length:
# Build the row.
row = [the_data[i][idx] for i in header_range]
# Write the row.
writer.writerow(row)
idx += 1
# Copy the temporary csv file to the actual file we should be outputting
# to. Not writing directly to our output file prevents us from corrupting
# it if something goes wrong.
copy_temp_file(csv_temp, csv_path)
csv_temp.close()
def get_smallest_number_of_lines(d):
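    """Return the length of the shortest column in the data dict, or 0 if it is empty."""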
lengths = [len(i) for i in d.values()]
if len(lengths) < 1:
return 0
else:
return min(lengths)
def truncate_dict(d, length):
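    """Truncate every column in the data dict to at most `length` entries."""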
for key, value in d.items():
d[key] = value[:length]
return d
def merge_dicts_by_mappings(dicts, mappings):
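    """Merge columns from several data dicts into one dict, following (source, destination) column mappings."""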
out = {}
for dictkey, mappings in mappings.items():
for _from, _to in mappings:
out[_to] = dicts[dictkey][_from]
return out
def copy_temp_file(temp_fd, fpath, bs=4096):
"""
Copies all data written to temp_fd to the file specified by fpath.
"""
temp_fd.seek(0)
copy = open(fpath, 'w')
dat = temp_fd.read(bs)
while dat:
copy.write(dat)
dat = temp_fd.read(bs)
copy.close()
| gpl-2.0 |
Jenselme/AutobahnPython | examples/twisted/wamp/rpc/arguments/backend.py | 2 | 2587 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component providing procedures with different kinds
of arguments.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
def ping():
return
def add2(a, b):
return a + b
def stars(nick="somebody", stars=0):
return u"{} starred {}x".format(nick, stars)
def orders(product, limit=5):
return [u"Product {}".format(i) for i in range(50)][:limit]
def arglen(*args, **kwargs):
return [len(args), len(kwargs)]
yield self.register(ping, u'com.arguments.ping')
yield self.register(add2, u'com.arguments.add2')
yield self.register(stars, u'com.arguments.stars')
yield self.register(orders, u'com.arguments.orders')
yield self.register(arglen, u'com.arguments.arglen')
print("Procedures registered; ready for frontend.")
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"crossbardemo",
)
runner.run(Component)
| mit |
cnbeining/videospeeder | videospeeder.py | 2 | 8729 | #!/usr/bin/env python
#coding:utf-8
# Author: Beining --<ACICFG>
# Contact: http://www.cnbeining.com/ |https://github.com/cnbeining/videospeeder
# Purpose: Accelerate video to bypass Letvcloud's transcode.
# Created: 08/28/2014
# LICENSE: GNU v2
import sys
import os
import os, sys, subprocess, shlex, re
from subprocess import call
import uuid
import math
import shutil
import getopt
#----------------------------------------------------------------------
def probe_file(filename):
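    """Run ffprobe on the file and return its formatted output, or None if ffprobe reported an error."""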
cmnd = ['ffprobe', '-show_format', '-pretty', '-loglevel', 'quiet', filename]
p = subprocess.Popen(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#print filename
out, err = p.communicate()
#print out
if err:
print err
return None
return out
#----------------------------------------------------------------------
def time_to_sec(time_raw):
"""
    str -> int
    Convert an HH:MM:SS(.fraction) timestamp into whole seconds, ignoring the fractional part."""
time_list = time_raw.split(':')
hr = int(time_list[0]) * 3600
minute = int(time_list[1]) * 60
sec = int(float(time_list[2]))
return int(hr + minute + sec)
#----------------------------------------------------------------------
def get_abspath(filename):
""""""
return str(os.path.abspath(filename))
#----------------------------------------------------------------------
def process(filename, target_bitrate, speedtime, outputfile):
"""str,int,float,str->?
filename,outputfile comes with the path."""
tmpdir = '/tmp/videospeeder-' + str(uuid.uuid4())
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
audio_format = ''
audio_duration = ''
video_duration_sec = 0
video_size_byte = 0
audio_bitrate = ''
audio_duration_sec = 0
audio_size_byte = 0
video_format = ''
video_duration = ''
video_bitrate = ''
#demux audio file
print('INFO: Checking audio...')
os.system('ffmpeg -i \'' + filename + '\' -vn -c:a copy ' + tmpdir+'/audio.aac' +' > /dev/null 2>&1')
try:
for line in probe_file(tmpdir+'/audio.aac').split('\n'):
if 'format_name' in line:
audio_format = str(line.split('=')[1])
if 'duration' in line:
audio_duration = str(line.split('=')[1])
except:
print('ERROR: Cannot read audio file!')
shutil.rmtree(tmpdir)
exit()
#In case someone screw the audio up
if not 'aac' in audio_format:
print(audio_format)
print('ERROR: You have to use AAC as audio format!')
shutil.rmtree(tmpdir)
exit()
#Check original file
try:
for line in probe_file(filename).split('\n'):
if 'duration' in line:
video_duration = str(line.split('=')[1])
#Sti. there's a tag called "totalduration"...
break
except:
print('ERROR: Cannot read video file!')
shutil.rmtree(tmpdir)
exit()
#Calc...
#By bitrate
if target_bitrate != 0:
print('INFO: Doing calculation...')
try:
video_duration_sec = time_to_sec(video_duration)
video_size_byte = int(os.path.getsize(filename))
audio_duration_sec = time_to_sec(audio_duration)
audio_size_byte = int(os.path.getsize(tmpdir+'/audio.aac'))
except:
print('ERROR: Cannot calculate time, did you input a bitrate too high?')
shutil.rmtree(tmpdir)
exit()
try:
os.remove(tmpdir+'/audio.aac')
pass
except:
print('WARNING: Cannot remove the aac file now...')
time_audio = float(((audio_size_byte * 8.0) / audio_duration_sec) / 1180000)
time_video = float(((video_size_byte * 8.0) / video_duration_sec) / target_bitrate)
if time_audio < 1 and time_video < 1:
print('ERROR: Cannot calculate target, your target bitrate is higher than the original file!')
shutil.rmtree(tmpdir)
exit()
if time_audio == 1 and time_video == 1:
speedtime = 1.1
elif time_audio > time_video:
speedtime = time_audio
else:
speedtime = time_video
#Make patch
print('INFO: Adding ' + str(speedtime - 1) + ' times to audio...')
py_path = sys.path[0]
os.chdir(py_path)
os.system('ffmpeg -i \'' + filename + '\' -c copy ' + tmpdir+'/video.mkv' +'> /dev/null 2>&1')
os.system('mkvextract timecodes_v2 '+ tmpdir + '/video.mkv 0:' + tmpdir +'/tc-track0.txt '+ '1:' + tmpdir +'/tc-track1.txt > /dev/null 2>&1')
#Video
f = open(tmpdir + '/tc-track0.txt', 'r')
video_timecode = f.readlines()
f.close()
video_timecode_speed = '# timecode format v2' + '\n'
for i in video_timecode[1:]:
video_timecode_speed = video_timecode_speed + str(float(i.strip()) * speedtime) + '\n'
f = open(tmpdir + '/video_timecode_speed.txt', 'w')
f.write(video_timecode_speed)
f.close()
#Audio
f = open(tmpdir + '/tc-track1.txt', 'r')
audio_timecode = f.readlines()
f.close()
audio_timecode_speed = '# timecode format v2' + '\n'
for i in audio_timecode[1:]:
audio_timecode_speed = audio_timecode_speed + str(float(i.strip()) * speedtime) + '\n'
f = open(tmpdir + '/audio_timecode_speed.txt', 'w')
f.write(audio_timecode_speed)
f.close()
py_path = sys.path[0]
os.chdir(py_path)
print('INFO: Making patched mkv...')
os.system('mkvmerge -o ' + tmpdir + '/video_patched.mkv --timecodes 0:' + tmpdir + '/video_timecode_speed.txt --timecodes 1:' + tmpdir + '/audio_timecode_speed.txt ' +tmpdir + '/video.mkv > /dev/null 2>&1')
try:
os.remove(tmpdir+'/video.mkv')
pass
except:
print('WARNING: Cannot remove the temporary mkv file now...')
print('INFO: Making final output file...')
os.system('ffmpeg -i ' + tmpdir + '/video_patched.mkv -c copy '+outputfile +'> /dev/null 2>&1')
print('Done!')
#clean up
try:
shutil.rmtree(tmpdir)
except:
print('ERROR: Cannot remove temp dir, do it by yourself!')
#----------------------------------------------------------------------
def usage():
""""""
print('''Usage:
python videospeeder.py (-h) (-i input.mp4) (-o output.mp4) (-b 0) (-x 3)
-h: Default: None
Help.
-i: Default: Blank
Input file.
        If the file and videospeeder are not under the same path,
        it is suggested to use an absolute path to avoid possible failure.
    -o Default: input_filename.speed.mp4
Output file.
        Placed in the same folder as the original file if not specified.
-b: Default: 0
Target bitrate.
-x: Default: 3
Target speeding time.
If bitrate is set, it will override the speeding time, if also set.
Videospeeder will calculate both audio and video timing to make sure
        that both the audio and the video meet the requirements.
Please notice that if your original video/audio bitrate is too small,
        Videospeeder will throw an ERROR and quit.
''')
#----------------------------------------------------------------------
if __name__=='__main__':
argv_list = []
argv_list = sys.argv[1:]
filename = ''
target_bitrate = 0
outputfile = ''
speedtime = 3
try:
        opts, args = getopt.getopt(argv_list, "hi:b:x:o:",
                                   ['help', 'input=', 'bitrate=', 'speedtime=', 'outputfile='])
except getopt.GetoptError:
usage()
exit()
for o, a in opts:
if o in ('-h', '--help'):
usage()
exit()
elif o in ('-i', '--input'):
filename = a
try:
argv_list.remove('-i')
except:
break
elif o in ('-b', '--bitrate'):
target_bitrate = int(a)
try:
argv_list.remove('-b')
except:
break
elif o in ('-x', '--speedtime'):
speedtime = int(a)
try:
argv_list.remove('-x')
except:
break
elif o in ('-o', '--outputfile'):
outputfile = a
try:
argv_list.remove('-o')
except:
break
if filename == '':
print('ERROR: No input file!')
exit()
if outputfile == '':
outputfile = filename.split('.')[0]
for i in filename.split('.')[1:-1]:
outputfile = outputfile + '.' + i
outputfile = outputfile + '.speed.mp4'
process(filename, target_bitrate, speedtime, outputfile)
exit() | gpl-2.0 |
2mny/mylar | mylar/torrent/clients/utorrent.py | 2 | 2494 | import os
from libs.utorrent.client import UTorrentClient
# Only compatible with uTorrent 3.0+
class TorrentClient(object):
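    """Thin wrapper around UTorrentClient exposing the torrent operations Mylar needs (uTorrent 3.0+ only)."""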
def __init__(self):
self.conn = None
def connect(self, host, username, password):
if self.conn is not None:
return self.conn
if not host:
return False
if username and password:
self.conn = UTorrentClient(
host,
username,
password
)
else:
self.conn = UTorrentClient(host)
return self.conn
    def find_torrent(self, hash):
        # Initialize so a missing hash returns False instead of raising UnboundLocalError.
        torrent = None
        try:
            torrent_list = self.conn.list()[1]
            for t in torrent_list['torrents']:
                if t[0] == hash:
                    torrent = t
                    break
        except Exception:
            raise
        return torrent if torrent else False
def get_torrent(self, torrent):
if not torrent[26]:
            raise Exception('Only compatible with uTorrent 3.0+')
torrent_files = []
torrent_completed = False
torrent_directory = os.path.normpath(torrent[26])
try:
if torrent[4] == 1000:
torrent_completed = True
files = self.conn.getfiles(torrent[0])[1]['files'][1]
for f in files:
if not os.path.normpath(f[0]).startswith(torrent_directory):
file_path = os.path.join(torrent_directory, f[0].lstrip('/'))
else:
file_path = f[0]
torrent_files.append(file_path)
torrent_info = {
'hash': torrent[0],
'name': torrent[2],
'label': torrent[11] if torrent[11] else '',
'folder': torrent[26],
'completed': torrent_completed,
'files': torrent_files,
}
except Exception:
raise
return torrent_info
def start_torrent(self, torrent_hash):
return self.conn.start(torrent_hash)
def stop_torrent(self, torrent_hash):
return self.conn.stop(torrent_hash)
def delete_torrent(self, torrent):
deleted = []
try:
files = self.conn.getfiles(torrent[0])[1]['files'][1]
for f in files:
deleted.append(os.path.normpath(os.path.join(torrent[26], f[0])))
self.conn.removedata(torrent[0])
except Exception:
raise
return deleted
| gpl-3.0 |
gnowgi/gnowsys-studio | objectapp/spam_checker/backends/all_is_spam.py | 3 | 2624 | # Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""All is spam, spam checker backend for Objectapp"""
def backend(comment, content_object, request):
"""Backend for setting all comments to spam"""
return True
| agpl-3.0 |
louietsai/python-for-android | python3-alpha/python3-src/Lib/test/test_docxmlrpc.py | 54 | 7827 | from xmlrpc.server import DocXMLRPCServer
import http.client
import sys
from test import support
threading = support.import_module('threading')
import time
import socket
import unittest
PORT = None
def make_request_and_skipIf(condition, reason):
    # If we skip the test, we have to make a request because
    # the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
self.client.request("GET", "/")
self.client.getresponse()
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
def server(evt, numrequests):
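    """Serve `numrequests` DocXMLRPC requests on localhost, then set `evt` to signal completion."""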
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
global PORT
PORT = serv.socket.getsockname()[1]
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"This is an XML-RPC server's documentation, but the server "
"can be used by POSTing to /RPC2. Try self.add, too.")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.server_close()
PORT = None
evt.set()
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
self._threads = support.threading_setup()
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
threading.Thread(target=server, args=(self.evt, 1)).start()
# wait for port to be assigned
n = 1000
while n > 0 and PORT is None:
time.sleep(0.001)
n -= 1
self.client = http.client.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
self.evt.wait()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
support.threading_cleanup(*self._threads)
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server throws an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
        currently is, I suspect, invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn((b'<dl><dt><a name="-<lambda>"><strong>'
b'<lambda></strong></a>(x, y)</dt></dl>'),
response.read())
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to
PEPS and RFCs with links, and that it linkifies text starting with
http or ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd>'
b'<tt>Add two instances together. This '
b'follows <a href="http://www.python.org/dev/peps/pep-0008/">'
b'PEP008</a>, but has nothing<br>\nto do '
b'with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">'
b'RFC1952</a>. Case should matter: pEp008 '
b'and rFC1952. Things<br>\nthat start '
b'with http and ftp should be '
b'auto-linked, too:<br>\n<a href="http://google.com">'
b'http://google.com</a>.</tt></dd></dl>'), response)
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_system_methods(self):
"""Test the precense of three consecutive system.* methods.
This also tests their use of parameter type recognition and the
systems related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-system.methodHelp"><strong>system.methodHelp'
b'</strong></a>(method_name)</dt><dd><tt><a href="#-system.method'
b'Help">system.methodHelp</a>(\'add\') => "Adds '
b'two integers together"<br>\n <br>\nReturns a'
b' string containing documentation for '
b'the specified method.</tt></dd></dl>\n<dl><dt><a name'
b'="-system.methodSignature"><strong>system.methodSignature</strong>'
b'</a>(method_name)</dt><dd><tt><a href="#-system.methodSignature">'
b'system.methodSignature</a>(\'add\') => [double, '
b'int, int]<br>\n <br>\nReturns a list '
b'describing the signature of the method.'
b' In the<br>\nabove example, the add '
b'method takes two integers as arguments'
b'<br>\nand returns a double result.<br>\n '
b'<br>\nThis server does NOT support system'
b'.methodSignature.</tt></dd></dl>\n<dl><dt><a name="-test_method">'
b'<strong>test_method</strong></a>(arg)</dt><dd><tt>Test '
b'method\'s docs. This method truly does'
b' very little.</tt></dd></dl>'), response)
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(b"""Try self.<strong>add</strong>, too.""",
response.read())
def test_main():
support.run_unittest(DocXMLRPCHTTPGETServer)
if __name__ == '__main__':
test_main()
| apache-2.0 |
vabue/RatticWeb | cred/migrations/0009_auto__del_field_cred_category.py | 7 | 5872 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Cred.category'
db.delete_column('cred_cred', 'category_id')
def backwards(self, orm):
# Adding field 'Cred.category'
db.add_column('cred_cred', 'category',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cred.Tag'], null=True, blank=True),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cred.cred': {
'Meta': {'object_name': 'Cred'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'child_creds'", 'default': 'None', 'to': "orm['cred.Tag']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'cred.credaudit': {
'Meta': {'object_name': 'CredAudit'},
'audittype': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'cred': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': "orm['cred.Cred']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credlogs'", 'to': "orm['auth.User']"})
},
'cred.credchangeq': {
'Meta': {'object_name': 'CredChangeQ'},
'cred': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cred.Cred']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'cred.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
}
}
complete_apps = ['cred'] | gpl-2.0 |
waprin/google-cloud-python | gcloud/streaming/test_util.py | 8 | 1660 | import unittest2
class Test_calculate_wait_for_retry(unittest2.TestCase):
def _callFUT(self, *args, **kw):
from gcloud.streaming.util import calculate_wait_for_retry
return calculate_wait_for_retry(*args, **kw)
def test_w_negative_jitter_lt_max_wait(self):
import random
from gcloud._testing import _Monkey
with _Monkey(random, uniform=lambda lower, upper: lower):
self.assertEqual(self._callFUT(1, 60), 1.5)
def test_w_positive_jitter_gt_max_wait(self):
import random
from gcloud._testing import _Monkey
with _Monkey(random, uniform=lambda lower, upper: upper):
self.assertEqual(self._callFUT(4, 10), 10)
class Test_acceptable_mime_type(unittest2.TestCase):
def _callFUT(self, *args, **kw):
from gcloud.streaming.util import acceptable_mime_type
return acceptable_mime_type(*args, **kw)
def test_pattern_wo_slash(self):
with self.assertRaises(ValueError) as err:
self._callFUT(['text/*'], 'BOGUS')
self.assertEqual(
err.exception.args,
('Invalid MIME type: "BOGUS"',))
def test_accept_pattern_w_semicolon(self):
with self.assertRaises(ValueError) as err:
self._callFUT(['text/*;charset=utf-8'], 'text/plain')
self.assertEqual(
err.exception.args,
('MIME patterns with parameter unsupported: '
'"text/*;charset=utf-8"',))
def test_miss(self):
self.assertFalse(self._callFUT(['image/*'], 'text/plain'))
def test_hit(self):
self.assertTrue(self._callFUT(['text/*'], 'text/plain'))
| apache-2.0 |
SGCreations/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/setuptools/tests/test_easy_install.py | 73 | 13214 | """Easy install Tests
"""
import sys
import os
import shutil
import tempfile
import unittest
import site
import contextlib
import textwrap
import tarfile
import logging
import distutils.core
from setuptools.compat import StringIO, BytesIO, next, urlparse
from setuptools.sandbox import run_setup, SandboxViolation
from setuptools.command.easy_install import (
easy_install, fix_jython_executable, get_script_args, nt_quote_arg)
from setuptools.command.easy_install import PthDistributions
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
from pkg_resources import Distribution as PRDistribution
import setuptools.tests.server
class FakeDist(object):
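    """Minimal distribution stub used to exercise console script generation."""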
def get_entry_map(self, group):
if group != 'console_scripts':
return {}
return {'name': 'ep'}
def as_requirement(self):
return 'spec'
WANTED = """\
#!%s
# EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name'
__requires__ = 'spec'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('spec', 'console_scripts', 'name')()
)
""" % nt_quote_arg(fix_jython_executable(sys.executable, ""))
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestEasyInstallTest(unittest.TestCase):
def test_install_site_py(self):
dist = Distribution()
cmd = easy_install(dist)
cmd.sitepy_installed = False
cmd.install_dir = tempfile.mkdtemp()
try:
cmd.install_site_py()
sitepy = os.path.join(cmd.install_dir, 'site.py')
self.assertTrue(os.path.exists(sitepy))
finally:
shutil.rmtree(cmd.install_dir)
def test_get_script_args(self):
dist = FakeDist()
old_platform = sys.platform
try:
name, script = [i for i in next(get_script_args(dist))][0:2]
finally:
sys.platform = old_platform
self.assertEqual(script, WANTED)
def test_no_find_links(self):
# new option '--no-find-links', that blocks find-links added at
# the project level
dist = Distribution()
cmd = easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.no_find_links = True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
self.assertEqual(cmd.package_index.scanned_urls, {})
# let's try without it (default behavior)
cmd = easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
keys = sorted(cmd.package_index.scanned_urls.keys())
self.assertEqual(keys, ['link1', 'link2'])
class TestPTHFileWriter(unittest.TestCase):
def test_add_from_cwd_site_sets_dirty(self):
'''a pth file manager should set dirty
if a distribution is in site but also the cwd
'''
pth = PthDistributions('does-not_exist', [os.getcwd()])
self.assertTrue(not pth.dirty)
pth.add(PRDistribution(os.getcwd()))
self.assertTrue(pth.dirty)
def test_add_from_site_is_ignored(self):
if os.name != 'nt':
location = '/test/location/does-not-have-to-exist'
else:
location = 'c:\\does_not_exist'
pth = PthDistributions('does-not_exist', [location, ])
self.assertTrue(not pth.dirty)
pth.add(PRDistribution(location))
self.assertTrue(not pth.dirty)
class TestUserInstallTest(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
os.chdir(self.dir)
self.old_enable_site = site.ENABLE_USER_SITE
self.old_file = easy_install_pkg.__file__
self.old_base = site.USER_BASE
site.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = tempfile.mkdtemp()
easy_install_pkg.__file__ = site.USER_SITE
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
site.ENABLE_USER_SITE = self.old_enable_site
easy_install_pkg.__file__ = self.old_file
def test_user_install_implied(self):
site.ENABLE_USER_SITE = True # disabled sometimes
        #XXX: replace with something meaningful
dist = Distribution()
dist.script_name = 'setup.py'
cmd = easy_install(dist)
cmd.args = ['py']
cmd.ensure_finalized()
self.assertTrue(cmd.user, 'user should be implied')
def test_multiproc_atexit(self):
try:
__import__('multiprocessing')
except ImportError:
# skip the test if multiprocessing is not available
return
log = logging.getLogger('test_easy_install')
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
log.info('this should not break')
def test_user_install_not_implied_without_usersite_enabled(self):
site.ENABLE_USER_SITE = False # usually enabled
        #XXX: replace with something meaningful
dist = Distribution()
dist.script_name = 'setup.py'
cmd = easy_install(dist)
cmd.args = ['py']
cmd.initialize_options()
self.assertFalse(cmd.user, 'NOT user should be implied')
def test_local_index(self):
# make sure the local index is used
# when easy_install looks for installed
# packages
new_location = tempfile.mkdtemp()
target = tempfile.mkdtemp()
egg_file = os.path.join(new_location, 'foo-1.0.egg-info')
f = open(egg_file, 'w')
try:
f.write('Name: foo\n')
finally:
f.close()
sys.path.append(target)
old_ppath = os.environ.get('PYTHONPATH')
os.environ['PYTHONPATH'] = os.path.pathsep.join(sys.path)
try:
dist = Distribution()
dist.script_name = 'setup.py'
cmd = easy_install(dist)
cmd.install_dir = target
cmd.args = ['foo']
cmd.ensure_finalized()
cmd.local_index.scan([new_location])
res = cmd.easy_install('foo')
self.assertEqual(os.path.realpath(res.location),
os.path.realpath(new_location))
finally:
sys.path.remove(target)
for basedir in [new_location, target, ]:
if not os.path.exists(basedir) or not os.path.isdir(basedir):
continue
try:
shutil.rmtree(basedir)
except:
pass
if old_ppath is not None:
os.environ['PYTHONPATH'] = old_ppath
else:
del os.environ['PYTHONPATH']
def test_setup_requires(self):
"""Regression test for Distribute issue #318
Ensure that a package with setup_requires can be installed when
setuptools is installed in the user site-packages without causing a
SandboxViolation.
"""
test_setup_attrs = {
'name': 'test_pkg', 'version': '0.0',
'setup_requires': ['foobar'],
'dependency_links': [os.path.abspath(self.dir)]
}
test_pkg = os.path.join(self.dir, 'test_pkg')
test_setup_py = os.path.join(test_pkg, 'setup.py')
os.mkdir(test_pkg)
f = open(test_setup_py, 'w')
f.write(textwrap.dedent("""\
import setuptools
setuptools.setup(**%r)
""" % test_setup_attrs))
f.close()
foobar_path = os.path.join(self.dir, 'foobar-0.1.tar.gz')
make_trivial_sdist(
foobar_path,
textwrap.dedent("""\
import setuptools
setuptools.setup(
name='foobar',
version='0.1'
)
"""))
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
try:
try:
with reset_setup_stop_context():
run_setup(test_setup_py, ['install'])
except SandboxViolation:
self.fail('Installation caused SandboxViolation')
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
class TestSetupRequires(unittest.TestCase):
def test_setup_requires_honors_fetch_params(self):
"""
When easy_install installs a source distribution which specifies
setup_requires, it should honor the fetch parameters (such as
allow-hosts, index-url, and find-links).
"""
# set up a server which will simulate an alternate package index.
p_index = setuptools.tests.server.MockServer()
p_index.start()
netloc = 1
p_index_loc = urlparse(p_index.url)[netloc]
if p_index_loc.endswith(':0'):
# Some platforms (Jython) don't find a port to which to bind,
# so skip this test for them.
return
# create an sdist that has a build-time dependency.
with TestSetupRequires.create_sdist() as dist_file:
with tempdir_context() as temp_install_dir:
with environment_context(PYTHONPATH=temp_install_dir):
ei_params = ['--index-url', p_index.url,
'--allow-hosts', p_index_loc,
'--exclude-scripts', '--install-dir', temp_install_dir,
dist_file]
with reset_setup_stop_context():
with argv_context(['easy_install']):
# attempt to install the dist. It should fail because
# it doesn't exist.
self.assertRaises(SystemExit,
easy_install_pkg.main, ei_params)
# there should have been two or three requests to the server
# (three happens on Python 3.3a)
self.assertTrue(2 <= len(p_index.requests) <= 3)
self.assertEqual(p_index.requests[0].path, '/does-not-exist/')
@staticmethod
@contextlib.contextmanager
def create_sdist():
"""
Return an sdist with a setup_requires dependency (of something that
doesn't exist)
"""
with tempdir_context() as dir:
dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz')
make_trivial_sdist(
dist_path,
textwrap.dedent("""
import setuptools
setuptools.setup(
name="setuptools-test-fetcher",
version="1.0",
setup_requires = ['does-not-exist'],
)
""").lstrip())
yield dist_path
def make_trivial_sdist(dist_path, setup_py):
"""Create a simple sdist tarball at dist_path, containing just a
setup.py, the contents of which are provided by the setup_py string.
"""
setup_py_file = tarfile.TarInfo(name='setup.py')
try:
# Python 3 (StringIO gets converted to io module)
MemFile = BytesIO
except AttributeError:
MemFile = StringIO
setup_py_bytes = MemFile(setup_py.encode('utf-8'))
setup_py_file.size = len(setup_py_bytes.getvalue())
dist = tarfile.open(dist_path, 'w:gz')
try:
dist.addfile(setup_py_file, fileobj=setup_py_bytes)
finally:
dist.close()
@contextlib.contextmanager
def tempdir_context(cd=lambda dir:None):
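    """Create a temporary directory, optionally chdir into it via `cd`, and remove it on exit."""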
temp_dir = tempfile.mkdtemp()
orig_dir = os.getcwd()
try:
cd(temp_dir)
yield temp_dir
finally:
cd(orig_dir)
shutil.rmtree(temp_dir)
@contextlib.contextmanager
def environment_context(**updates):
old_env = os.environ.copy()
os.environ.update(updates)
try:
yield
finally:
for key in updates:
del os.environ[key]
os.environ.update(old_env)
@contextlib.contextmanager
def argv_context(repl):
old_argv = sys.argv[:]
sys.argv[:] = repl
yield
sys.argv[:] = old_argv
@contextlib.contextmanager
def reset_setup_stop_context():
"""
When the setuptools tests are run using setup.py test, and then
one wants to invoke another setup() command (such as easy_install)
within those tests, it's necessary to reset the global variable
in distutils.core so that the setup() command will run naturally.
"""
setup_stop_after = distutils.core._setup_stop_after
distutils.core._setup_stop_after = None
yield
distutils.core._setup_stop_after = setup_stop_after
| apache-2.0 |
Scalr/pecha | scalrctl/commands/farm.py | 2 | 6362 | __author__ = 'Dmitriy Korsakov'
__doc__ = 'Farm management'
import json
import copy
from scalrctl import commands
from scalrctl import click
from scalrctl import request, settings
class FarmTerminate(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farms terminate --farmId <ID> --force"
post_template = {
"terminateFarmRequest": {"force": True}
}
def get_options(self):
hlp = "It is used to terminate the Server immediately ignoring scalr.system.server_terminate_timeout."
force_terminate = click.Option(('--force', 'force'), is_flag=True, default=False, help=hlp)
options = [force_terminate, ]
options.extend(super(FarmTerminate, self).get_options())
return options
def pre(self, *args, **kwargs):
"""
before request is made
"""
force = kwargs.pop("force", None)
post_data = copy.deepcopy(self.post_template)
post_data["terminateFarmRequest"]["force"] = force
kv = {"import-data": post_data}
kv.update(kwargs)
arguments, kw = super(FarmTerminate, self).pre(*args, **kv)
return arguments, kw
class FarmLaunch(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farms launch --farmId <ID>"
post_template = {}
def pre(self, *args, **kwargs):
"""
before request is made
"""
kv = {"import-data": {}}
kv.update(kwargs)
arguments, kw = super(FarmLaunch, self).pre(*args, **kv)
return arguments, kw
class FarmClone(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farms clone --farmId <ID> --name MyNewFarm"
post_template = {
"cloneFarmRequest": {"name": ""}
}
def get_options(self):
hlp = "The name of a new Farm."
name = click.Option(('--name', 'name'), required=True, help=hlp)
options = [name, ]
options.extend(super(FarmClone, self).get_options())
return options
def pre(self, *args, **kwargs):
"""
before request is made
"""
name = kwargs.pop("name", None)
post_data = copy.deepcopy(self.post_template)
post_data["cloneFarmRequest"]["name"] = name
kv = {"import-data": post_data}
kv.update(kwargs)
arguments, kw = super(FarmClone, self).pre(*args, **kv)
return arguments, kw
class FarmSuspend(FarmLaunch):
epilog = "Example: scalr-ctl farms suspend --farmId <ID>"
post_template = {}
class FarmResume(FarmLaunch):
epilog = "Example: scalr-ctl farms resume --farmId <ID>"
post_template = {}
class FarmLock(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farm lock --farmId <ID> --comment <COMMENT> --unlock-permission <ANYONE|OWNER|TEAM>"
post_template = {
"lockFarmRequest": {"lockComment": "", "unlockPermission": "anyone"}
}
def get_options(self):
comment = click.Option(('--lockComment', 'comment'), default="", help="Comment to lock a Farm.")
hlp = "If you would like to prevent other users unlocking the Farm you should set 'owner' options.\
With 'team' options only members of the Farm's Teams can unlock this Farm.\
Default value 'anyone' means that anyone with access can unlock this Farm."
unlock_permission = click.Option((
'--unlockPermission', 'unlock_permission'),
default="anyone", show_default=True, help=hlp)
options = [comment, unlock_permission]
options.extend(super(FarmLock, self).get_options())
return options
def pre(self, *args, **kwargs):
"""
before request is made
"""
comment = kwargs.pop("comment", None)
unlock_permission = kwargs.pop("unlock_permission", "anyone")
post_data = copy.deepcopy(self.post_template)
post_data["lockFarmRequest"]["lockComment"] = comment
post_data["lockFarmRequest"]["unlockPermission"] = unlock_permission
kv = {"import-data": post_data}
kv.update(kwargs)
arguments, kw = super(FarmLock, self).pre(*args, **kv)
return arguments, kw
class FarmCreateFromTemplate(commands.Action):
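    """Create a Farm from a FarmTemplate object supplied on stdin or composed in an interactive editor."""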
def pre(self, *args, **kwargs):
"""
before request is made
"""
kwargs = self._apply_arguments(**kwargs)
stdin = kwargs.pop('stdin', None)
kwargs["FarmTemplate"] = self._read_object() if stdin else self._edit_example()
return args, kwargs
def run(self, *args, **kwargs):
"""
Callback for click subcommand.
"""
hide_output = kwargs.pop('hide_output', False) # [ST-88]
args, kwargs = self.pre(*args, **kwargs)
uri = self._request_template
payload = {}
data = {}
if '{envId}' in uri and not kwargs.get('envId') and settings.envId:
kwargs['envId'] = settings.envId
if kwargs:
# filtering in-body and empty params
uri = self._request_template.format(**kwargs)
for key, value in kwargs.items():
param = '{{{}}}'.format(key)
if value and (param not in self._request_template):
data.update(value)
if self.dry_run:
click.echo('{} {} {} {}'.format(self.http_method, uri,
payload, data))
# returns dummy response
return json.dumps({'data': {}, 'meta': {}})
data = json.dumps(data)
raw_response = request.request(self.http_method, self.api_level,
uri, payload, data)
response = self.post(raw_response)
text = self._format_response(response, hidden=hide_output, **kwargs)
if text is not None:
click.echo(text)
return response
def _edit_example(self):
commentary = \
'''# The body must be a valid FarmTemplate object.
#
# Type your FarmTemplate object below this line. The above text will not be sent to the API server.'''
text = click.edit(commentary)
if text:
raw_object = "".join([line for line in text.splitlines()
if not line.startswith("#")]).strip()
else:
raw_object = ""
return json.loads(raw_object)
| apache-2.0 |
romeubertho/USP-IntroPython | django/learning_log/learning_log/learning_log/settings.py | 1 | 3158 | """
Django settings for learning_log project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1m0oxdx$6bd^qj+si7&+sv38rg!1y^5=e^dsmd15=_27e(o!gy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    #My applications
'learning_logs',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learning_log.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'learning_log.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| mit |
zulip/django | tests/delete_regress/tests.py | 182 | 13415 | from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS, models, transaction
from django.db.utils import ConnectionHandler
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import (
Award, AwardNote, Book, Child, Eaten, Email, File, Food, FooFile,
FooFileProxy, FooImage, FooPhoto, House, Image, Item, Location, Login,
OrderedPerson, OrgUnit, Person, Photo, PlayedWith, PlayedWithNote, Policy,
Researcher, Toy, Version,
)
# Can't run this test under SQLite, because you can't
# get two connections to an in-memory database.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
class DeleteLockingTest(TransactionTestCase):
available_apps = ['delete_regress']
def setUp(self):
# Create a second connection to the default database
new_connections = ConnectionHandler(settings.DATABASES)
self.conn2 = new_connections[DEFAULT_DB_ALIAS]
self.conn2.set_autocommit(False)
def tearDown(self):
# Close down the second connection.
self.conn2.rollback()
self.conn2.close()
def test_concurrent_delete(self):
"""Concurrent deletes don't collide and lock the database (#9479)."""
with transaction.atomic():
Book.objects.create(id=1, pagecount=100)
Book.objects.create(id=2, pagecount=200)
Book.objects.create(id=3, pagecount=300)
with transaction.atomic():
# Start a transaction on the main connection.
self.assertEqual(3, Book.objects.count())
# Delete something using another database connection.
with self.conn2.cursor() as cursor2:
cursor2.execute("DELETE from delete_regress_book WHERE id = 1")
self.conn2.commit()
# In the same transaction on the main connection, perform a
# queryset delete that covers the object deleted with the other
# connection. This causes an infinite loop under MySQL InnoDB
# unless we keep track of already deleted objects.
Book.objects.filter(pagecount__lt=250).delete()
self.assertEqual(1, Book.objects.count())
class DeleteCascadeTests(TestCase):
def test_generic_relation_cascade(self):
"""
Django cascades deletes through generic-related objects to their
reverse relations.
"""
person = Person.objects.create(name='Nelson Mandela')
award = Award.objects.create(name='Nobel', content_object=person)
AwardNote.objects.create(note='a peace prize',
award=award)
self.assertEqual(AwardNote.objects.count(), 1)
person.delete()
self.assertEqual(Award.objects.count(), 0)
# first two asserts are just sanity checks, this is the kicker:
self.assertEqual(AwardNote.objects.count(), 0)
def test_fk_to_m2m_through(self):
"""
If an M2M relationship has an explicitly-specified through model, and
some other model has an FK to that through model, deletion is cascaded
from one of the participants in the M2M, to the through model, to its
related model.
"""
juan = Child.objects.create(name='Juan')
paints = Toy.objects.create(name='Paints')
played = PlayedWith.objects.create(child=juan, toy=paints,
date=datetime.date.today())
PlayedWithNote.objects.create(played=played,
note='the next Jackson Pollock')
self.assertEqual(PlayedWithNote.objects.count(), 1)
paints.delete()
self.assertEqual(PlayedWith.objects.count(), 0)
# first two asserts just sanity checks, this is the kicker:
self.assertEqual(PlayedWithNote.objects.count(), 0)
def test_15776(self):
policy = Policy.objects.create(pk=1, policy_number="1234")
version = Version.objects.create(policy=policy)
location = Location.objects.create(version=version)
Item.objects.create(version=version, location=location)
policy.delete()
class DeleteCascadeTransactionTests(TransactionTestCase):
available_apps = ['delete_regress']
def test_inheritance(self):
"""
Auto-created many-to-many through tables referencing a parent model are
correctly found by the delete cascade when a child of that parent is
deleted.
Refs #14896.
"""
r = Researcher.objects.create()
email = Email.objects.create(
label="office-email", email_address="[email protected]"
)
r.contacts.add(email)
email.delete()
def test_to_field(self):
"""
Cascade deletion works with ForeignKey.to_field set to non-PK.
"""
apple = Food.objects.create(name="apple")
Eaten.objects.create(food=apple, meal="lunch")
apple.delete()
self.assertFalse(Food.objects.exists())
self.assertFalse(Eaten.objects.exists())
class LargeDeleteTests(TestCase):
def test_large_deletes(self):
"Regression for #13309 -- if the number of objects > chunk size, deletion still occurs"
for x in range(300):
Book.objects.create(pagecount=x + 100)
# attach a signal to make sure we will not fast-delete
def noop(*args, **kwargs):
pass
models.signals.post_delete.connect(noop, sender=Book)
Book.objects.all().delete()
models.signals.post_delete.disconnect(noop, sender=Book)
self.assertEqual(Book.objects.count(), 0)
class ProxyDeleteTest(TestCase):
"""
Tests on_delete behavior for proxy models.
See #16128.
"""
def create_image(self):
"""Return an Image referenced by both a FooImage and a FooFile."""
# Create an Image
test_image = Image()
test_image.save()
foo_image = FooImage(my_image=test_image)
foo_image.save()
# Get the Image instance as a File
test_file = File.objects.get(pk=test_image.pk)
foo_file = FooFile(my_file=test_file)
foo_file.save()
return test_image
def test_delete_proxy(self):
"""
Deleting the *proxy* instance bubbles through to its non-proxy and
*all* referring objects are deleted.
"""
self.create_image()
Image.objects.all().delete()
# An Image deletion == File deletion
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Image deletion cascaded and *all* references to it are deleted.
self.assertEqual(len(FooImage.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
def test_delete_proxy_of_proxy(self):
"""
Deleting a proxy-of-proxy instance should bubble through to its proxy
and non-proxy parents, deleting *all* referring objects.
"""
test_image = self.create_image()
# Get the Image as a Photo
test_photo = Photo.objects.get(pk=test_image.pk)
foo_photo = FooPhoto(my_photo=test_photo)
foo_photo.save()
Photo.objects.all().delete()
# A Photo deletion == Image deletion == File deletion
self.assertEqual(len(Photo.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Photo deletion should have cascaded and deleted *all*
# references to it.
self.assertEqual(len(FooPhoto.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_concrete_parent(self):
"""
Deleting an instance of a concrete model should also delete objects
referencing its proxy subclass.
"""
self.create_image()
File.objects.all().delete()
# A File deletion == Image deletion
self.assertEqual(len(File.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
# The File deletion should have cascaded and deleted *all* references
# to it.
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_proxy_pair(self):
"""
If a pair of proxy models are linked by an FK from one concrete parent
to the other, deleting one proxy model cascade-deletes the other, and
the deletion happens in the right order (not triggering an
IntegrityError on databases unable to defer integrity checks).
Refs #17918.
"""
# Create an Image (proxy of File) and FooFileProxy (proxy of FooFile,
# which has an FK to File)
image = Image.objects.create()
as_file = File.objects.get(pk=image.pk)
FooFileProxy.objects.create(my_file=as_file)
Image.objects.all().delete()
self.assertEqual(len(FooFileProxy.objects.all()), 0)
def test_19187_values(self):
with self.assertRaises(TypeError):
Image.objects.values().delete()
with self.assertRaises(TypeError):
Image.objects.values_list().delete()
class Ticket19102Tests(TestCase):
"""
Test different queries which alter the SELECT clause of the query. We
also must be using a subquery for the deletion (that is, the original
query has a join in it). The deletion should be done as "fast-path"
deletion (that is, just one query for the .delete() call).
Note that .values() is not tested here on purpose. .values().delete()
doesn't work for non fast-path deletes at all.
"""
def setUp(self):
self.o1 = OrgUnit.objects.create(name='o1')
self.o2 = OrgUnit.objects.create(name='o2')
self.l1 = Login.objects.create(description='l1', orgunit=self.o1)
self.l2 = Login.objects.create(description='l2', orgunit=self.o2)
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_annotate(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).annotate(
n=models.Count('description')
).filter(
n=1, pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_extra(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).extra(
select={'extraf': '1'}
).filter(
pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_19102_distinct_on(self):
        # Both Login objects should have the same description so that only
        # the one with the smaller PK will be deleted.
Login.objects.update(description='description')
with self.assertNumQueries(1):
Login.objects.distinct('description').order_by('pk').filter(
orgunit__name__isnull=False
).delete()
        # It is assumed that l1, which was created first, has the smaller PK.
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_select_related(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).select_related('orgunit').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_defer(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).only('id').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
class OrderedDeleteTests(TestCase):
def test_meta_ordered_delete(self):
# When a subquery is performed by deletion code, the subquery must be
        # cleared of all ordering. There was a bug that caused _meta ordering
# to be used. Refs #19720.
h = House.objects.create(address='Foo')
OrderedPerson.objects.create(name='Jack', lives_in=h)
OrderedPerson.objects.create(name='Bob', lives_in=h)
OrderedPerson.objects.filter(lives_in__address='Foo').delete()
self.assertEqual(OrderedPerson.objects.count(), 0)
| bsd-3-clause |
ericpre/hyperspy | hyperspy/tests/samfire/test_samfire.py | 2 | 18637 | # -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import gc
import dill
import numpy as np
import pytest
import hyperspy.api as hs
from hyperspy.misc.utils import DictionaryTreeBrowser
from hyperspy.samfire_utils.samfire_worker import create_worker
N_WORKERS = 1
class Mock_queue(object):
def __init__(self):
self.var = []
def put(self, value):
self.var.append(value)
def generate_test_model():
from hyperspy.signals import Signal1D
from hyperspy.components1d import Gaussian, Lorentzian
from scipy.ndimage import gaussian_filter
total = None
blurs = [1.5]
rnd = np.random.RandomState(17)
n_im = 400
radius = 5
domain = 15
# do circle/domain
cent = (domain // 2, domain // 2)
y, x = np.ogrid[-cent[0]:domain - cent[0], -cent[1]:domain - cent[1]]
mask = x * x + y * y <= radius * radius
lor_map = None
for blur in blurs:
s = Signal1D(np.ones((domain, domain, n_im)))
cent = tuple([int(0.5 * i) for i in s.data.shape[:-1]])
m0 = s.create_model()
gs01 = Lorentzian()
m0.append(gs01)
gs01.gamma.map['values'][:] = 50
gs01.gamma.map['is_set'][:] = True
gs01.centre.map['values'][:] = 300
gs01.centre.map['values'][mask] = 400
gs01.centre.map['values'] = gaussian_filter(
gs01.centre.map['values'],
blur)
gs01.centre.map['is_set'][:] = True
gs01.A.map['values'][:] = 100 * \
rnd.rand(domain, domain) + 300000
gs01.A.map['values'][mask] *= 0.75
gs01.A.map['values'] = gaussian_filter(gs01.A.map['values'], blur)
gs01.A.map['is_set'][:] = True
gs02 = Gaussian()
m0.append(gs02)
gs02.sigma.map['values'][:] = 15
gs02.sigma.map['is_set'][:] = True
gs02.centre.map['values'][:] = 400
gs02.centre.map['values'][mask] = 300
gs02.centre.map['values'] = gaussian_filter(
gs02.centre.map['values'],
blur)
gs02.centre.map['is_set'][:] = True
gs02.A.map['values'][:] = 50000
gs02.A.map['is_set'][:] = True
gs03 = Lorentzian()
m0.append(gs03)
gs03.gamma.map['values'][:] = 20
gs03.gamma.map['is_set'][:] = True
gs03.centre.map['values'][:] = 100
gs03.centre.map['values'][mask] = 900
gs03.centre.map['is_set'][:] = True
gs03.A.map['values'][:] = 100 * \
rnd.rand(domain, domain) + 50000
gs03.A.map['values'][mask] *= 0.
gs03.A.map['is_set'][:] = True
s11 = m0.as_signal()
if total is None:
total = s11.data.copy()
lor_map = gs01.centre.map['values'].copy()
else:
total = np.concatenate((total, s11.data), axis=1)
lor_map = np.concatenate(
(lor_map, gs01.centre.map['values'].copy()), axis=1)
s = Signal1D(total)
s.data = rnd.poisson(lam=s.data) + 0.1
s.change_dtype(np.float16)
s.estimate_poissonian_noise_variance()
m = s.inav[:, :7].create_model()
g = Gaussian()
l1 = Lorentzian()
l2 = Lorentzian()
g.sigma.value = 50
g.centre.value = 400
g.A.value = 50000
l1.gamma.value = 40
l1.centre.value = 300
l1.A.value = 300000
l2.gamma.value = 15
l2.centre.value = 100
l2.A.value = 50000
l2.centre.bmin = 0
l2.centre.bmax = 200
l2.A.bmin = 30000
l2.A.bmax = 100000
l2.gamma.bmin = 0
l2.gamma.bmax = 60
m.extend([g, l1, l2])
m.assign_current_values_to_all()
l2.active_is_multidimensional = True
return m, gs01, gs02, gs03
class TestSamfireEmpty:
def setup_method(self, method):
self.shape = (7, 15)
n_im = 50
s = hs.signals.Signal1D(np.ones(self.shape + (n_im,)) + 3.)
s.change_dtype(np.float16)
s.estimate_poissonian_noise_variance()
m = s.create_model()
m.append(hs.model.components1D.Gaussian())
m.append(hs.model.components1D.Lorentzian())
m.append(hs.model.components1D.Lorentzian())
self.model = m
def teardown_method(self, method):
gc.collect()
def test_setup(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
assert samf.metadata._gt_dump is None
assert samf.pool is None
samf._setup(ipyparallel=False)
assert samf.metadata._gt_dump is not None
assert samf.pool is not None
samf.stop()
del samf
def test_samfire_init_marker(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
np.testing.assert_array_almost_equal(samf.metadata.marker,
np.zeros(self.shape))
samf.stop()
del samf
def test_samfire_init_model(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
assert samf.model is m
samf.stop()
del samf
def test_samfire_init_metadata(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
assert isinstance(samf.metadata, DictionaryTreeBrowser)
samf.stop()
del samf
def test_samfire_init_strategy_list(self):
from hyperspy.samfire import StrategyList
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
assert isinstance(samf.strategies, StrategyList)
def test_samfire_init_strategies(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
from hyperspy.samfire_utils.local_strategies import ReducedChiSquaredStrategy
from hyperspy.samfire_utils.global_strategies import HistogramStrategy
assert isinstance(samf.strategies[0],
ReducedChiSquaredStrategy)
assert isinstance(samf.strategies[1], HistogramStrategy)
samf.stop()
del samf
def test_samfire_init_fig(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
assert samf._figure is None
samf.stop()
del samf
def test_samfire_init_default(self):
m = self.model
from multiprocessing import cpu_count
samf = m.create_samfire(setup=False)
assert samf._workers == cpu_count() - 1
np.testing.assert_allclose(samf.metadata.marker, np.zeros(self.shape))
samf.stop()
del samf
def test_optional_components(self):
m = self.model
m[-1].active_is_multidimensional = False
samf = m.create_samfire(workers=N_WORKERS, setup=False)
samf.optional_components = [m[0], 1]
samf._enable_optional_components()
assert m[0].active_is_multidimensional
assert m[1].active_is_multidimensional
assert np.all([isinstance(a, int)
for a in samf.optional_components])
np.testing.assert_equal(samf.optional_components, [0, 1])
samf.stop()
del samf
def test_swap_dict_and_model(self):
m = self.model
for i in range(len(m)):
for ip, p in enumerate(m[i].parameters):
p.map['values'][0, 0] = 3.0 + i + ip
p.map['std'][0, 0] = 2.44 + i + ip
p.map['is_set'][0, 0] = True
m[1].active_is_multidimensional = True
m[1]._active_array[0, 0] = False
assert m[1]._active_array[1, 0]
m.chisq.data[0, 0] = 1200.
m.dof.data[0, 0] = 1.
small_m = m.inav[0, 0]
d = {'chisq.data': np.array(small_m.chisq.data[0]),
'dof.data': np.array(small_m.dof.data[0]),
'components': {component.name: {parameter.name: parameter.map for
parameter in component.parameters}
for component in small_m if component.active}
}
d = copy.deepcopy(d)
samf = m.create_samfire(workers=N_WORKERS, setup=False)
samf._swap_dict_and_model((1, 0), d)
assert m.chisq.data[1, 0] == 1200.
assert m.dof.data[1, 0] == 1.
assert d['dof.data'] == 0.
assert np.isnan(d['chisq.data'])
assert np.all(~m[1]._active_array[:2, 0])
for c in m:
if c.active:
for p in c.parameters:
assert (
p.map['values'][
0, 0] == p.map['values'][
1, 0])
assert p.map['std'][0, 0] == p.map['std'][1, 0]
assert (
p.map['is_set'][
0, 0] == p.map['is_set'][
1, 0])
samf.stop()
del samf
def test_next_pixels(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
ans = samf._next_pixels(3)
assert len(ans) == 0
ind_list = [(1, 2), (0, 1), (3, 3), (4, 6)]
for ind in ind_list:
samf.metadata.marker[ind] += 2.
ans = samf._next_pixels(10)
assert len(ans) == 4
for ind in ans:
assert ind in ind_list
for n, ind in enumerate(ind_list):
samf.metadata.marker[ind] += n
ans = samf._next_pixels(10)
assert ans == [(4, 6), ]
samf.stop()
del samf
def test_change_strategy(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
from hyperspy.samfire_utils.local_strategies import ReducedChiSquaredStrategy
from hyperspy.samfire_utils.global_strategies import HistogramStrategy
ind = (0, 0)
samf.metadata.marker[ind] = -2
samf.strategies.append(ReducedChiSquaredStrategy())
samf.change_strategy(2)
assert samf.metadata.marker[ind] == -1
assert samf._active_strategy_ind == 2
samf.change_strategy(samf.strategies[1])
assert samf._active_strategy_ind == 1
assert samf.metadata.marker[ind] == -2
new_strat = HistogramStrategy()
samf.strategies.append(new_strat)
samf.change_strategy(3)
assert samf._active_strategy_ind == 3
assert samf.active_strategy is new_strat
assert samf.metadata.marker[ind] == -2
samf.stop()
del samf
@pytest.mark.xfail(reason="Sometimes the number of failed pixels > 3 when using multiprocessing. Unknown reason")
def test_multiprocessed():
"""This test uses multiprocessing.pool rather than ipyparallel"""
model, lor1, g, lor2 = generate_test_model()
shape = (7, 15)
model.fit()
samf = model.create_samfire(workers=N_WORKERS, ipyparallel=False)
samf.plot_every = np.nan
samf.strategies[0].radii = 1.
samf.strategies.remove(1)
samf.optional_components = [model[2]]
samf.start(bounded=True)
    # Allow at most 5 pixels to fail randomly.
fitmask = samf.metadata.marker == -np.ones(shape)
print('number of pixels failed: {}'.format(
np.prod(shape) - np.sum(fitmask)))
assert np.sum(fitmask) >= np.prod(shape) - 5
for o_c, n_c in zip([g, lor1, lor2], model):
for p, p1 in zip(o_c.parameters, n_c.parameters):
if n_c._active_array is not None:
mask = np.logical_and(n_c._active_array, fitmask)
else:
mask = fitmask
np.testing.assert_allclose(
p1.map['values'][mask],
p.map['values'][:7, :15][mask],
rtol=0.3)
samf.stop()
del samf
gc.collect()
def test_create_worker_defaults():
worker = create_worker('worker')
assert worker.identity == 'worker'
assert worker.shared_queue is None
assert worker.result_queue is None
assert worker.individual_queue is None
np.testing.assert_equal(worker.best_AICc, np.inf)
np.testing.assert_equal(worker.best_values, [])
np.testing.assert_equal(worker.best_dof, np.inf)
np.testing.assert_equal(worker.last_time, 1)
class TestSamfireWorker:
def setup_method(self, method):
np.random.seed(17)
ax = np.arange(250)
self.widths = [5, 10, 15]
self.centres = [50, 105, 180]
self.areas = [5000, 10000, 20000]
g = hs.model.components1D.Gaussian()
g.sigma.value = self.widths[0]
g.A.value = self.areas[0]
l = hs.model.components1D.Lorentzian()
l.gamma.value = self.widths[1]
l.A.value = self.areas[1]
l1 = hs.model.components1D.Lorentzian()
l1.gamma.value = self.widths[2]
l1.A.value = self.areas[2]
d = g.function(ax - self.centres[0]) + \
l.function(ax - self.centres[1]) + \
l1.function(ax - self.centres[2])
s = hs.signals.Signal1D(np.array([d, d]))
s.add_poissonian_noise()
s.change_dtype(np.float16)
s.metadata.Signal.set_item("Noise_properties.variance",
s.deepcopy() + 1.)
m = s.create_model()
m.append(hs.model.components1D.Gaussian())
m[-1].name = 'g1'
m.append(hs.model.components1D.Lorentzian())
m[-1].name = 'l1'
m.append(hs.model.components1D.Lorentzian())
m[-1].name = 'l2'
m.append(hs.model.components1D.Gaussian())
m[-1].name = 'g2'
m.append(hs.model.components1D.Gaussian())
m[-1].name = 'g3'
m.append(hs.model.components1D.Lorentzian())
m[-1].name = 'l3'
for c in m:
c.active_is_multidimensional = True
vals = {'g1': {},
'g2': {},
'g3': {},
'l1': {},
'l2': {},
'l3': {},
}
vals['g1']['centre'] = [50, 150]
vals['g1']['sigma'] = [5]
vals['g1']['A'] = [10000]
vals['l1']['centre'] = [43]
vals['l1']['gamma'] = [25]
vals['l1']['A'] = [10000]
vals['l2']['centre'] = [125]
vals['l2']['gamma'] = [8]
vals['l2']['A'] = [10000]
vals['g2']['centre'] = [105]
vals['g2']['sigma'] = [20]
vals['g2']['A'] = [10000]
vals['l3']['centre'] = [185]
vals['l3']['gamma'] = [11]
vals['l3']['A'] = [10000]
vals['g3']['centre'] = [175]
vals['g3']['sigma'] = [12]
vals['g3']['A'] = [10000]
self.vals = vals
self.model = m
self.q = Mock_queue()
self.ind = (1,)
self.args = {}
self.model_letter = 'sldkfjg'
from hyperspy.samfire_utils.fit_tests import red_chisq_test as rct
self._gt_dump = dill.dumps(rct(tolerance=1.0))
m_slice = m.inav[self.ind[::-1]]
m_slice.store(self.model_letter)
m_dict = m_slice.signal._to_dictionary(False)
m_dict['models'] = m_slice.signal.models._models.as_dictionary()
self.model_dictionary = m_dict
self.optional_comps = [1, 2, 3, 4, 5]
def teardown_method(self, method):
gc.collect()
def test_add_model(self):
worker = create_worker('worker')
worker.create_model(self.model_dictionary, self.model_letter)
from hyperspy.model import BaseModel
assert isinstance(worker.model, BaseModel)
for component in worker.model:
assert not component.active_is_multidimensional
assert component.active
del worker
def test_main_result(self):
worker = create_worker('worker')
worker.create_model(self.model_dictionary, self.model_letter)
worker.setup_test(self._gt_dump)
worker.set_optional_names({self.model[comp].name for comp in
self.optional_comps})
self.vals.update({
'signal.data': self.model.signal(),
'fitting_kwargs': {},
'variance.data':
self.model.signal.metadata.Signal.Noise_properties.variance()
})
keyword, (_id, _ind, result, found_solution) = \
worker.run_pixel(self.ind, self.vals)
assert _id == 'worker'
assert _ind == self.ind
assert found_solution
assert result['dof.data'][()] == 9
lor_components = [key for key in result['components'].keys() if
key.find('l') == 0]
assert len(result['components']) == 3
assert len(lor_components) == 2
gauss_name = list(set(result['components'].keys()) -
set(lor_components))[0]
gauss = result['components'][gauss_name]
np.testing.assert_allclose(gauss['A'][0]['values'], self.areas[0],
rtol=0.05)
np.testing.assert_allclose(gauss['sigma'][0]['values'], self.widths[0],
rtol=0.05)
np.testing.assert_allclose(gauss['centre'][0]['values'],
self.centres[0], rtol=0.05)
lor1 = result['components'][lor_components[0]]
lor1_values = tuple(lor1[par][0]['values'] for par in ['A', 'gamma',
'centre'])
lor2 = result['components'][lor_components[1]]
lor2_values = tuple(lor2[par][0]['values'] for par in ['A', 'gamma',
'centre'])
possible_values1 = (self.areas[1], self.widths[1], self.centres[1])
possible_values2 = (self.areas[2], self.widths[2], self.centres[2])
assert (np.allclose(lor1_values, possible_values1, rtol=0.05)
or
np.allclose(lor1_values, possible_values2, rtol=0.05))
assert (np.allclose(lor2_values, possible_values1, rtol=0.05)
or
np.allclose(lor2_values, possible_values2, rtol=0.05))
del worker
| gpl-3.0 |
spr/album-sound-check | mutagen/ogg.py | 2 | 17715 | # Copyright 2006 Joe Wreschnig <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# $Id: ogg.py 3975 2007-01-13 21:51:17Z piman $
"""Read and write Ogg bitstreams and pages.
This module reads and writes a subset of the Ogg bitstream format
version 0. It does *not* read or write Ogg Vorbis files! For that,
you should use mutagen.oggvorbis.
This implementation is based on the RFC 3533 standard found at
http://www.xiph.org/ogg/doc/rfc3533.txt.
"""
import struct
import sys
import zlib
from cStringIO import StringIO
from mutagen import FileType
from mutagen._util import cdata, insert_bytes, delete_bytes
class error(IOError):
"""Ogg stream parsing errors."""
pass
class OggPage(object):
"""A single Ogg page (not necessarily a single encoded packet).
A page is a header of 26 bytes, followed by the length of the
data, followed by the data.
    The constructor is given a file-like object pointing to the start
    of an Ogg page. After the constructor is finished, it is pointing
to the start of the next page.
Attributes:
version -- stream structure version (currently always 0)
position -- absolute stream position (default -1)
serial -- logical stream serial number (default 0)
sequence -- page sequence number within logical stream (default 0)
offset -- offset this page was read from (default None)
complete -- if the last packet on this page is complete (default True)
packets -- list of raw packet data (default [])
Note that if 'complete' is false, the next page's 'continued'
property must be true (so set both when constructing pages).
If a file-like object is supplied to the constructor, the above
attributes will be filled in based on it.
"""
version = 0
__type_flags = 0
position = 0L
serial = 0
sequence = 0
offset = None
complete = True
def __init__(self, fileobj=None):
self.packets = []
if fileobj is None:
return
self.offset = fileobj.tell()
header = fileobj.read(27)
if len(header) == 0:
raise EOFError
try:
(oggs, self.version, self.__type_flags, self.position,
self.serial, self.sequence, crc, segments) = struct.unpack(
"<4sBBqIIiB", header)
except struct.error:
raise error("unable to read full header; got %r" % header)
if oggs != "OggS":
raise error("read %r, expected %r, at 0x%x" % (
oggs, "OggS", fileobj.tell() - 27))
if self.version != 0:
raise error("version %r unsupported" % self.version)
total = 0
lacings = []
lacing_bytes = fileobj.read(segments)
if len(lacing_bytes) != segments:
raise error("unable to read %r lacing bytes" % segments)
for c in map(ord, lacing_bytes):
total += c
if c < 255:
lacings.append(total)
total = 0
if total:
lacings.append(total)
self.complete = False
self.packets = map(fileobj.read, lacings)
if map(len, self.packets) != lacings:
raise error("unable to read full data")
def __eq__(self, other):
"""Two Ogg pages are the same if they write the same data."""
try:
return (self.write() == other.write())
except AttributeError:
return False
def __repr__(self):
attrs = ['version', 'position', 'serial', 'sequence', 'offset',
'complete', 'continued', 'first', 'last']
values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
return "<%s %s, %d bytes in %d packets>" % (
type(self).__name__, " ".join(values), sum(map(len, self.packets)),
len(self.packets))
def write(self):
"""Return a string encoding of the page header and data.
A ValueError is raised if the data is too big to fit in a
single page.
"""
data = [
struct.pack("<4sBBqIIi", "OggS", self.version, self.__type_flags,
self.position, self.serial, self.sequence, 0)
]
lacing_data = []
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
lacing_data.append("\xff" * quot + chr(rem))
lacing_data = "".join(lacing_data)
if not self.complete and lacing_data.endswith("\x00"):
lacing_data = lacing_data[:-1]
data.append(chr(len(lacing_data)))
data.append(lacing_data)
data.extend(self.packets)
data = "".join(data)
# Python's CRC is swapped relative to Ogg's needs.
crc = ~zlib.crc32(data.translate(cdata.bitswap), -1)
# Although we're using to_int_be, this actually makes the CRC
# a proper le integer, since Python's CRC is byteswapped.
crc = cdata.to_int_be(crc).translate(cdata.bitswap)
data = data[:22] + crc + data[26:]
return data
def __size(self):
size = 27 # Initial header size
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
size += quot + 1
if not self.complete and rem == 0:
# Packet contains a multiple of 255 bytes and is not
# terminated, so we don't have a \x00 at the end.
size -= 1
size += sum(map(len, self.packets))
return size
size = property(__size, doc="Total frame size.")
def __set_flag(self, bit, val):
mask = 1 << bit
if val: self.__type_flags |= mask
else: self.__type_flags &= ~mask
continued = property(
lambda self: cdata.test_bit(self.__type_flags, 0),
lambda self, v: self.__set_flag(0, v),
doc="The first packet is continued from the previous page.")
first = property(
lambda self: cdata.test_bit(self.__type_flags, 1),
lambda self, v: self.__set_flag(1, v),
doc="This is the first page of a logical bitstream.")
last = property(
lambda self: cdata.test_bit(self.__type_flags, 2),
lambda self, v: self.__set_flag(2, v),
doc="This is the last page of a logical bitstream.")
def renumber(klass, fileobj, serial, start):
"""Renumber pages belonging to a specified logical stream.
fileobj must be opened with mode r+b or w+b.
Starting at page number 'start', renumber all pages belonging
to logical stream 'serial'. Other pages will be ignored.
        fileobj must point to the start of a valid Ogg page; any pages
        occurring after it and part of the specified logical stream
will be numbered. No adjustment will be made to the data in
the pages nor the granule position; only the page number, and
so also the CRC.
If an error occurs (e.g. non-Ogg data is found), fileobj will
        be left pointing to the place in the stream the error occurred,
but the invalid data will be left intact (since this function
does not change the total file size).
"""
number = start
while True:
try: page = OggPage(fileobj)
except EOFError:
break
else:
if page.serial != serial:
# Wrong stream, skip this page.
continue
# Changing the number can't change the page size,
# so seeking back based on the current size is safe.
fileobj.seek(-page.size, 1)
page.sequence = number
fileobj.write(page.write())
fileobj.seek(page.offset + page.size, 0)
number += 1
renumber = classmethod(renumber)
def to_packets(klass, pages, strict=False):
"""Construct a list of packet data from a list of Ogg pages.
If strict is true, the first page must start a new packet,
and the last page must end the last packet.
"""
serial = pages[0].serial
sequence = pages[0].sequence
packets = []
if strict:
if pages[0].continued:
raise ValueError("first packet is continued")
if not pages[-1].complete:
raise ValueError("last packet does not complete")
elif pages and pages[0].continued:
packets.append("")
for page in pages:
if serial != page.serial:
raise ValueError("invalid serial number in %r" % page)
elif sequence != page.sequence:
raise ValueError("bad sequence number in %r" % page)
else: sequence += 1
if page.continued: packets[-1] += page.packets[0]
else: packets.append(page.packets[0])
packets.extend(page.packets[1:])
return packets
to_packets = classmethod(to_packets)
def from_packets(klass, packets, sequence=0,
default_size=4096, wiggle_room=2048):
"""Construct a list of Ogg pages from a list of packet data.
The algorithm will generate pages of approximately
default_size in size (rounded down to the nearest multiple of
255). However, it will also allow pages to increase to
approximately default_size + wiggle_room if allowing the
wiggle room would finish a packet (only one packet will be
finished in this way per page; if the next packet would fit
into the wiggle room, it still starts on a new page).
This method reduces packet fragmentation when packet sizes are
slightly larger than the default page size, while still
ensuring most pages are of the average size.
Pages are numbered started at 'sequence'; other information is
uninitialized.
"""
chunk_size = (default_size // 255) * 255
pages = []
page = OggPage()
page.sequence = sequence
for packet in packets:
page.packets.append("")
while packet:
data, packet = packet[:chunk_size], packet[chunk_size:]
if page.size < default_size and len(page.packets) < 255:
page.packets[-1] += data
else:
# If we've put any packet data into this page yet,
# we need to mark it incomplete. However, we can
# also have just started this packet on an already
# full page, in which case, just start the new
# page with this packet.
if page.packets[-1]:
page.complete = False
if len(page.packets) == 1:
page.position = -1L
else:
page.packets.pop(-1)
pages.append(page)
page = OggPage()
page.continued = not pages[-1].complete
page.sequence = pages[-1].sequence + 1
page.packets.append(data)
if len(packet) < wiggle_room:
page.packets[-1] += packet
packet = ""
if page.packets:
pages.append(page)
return pages
from_packets = classmethod(from_packets)
def replace(klass, fileobj, old_pages, new_pages):
"""Replace old_pages with new_pages within fileobj.
old_pages must have come from reading fileobj originally.
new_pages are assumed to have the 'same' data as old_pages,
and so the serial and sequence numbers will be copied, as will
the flags for the first and last pages.
fileobj will be resized and pages renumbered as necessary. As
such, it must be opened r+b or w+b.
"""
# Number the new pages starting from the first old page.
first = old_pages[0].sequence
for page, seq in zip(new_pages, range(first, first + len(new_pages))):
page.sequence = seq
page.serial = old_pages[0].serial
new_pages[0].first = old_pages[0].first
new_pages[0].last = old_pages[0].last
new_pages[0].continued = old_pages[0].continued
new_pages[-1].first = old_pages[-1].first
new_pages[-1].last = old_pages[-1].last
new_pages[-1].complete = old_pages[-1].complete
if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
new_pages[-1].position = -1L
new_data = "".join(map(klass.write, new_pages))
# Make room in the file for the new data.
delta = len(new_data)
fileobj.seek(old_pages[0].offset, 0)
insert_bytes(fileobj, delta, old_pages[0].offset)
fileobj.seek(old_pages[0].offset, 0)
fileobj.write(new_data)
new_data_end = old_pages[0].offset + delta
# Go through the old pages and delete them. Since we shifted
# the data down the file, we need to adjust their offsets. We
# also need to go backwards, so we don't adjust the deltas of
# the other pages.
old_pages.reverse()
for old_page in old_pages:
adj_offset = old_page.offset + delta
delete_bytes(fileobj, old_page.size, adj_offset)
        # Finally, if there's any discrepancy in length, we need to
# renumber the pages for the logical stream.
if len(old_pages) != len(new_pages):
fileobj.seek(new_data_end, 0)
serial = new_pages[-1].serial
sequence = new_pages[-1].sequence + 1
klass.renumber(fileobj, serial, sequence)
replace = classmethod(replace)
def find_last(klass, fileobj, serial):
"""Find the last page of the stream 'serial'.
        If the file is not multiplexed, this function is fast. If it is,
        it must read the whole stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first.
"""
# For non-muxed streams, look at the last page.
try: fileobj.seek(-256*256, 2)
except IOError:
# The file is less than 64k in length.
fileobj.seek(0)
data = fileobj.read()
try: index = data.rindex("OggS")
except ValueError:
raise error("unable to find final Ogg header")
stringobj = StringIO(data[index:])
best_page = None
try:
page = OggPage(stringobj)
except error:
pass
else:
if page.serial == serial:
if page.last: return page
else: best_page = page
else: best_page = None
# The stream is muxed, so use the slow way.
fileobj.seek(0)
try:
page = OggPage(fileobj)
while not page.last:
page = OggPage(fileobj)
while page.serial != serial:
page = OggPage(fileobj)
best_page = page
return page
except error:
return best_page
except EOFError:
return best_page
find_last = classmethod(find_last)
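# Illustrative usage sketch (not part of the original module): a typical
# read-modify-write cycle with the OggPage API above. The file name and the
# packet edit are hypothetical placeholders.
#
#     fileobj = file("example.ogg", "rb+")
#     old_pages = [OggPage(fileobj)]
#     while not old_pages[-1].complete:
#         old_pages.append(OggPage(fileobj))
#     packets = OggPage.to_packets(old_pages)
#     packets[0] = packets[0]  # modify the packet data here
#     new_pages = OggPage.from_packets(packets, old_pages[0].sequence)
#     OggPage.replace(fileobj, old_pages, new_pages)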
class OggFileType(FileType):
"""An generic Ogg file."""
_Info = None
_Tags = None
_Error = None
_mimes = ["application/ogg", "application/x-ogg"]
def load(self, filename):
"""Load file information from a filename."""
self.filename = filename
fileobj = file(filename, "rb")
try:
try:
self.info = self._Info(fileobj)
self.tags = self._Tags(fileobj, self.info)
if self.info.length:
# The streaminfo gave us real length information,
# don't waste time scanning the Ogg.
return
last_page = OggPage.find_last(fileobj, self.info.serial)
samples = last_page.position
try:
denom = self.info.sample_rate
except AttributeError:
denom = self.info.fps
self.info.length = samples / float(denom)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def delete(self, filename=None):
"""Remove tags from a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
self.tags.clear()
fileobj = file(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def save(self, filename=None):
"""Save a tag to a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
fileobj = file(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
| gpl-2.0 |
elthariel/dff | api/gui/dialog/applymodule.py | 1 | 12581 | # DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2010 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Francois Percot <[email protected]>
#
from types import *
from PyQt4.QtGui import QAbstractItemView, QApplication, QCheckBox, QDialog, QGridLayout, QLabel, QMessageBox,QSplitter, QTableWidget, QTableWidgetItem, QVBoxLayout, QWidget
from PyQt4.QtCore import Qt, QObject, QRect, QSize, SIGNAL
# CORE
from api.loader import *
from api.env import *
from api.vfs import *
from api.taskmanager.taskmanager import *
from api.type import *
from api.gui.box.nodecombobox import NodeComboBox
from api.gui.box.stringcombobox import StringComboBox
from api.gui.box.boolcombobox import BoolComboBox
from api.gui.box.checkbox import CheckBoxWidgetEnable
from api.gui.button.pushbutton import PushButton
from api.gui.dialog.uiapplymodule import UiApplyModule
from api.gui.widget.applymoduletable import ApplyModuleTable
from ui.gui.utils.utils import DFF_Utils
class ApplyModule(QDialog, UiApplyModule):
def __init__(self, mainWindow):
QDialog.__init__(self, mainWindow)
UiApplyModule.__init__(self)
self.setupUi(self)
self.__mainWindow = mainWindow
self.loader = loader.loader()
self.env = env.env()
self.vfs = vfs.vfs()
self.initDialog()
self.initCallback()
def initDialog(self):
self.initArguments()
self.vlayout = QVBoxLayout(self)
self.vlayout.addWidget(self.label)
self.tableModules = ApplyModuleTable(self)
self.splitter = QSplitter(Qt.Vertical, self)
self.splitter.addWidget(self.tableModules)
self.splitter.addWidget(self.argumentsContainer)
self.vlayout.addWidget(self.splitter)
self.vlayout.addWidget(self.buttonBox)
def initCallback(self):
self.connect(self.tableModules, SIGNAL("currentItemChanged(QTableWidgetItem *, QTableWidgetItem *)"), self.currentItemChanged)
self.connect(self.buttonBox,SIGNAL("accepted()"), self.validateModule)
#self.connect(self.tableModules, SIGNAL("itemChanged(QTableWidgetItem *)"), self.itemChanged)
def currentItemChanged(self, itemCurrent, itemPrevious):
if itemCurrent :
if (itemPrevious and itemCurrent.row() <> itemPrevious.row()) or not itemPrevious:
if itemCurrent.column() == 1 :
itemType = itemCurrent
itemCurrent = self.tableModules.item(itemCurrent.row(), 0)
else :
itemType = self.tableModules.item(itemCurrent.row(), 1)
self.reloadAllArguments(str(itemCurrent.text()), str(itemType.text()))
self.tableModules.resizeTableModules()
self.tableModules.scrollToItem(itemCurrent)
def validateModule(self):
errorArg = []
for i in self.valueArgs :
if not i.optional :
if i.type == "node" :
node = self.valueArgs[i].currentNode()
if node is None :
errorArg.append(i)
else :
value = str(self.valueArgs[i].currentText())
if value == "" :
errorArg.append(i)
if len(errorArg) > 0 :
QMessageBox.warning(self, QApplication.translate("ApplyModule", "Missing Arguments", None, QApplication.UnicodeUTF8), QApplication.translate("ApplyModule", "There are missing arguments.", None, QApplication.UnicodeUTF8))
else :
self.accept()
def initAllInformations(self, nameModule, typeModule, nodesSelected):
self.__nodesSelected = nodesSelected
self.deleteAllArguments()
self.deleteList()
self.fillListModules()
if nameModule <> None :
self.loadOneItem(nameModule, typeModule)
else :
self.deleteAllArguments()
self.tableModules.setColumnWidth(0, 333)
self.tableModules.setColumnWidth(1, 43)
###### MANAGE QTABLEWIDGET ######
def deleteList(self):
self.tableModules.clearContents()
for i in range(0, self.tableModules.rowCount()) :
self.tableModules.removeRow(0)
def fillListModules(self):
modules = self.loader.modules
self.tableModules.setSortingEnabled(False)
row = self.tableModules.rowCount()
self.tableModules.setRowCount(row + len(modules))
for mod in modules :
#if str(script) <> "opendump" and type(script) == StringType :
item = QTableWidgetItem(str(mod))
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
item2 = QTableWidgetItem(modules[mod].tags)
item2.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
self.tableModules.setItem(row, 0, item)
self.tableModules.setItem(row, 1, item2)
row = row + 1
def selectedItem(self, nameModule):
for i in range(0, self.tableModules.rowCount()) :
item = self.tableModules.item(i, 0)
if (item.text() == nameModule) :
self.tableModules.setCurrentItem(item)
return
def loadOneItem(self, nameModule, typeModule):
self.tableModules.setRowCount(1)
item = QTableWidgetItem(str(nameModule))
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
item2 = QTableWidgetItem(str(typeModule))
item2.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
self.tableModules.setItem(0, 0, item)
self.tableModules.setItem(0, 1, item2)
self.tableModules.setCurrentItem(item)
####### MANAGE ARGUMENTS
def initArguments(self):
self.argumentsContainer = QWidget(self)
self.gridArgs = QGridLayout(self.argumentsContainer)
self.labelArgs = {}
self.valueArgs = {}
self.checkBoxArgs = {}
self.hboxArgs = {}
self.browserButtons = {}
def deleteAllArguments(self):
if self.argumentsContainer <> None :
listarg = self.argumentsContainer.children()
for i in self.labelArgs :
self.gridArgs.removeWidget(self.labelArgs[i])
#self.labelArgs[i].hide()
self.labelArgs[i].deleteLater()
if self.valueArgs[i] <> None :
self.gridArgs.removeWidget(self.valueArgs[i])
#self.valueArgs[i].hide()
self.valueArgs[i].deleteLater()
if self.browserButtons[i] != None :
self.gridArgs.removeWidget(self.browserButtons[i])
#self.browserButtons[i].hide()
self.browserButtons[i].deleteLater()
if self.checkBoxArgs[i] != None :
self.gridArgs.removeWidget(self.checkBoxArgs[i])
#self.checkBoxArgs[i].hide()
self.checkBoxArgs[i].deleteLater()
self.valueArgs.clear()
self.labelArgs.clear()
self.browserButtons.clear()
self.checkBoxArgs.clear()
# if self.argumentsContainer <> None :
# listarg = self.argumentsContainer.children()
# self.argumentsContainer.destroy(True, True)
# self.argumentsContainer = None
def reloadAllArguments(self, nameModule, type):
self.deleteAllArguments()
if self.argumentsContainer == None :
self.argumentsContainer = QWidget(self)
iterator = 0
args = DFF_Utils.getArgs(nameModule)
vars_db = self.env.vars_db
for arg in args:
label = QLabel(arg.name + " ( "+ str(arg.type) + " ) " + ":", self.argumentsContainer)
label.setMinimumSize(QSize(80, 28))
label.setMaximumSize(QSize(120, 28))
list = self.env.getValuesInDb(arg.name, arg.type)
if arg.type == "node" :
value = NodeComboBox(self.argumentsContainer)
for i in range(0, len(list)) :
value.addPath(list[i])
button = PushButton(self.argumentsContainer, value, arg.name, self.__mainWindow.QSelectNodes , self.__mainWindow.dockNodeTree.treeItemModel.rootItemVFS.node)
currentItem = self.__mainWindow.dockNodeTree.treeView.getCurrentItem()
value.addPath(currentItem.node)
if self.__nodesSelected :
list = self.__nodesSelected
for i in range(0, len(self.__nodesSelected)):
value.addPath(self.__nodesSelected[i])
elif arg.type == "int":
value = StringComboBox(self.argumentsContainer)
value.setEditable(True)
for i in range(0, len(list)) :
value.addPath(str(list[i]))
button = None
elif arg.type == "string":
value = StringComboBox(self.argumentsContainer)
value.setEditable(True)
for i in range(0, len(list)) :
value.addPath(list[i])
button = None
elif arg.type == "path" :
value = StringComboBox(self.argumentsContainer)
value.setEditable(True)
for i in range(0, len(list)) :
value.addPath(list[i])
button = PushButton(self.argumentsContainer, value, arg.name)
elif arg.type == "bool" :
value = BoolComboBox(self.argumentsContainer)
button = None
if arg.optional :
checkBox = CheckBoxWidgetEnable(self.argumentsContainer, label, value, button)
else :
checkBox = None
self.gridArgs.addWidget(label, iterator, 0)
if value != None :
self.gridArgs.addWidget(value, iterator, 1)
if button != None:
self.gridArgs.addWidget(button, iterator, 2)
if checkBox != None :
self.gridArgs.addWidget(checkBox, iterator, 3)
value.setCurrentIndex(value.count() - 1)
self.labelArgs[arg] = label
self.valueArgs[arg] = value
self.checkBoxArgs[arg] = checkBox
self.browserButtons[arg] = button
iterator = iterator + 1
def currentType(self):
item = self.tableModules.currentItem()
if item.column() == 0 :
item = self.tableModules.item(item.row() , 1)
return str(item.text())
def currentModuleName(self):
item = self.tableModules.currentItem()
if item.column() == 1 :
item = self.tableModules.item(item.row(), 0)
return str(item.text())
# get Arguments
def getDFFArguments(self):
self.arg = self.env.libenv.argument("gui_input")
self.arg.thisown = 0
for i in self.valueArgs :
if i.type == "node" :
self.arg.add_node(str(i.name), self.valueArgs[i].currentNode())
# print DFF_Utils.getPath(self.valueArgs[i].currentNode())
else :
value = str(self.valueArgs[i].currentText())
if i.type == "path" :
tmp = libtype.Path(str(value))
tmp.thisown = 0
self.arg.add_path(str(i.name), tmp)
elif i.type == "int" :
self.arg.add_int(str(i.name), int(value))
elif i.type == "string" :
self.arg.add_string(str(i.name), value)
elif i.type == "bool" :
if value == "True" :
value = 1
else :
value = 0
self.arg.add_bool(str(i.name), int(value))
self.taskmanager = TaskManager()
modules = self.currentModuleName()
self.taskmanager.add(str(modules), self.arg, ["thread", "gui"])
return self.arg
| gpl-2.0 |
jmarsik/mopidy | mopidy/http/handlers.py | 1 | 7595 | from __future__ import absolute_import, unicode_literals
import functools
import logging
import os
import socket
import tornado.escape
import tornado.ioloop
import tornado.web
import tornado.websocket
import mopidy
from mopidy import core, models
from mopidy.internal import encoding, jsonrpc
logger = logging.getLogger(__name__)
def make_mopidy_app_factory(apps, statics):
def mopidy_app_factory(config, core):
return [
(r'/ws/?', WebSocketHandler, {
'core': core,
}),
(r'/rpc', JsonRpcHandler, {
'core': core,
}),
(r'/(.+)', StaticFileHandler, {
'path': os.path.join(os.path.dirname(__file__), 'data'),
}),
(r'/', ClientListHandler, {
'apps': apps,
'statics': statics,
}),
]
return mopidy_app_factory
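# Minimal usage sketch (an assumption, not part of this module): the factory
# returns a list of Tornado handler specs, so it can be mounted directly in a
# tornado.web.Application. 'config' and 'core_proxy' are placeholders for the
# objects the HTTP frontend normally supplies.
#
#     app_factory = make_mopidy_app_factory(apps=[], statics=[])
#     application = tornado.web.Application(app_factory(config, core_proxy))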
def make_jsonrpc_wrapper(core_actor):
    objects = {
'core.get_uri_schemes': core.Core.get_uri_schemes,
'core.get_version': core.Core.get_version,
'core.history': core.HistoryController,
'core.library': core.LibraryController,
'core.mixer': core.MixerController,
'core.playback': core.PlaybackController,
'core.playlists': core.PlaylistsController,
'core.tracklist': core.TracklistController,
'core.service': core.ServiceController,
}
services = core_actor.get_public_service_classes().get()
for t in services.keys():
objects[t] = services[t]
inspector = jsonrpc.JsonRpcInspector(objects)
    objects = {
'core.describe': inspector.describe,
'core.get_uri_schemes': core_actor.get_uri_schemes,
'core.get_version': core_actor.get_version,
'core.history': core_actor.history,
'core.library': core_actor.library,
'core.mixer': core_actor.mixer,
'core.playback': core_actor.playback,
'core.playlists': core_actor.playlists,
'core.tracklist': core_actor.tracklist,
'core.service': core_actor.service,
}
services = core_actor.get_public_services().get()
for t in services.keys():
objects[t] = services[t]
return jsonrpc.JsonRpcWrapper(
objects,
decoders=[models.model_json_decoder],
encoders=[models.ModelJSONEncoder]
)
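# Hedged sketch (the request payload is an illustration): the wrapper exposes
# the core API over JSON-RPC 2.0, and handle_json() is what the handlers below
# feed with raw message strings.
#
#     wrapper = make_jsonrpc_wrapper(core_actor)
#     wrapper.handle_json(
#         '{"jsonrpc": "2.0", "id": 1, "method": "core.get_version"}')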
def _send_broadcast(client, msg):
# We could check for client.ws_connection, but we don't really
# care why the broadcast failed, we just want the rest of them
# to succeed, so catch everything.
try:
client.write_message(msg)
except Exception as e:
error_msg = encoding.locale_decode(e)
logger.debug('Broadcast of WebSocket message to %s failed: %s',
client.request.remote_ip, error_msg)
# TODO: should this do the same cleanup as the on_message code?
class WebSocketHandler(tornado.websocket.WebSocketHandler):
# XXX This set is shared by all WebSocketHandler objects. This isn't
# optimal, but there's currently no use case for having more than one of
# these anyway.
clients = set()
@classmethod
def broadcast(cls, msg):
if hasattr(tornado.ioloop.IOLoop, 'current'):
loop = tornado.ioloop.IOLoop.current()
else:
loop = tornado.ioloop.IOLoop.instance() # Fallback for pre 3.0
# This can be called from outside the Tornado ioloop, so we need to
# safely cross the thread boundary by adding a callback to the loop.
for client in cls.clients:
# One callback per client to keep time we hold up the loop short
# NOTE: Pre 3.0 does not support *args or **kwargs...
loop.add_callback(functools.partial(_send_broadcast, client, msg))
def initialize(self, core):
self.jsonrpc = make_jsonrpc_wrapper(core)
def open(self):
if hasattr(self, 'set_nodelay'):
# New in Tornado 3.1
self.set_nodelay(True)
else:
self.stream.socket.setsockopt(
socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.clients.add(self)
logger.debug(
'New WebSocket connection from %s', self.request.remote_ip)
def on_close(self):
self.clients.discard(self)
logger.debug(
'Closed WebSocket connection from %s',
self.request.remote_ip)
def on_message(self, message):
if not message:
return
logger.debug(
'Received WebSocket message from %s: %r',
self.request.remote_ip, message)
try:
response = self.jsonrpc.handle_json(
tornado.escape.native_str(message))
if response and self.write_message(response):
logger.debug(
'Sent WebSocket message to %s: %r',
self.request.remote_ip, response)
except Exception as e:
error_msg = encoding.locale_decode(e)
logger.error('WebSocket request error: %s', error_msg)
if self.ws_connection:
# Tornado 3.2+ checks if self.ws_connection is None before
# using it, but not older versions.
self.close()
def check_origin(self, origin):
# Allow cross-origin WebSocket connections, like Tornado before 4.0
# defaulted to.
return True
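# Usage sketch (an assumption): broadcast() is a classmethod, so code running
# outside the Tornado IO loop can push a message to every connected WebSocket
# client without holding a handler instance. The payload below is made up.
#
#     WebSocketHandler.broadcast('{"event": "playback_state_changed"}')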
def set_mopidy_headers(request_handler):
request_handler.set_header('Cache-Control', 'no-cache')
request_handler.set_header(
'X-Mopidy-Version', mopidy.__version__.encode('utf-8'))
class JsonRpcHandler(tornado.web.RequestHandler):
def initialize(self, core):
self.jsonrpc = make_jsonrpc_wrapper(core)
def head(self):
self.set_extra_headers()
self.finish()
def post(self):
data = self.request.body
if not data:
return
logger.debug(
'Received RPC message from %s: %r', self.request.remote_ip, data)
try:
self.set_extra_headers()
response = self.jsonrpc.handle_json(
tornado.escape.native_str(data))
if response and self.write(response):
logger.debug(
'Sent RPC message to %s: %r',
self.request.remote_ip, response)
except Exception as e:
logger.error('HTTP JSON-RPC request error: %s', e)
self.write_error(500)
def set_extra_headers(self):
set_mopidy_headers(self)
self.set_header('Accept', 'application/json')
self.set_header('Content-Type', 'application/json; utf-8')
class ClientListHandler(tornado.web.RequestHandler):
def initialize(self, apps, statics):
self.apps = apps
self.statics = statics
def get_template_path(self):
return os.path.dirname(__file__)
def get(self):
set_mopidy_headers(self)
names = set()
for app in self.apps:
names.add(app['name'])
for static in self.statics:
names.add(static['name'])
names.discard('mopidy')
self.render('data/clients.html', apps=sorted(list(names)))
class StaticFileHandler(tornado.web.StaticFileHandler):
def set_extra_headers(self, path):
set_mopidy_headers(self)
class AddSlashHandler(tornado.web.RequestHandler):
@tornado.web.addslash
def prepare(self):
return super(AddSlashHandler, self).prepare()
| apache-2.0 |
tannoa2/RackHD | test/tests/rackhd20/test_rackhd20_api_tags.py | 13 | 2375 | '''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import fit_path # NOQA: unused import
import os
import sys
import subprocess
import fit_common
# Local methods
MON_NODES = fit_common.node_select()
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd20_api_tags(fit_common.unittest.TestCase):
def test_api_20_nodes_ID_tags(self):
# iterate through nodes
for nodeid in MON_NODES:
#add tag
api_data = fit_common.rackhdapi("/api/2.0/nodes/" + nodeid + "/tags", action="patch", payload={"tags":["test_tag_" + nodeid]})
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
#check tag
api_data = fit_common.rackhdapi("/api/2.0/nodes/" + nodeid + "/tags")
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
self.assertIn("test_tag_" + nodeid, fit_common.json.dumps(api_data['json']), "Tag not set:" + fit_common.json.dumps(api_data['json']))
def test_api_20_tags_post_delete(self):
# create dummy node
data_payload = {"name": "testnode", "identifiers": ["FF", "FF"], "type": "compute"}
nodeid = fit_common.rackhdapi("/api/2.0/nodes", action='post', payload=data_payload)['json']['id']
# add tags
api_data = fit_common.rackhdapi("/api/2.0/nodes/" + nodeid + "/tags", action="patch", payload={"tags":["test_node","dummy_node"]})
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
# check tags
api_data = fit_common.rackhdapi("/api/2.0/tags/test_node/nodes")
self.assertIn("test_node", fit_common.json.dumps(api_data['json']), "Tag not set:" + fit_common.json.dumps(api_data['json']))
self.assertIn("dummy_node", fit_common.json.dumps(api_data['json']), "Tag not set:" + fit_common.json.dumps(api_data['json']))
# delete node
api_data = fit_common.rackhdapi("/api/2.0/nodes/" + nodeid, action="delete")
self.assertEqual(api_data['status'], 204, 'Incorrect HTTP return code, expected 204, got:' + str(api_data['status']))
if __name__ == '__main__':
fit_common.unittest.main()
| apache-2.0 |
EdLogan18/logan-repository | plugin.video.igorlista/mechanize/_html.py | 132 | 20888 | """HTML handling.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import codecs
import copy
import htmlentitydefs
import re
import _sgmllib_copy as sgmllib
import _beautifulsoup
import _form
from _headersutil import split_header_words, is_html as _is_html
import _request
import _rfc3986
DEFAULT_ENCODING = "latin-1"
COMPRESS_RE = re.compile(r"\s+")
class CachingGeneratorFunction(object):
"""Caching wrapper around a no-arguments iterable."""
def __init__(self, iterable):
self._cache = []
# wrap iterable to make it non-restartable (otherwise, repeated
# __call__ would give incorrect results)
self._iterator = iter(iterable)
def __call__(self):
cache = self._cache
for item in cache:
yield item
for item in self._iterator:
cache.append(item)
yield item
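# Usage sketch (not in the original source): the wrapper can be called
# repeatedly even though the wrapped iterable is consumed only once.
#
#     cached = CachingGeneratorFunction(iter([1, 2, 3]))
#     list(cached())  # -> [1, 2, 3], consumes the iterator
#     list(cached())  # -> [1, 2, 3] again, replayed from the cache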
class EncodingFinder:
def __init__(self, default_encoding):
self._default_encoding = default_encoding
def encoding(self, response):
# HTTPEquivProcessor may be in use, so both HTTP and HTTP-EQUIV
# headers may be in the response. HTTP-EQUIV headers come last,
# so try in order from first to last.
for ct in response.info().getheaders("content-type"):
for k, v in split_header_words([ct])[0]:
if k == "charset":
encoding = v
try:
codecs.lookup(v)
except LookupError:
continue
else:
return encoding
return self._default_encoding
class ResponseTypeFinder:
def __init__(self, allow_xhtml):
self._allow_xhtml = allow_xhtml
def is_html(self, response, encoding):
ct_hdrs = response.info().getheaders("content-type")
url = response.geturl()
# XXX encoding
return _is_html(ct_hdrs, url, self._allow_xhtml)
class Args(object):
# idea for this argument-processing trick is from Peter Otten
def __init__(self, args_map):
self.__dict__["dictionary"] = dict(args_map)
def __getattr__(self, key):
try:
return self.dictionary[key]
except KeyError:
return getattr(self.__class__, key)
def __setattr__(self, key, value):
if key == "dictionary":
raise AttributeError()
self.dictionary[key] = value
def form_parser_args(
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
return Args(locals())
class Link:
def __init__(self, base_url, url, text, tag, attrs):
assert None not in [url, tag, attrs]
self.base_url = base_url
self.absolute_url = _rfc3986.urljoin(base_url, url)
self.url, self.text, self.tag, self.attrs = url, text, tag, attrs
def __cmp__(self, other):
try:
for name in "url", "text", "tag", "attrs":
if getattr(self, name) != getattr(other, name):
return -1
except AttributeError:
return -1
return 0
def __repr__(self):
return "Link(base_url=%r, url=%r, text=%r, tag=%r, attrs=%r)" % (
self.base_url, self.url, self.text, self.tag, self.attrs)
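# Illustrative example (values are made up): Link resolves the absolute URL at
# construction time via _rfc3986.urljoin, so
#
#     link = Link("http://example.com/a/", "b.html", "b", "a",
#                 [("href", "b.html")])
#     link.absolute_url  # -> "http://example.com/a/b.html"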
class LinksFactory:
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
import _pullparser
if link_parser_class is None:
link_parser_class = _pullparser.TolerantPullParser
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._response = None
self._encoding = None
def set_response(self, response, base_url, encoding):
self._response = response
self._encoding = encoding
self._base_url = base_url
def links(self):
"""Return an iterator that provides links of the document."""
response = self._response
encoding = self._encoding
base_url = self._base_url
p = self.link_parser_class(response, encoding=encoding)
try:
for token in p.tags(*(self.urltags.keys()+["base"])):
if token.type == "endtag":
continue
if token.data == "base":
base_href = dict(token.attrs).get("href")
if base_href is not None:
base_url = base_href
continue
attrs = dict(token.attrs)
tag = token.data
text = None
# XXX use attr_encoding for ref'd doc if that doc does not
# provide one by other means
#attr_encoding = attrs.get("charset")
url = attrs.get(self.urltags[tag]) # XXX is "" a valid URL?
if not url:
# Probably an <A NAME="blah"> link or <AREA NOHREF...>.
# For our purposes a link is something with a URL, so
# ignore this.
continue
url = _rfc3986.clean_url(url, encoding)
if tag == "a":
if token.type != "startendtag":
# hmm, this'd break if end tag is missing
text = p.get_compressed_text(("endtag", tag))
# but this doesn't work for e.g.
# <a href="blah"><b>Andy</b></a>
#text = p.get_compressed_text()
yield Link(base_url, url, text, tag, token.attrs)
except sgmllib.SGMLParseError, exc:
raise _form.ParseError(exc)
class FormsFactory:
"""Makes a sequence of objects satisfying HTMLForm interface.
After calling .forms(), the .global_form attribute is a form object
    containing all controls that are not descendants of any FORM element.
For constructor argument docs, see ParseResponse argument docs.
"""
def __init__(self,
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
self.select_default = select_default
if form_parser_class is None:
form_parser_class = _form.FormParser
self.form_parser_class = form_parser_class
if request_class is None:
request_class = _request.Request
self.request_class = request_class
self.backwards_compat = backwards_compat
self._response = None
self.encoding = None
self.global_form = None
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
self.global_form = None
def forms(self):
encoding = self.encoding
forms = _form.ParseResponseEx(
self._response,
select_default=self.select_default,
form_parser_class=self.form_parser_class,
request_class=self.request_class,
encoding=encoding,
_urljoin=_rfc3986.urljoin,
_urlparse=_rfc3986.urlsplit,
_urlunparse=_rfc3986.urlunsplit,
)
self.global_form = forms[0]
return forms[1:]
class TitleFactory:
def __init__(self):
self._response = self._encoding = None
def set_response(self, response, encoding):
self._response = response
self._encoding = encoding
def _get_title_text(self, parser):
import _pullparser
text = []
tok = None
while 1:
try:
tok = parser.get_token()
except _pullparser.NoMoreTokensError:
break
if tok.type == "data":
text.append(str(tok))
elif tok.type == "entityref":
t = unescape("&%s;" % tok.data,
parser._entitydefs, parser.encoding)
text.append(t)
elif tok.type == "charref":
t = unescape_charref(tok.data, parser.encoding)
text.append(t)
elif tok.type in ["starttag", "endtag", "startendtag"]:
tag_name = tok.data
if tok.type == "endtag" and tag_name == "title":
break
text.append(str(tok))
return COMPRESS_RE.sub(" ", "".join(text).strip())
def title(self):
import _pullparser
p = _pullparser.TolerantPullParser(
self._response, encoding=self._encoding)
try:
try:
p.get_tag("title")
except _pullparser.NoMoreTokensError:
return None
else:
return self._get_title_text(p)
except sgmllib.SGMLParseError, exc:
raise _form.ParseError(exc)
def unescape(data, entities, encoding):
if data is None or "&" not in data:
return data
def replace_entities(match):
ent = match.group()
if ent[1] == "#":
return unescape_charref(ent[2:-1], encoding)
repl = entities.get(ent[1:-1])
if repl is not None:
repl = unichr(repl)
if type(repl) != type(""):
try:
repl = repl.encode(encoding)
except UnicodeError:
repl = ent
else:
repl = ent
return repl
return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
def unescape_charref(data, encoding):
name, base = data, 10
if name.startswith("x"):
        name, base = name[1:], 16
uc = unichr(int(name, base))
if encoding is None:
return uc
else:
try:
repl = uc.encode(encoding)
except UnicodeError:
repl = "&#%s;" % data
return repl
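# Illustrative sketch (not part of the original mechanize module): the
# hypothetical helper below shows the expected behaviour of the two helpers
# above -- unescape() resolves named entities via the supplied name->codepoint
# map, and unescape_charref() handles decimal and hexadecimal references.
def _example_unescape():
    named = unescape("Tom &amp; Jerry", {"amp": ord("&")}, "ascii")  # "Tom & Jerry"
    hex_ref = unescape_charref("x20", "ascii")                       # " "
    dec_ref = unescape_charref("32", "ascii")                        # " "
    return named, hex_ref, dec_ref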
class MechanizeBs(_beautifulsoup.BeautifulSoup):
_entitydefs = htmlentitydefs.name2codepoint
# don't want the magic Microsoft-char workaround
PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda(x):x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda(x):'<!' + x.group(1) + '>')
]
def __init__(self, encoding, text=None, avoidParserProblems=True,
initialTextIsEverything=True):
self._encoding = encoding
_beautifulsoup.BeautifulSoup.__init__(
self, text, avoidParserProblems, initialTextIsEverything)
def handle_charref(self, ref):
t = unescape("&#%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def handle_entityref(self, ref):
t = unescape("&%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def unescape_attrs(self, attrs):
escaped_attrs = []
for key, val in attrs:
val = unescape(val, self._entitydefs, self._encoding)
escaped_attrs.append((key, val))
return escaped_attrs
class RobustLinksFactory:
compress_re = COMPRESS_RE
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
if link_parser_class is None:
link_parser_class = MechanizeBs
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._bs = None
self._encoding = None
self._base_url = None
def set_soup(self, soup, base_url, encoding):
self._bs = soup
self._base_url = base_url
self._encoding = encoding
def links(self):
bs = self._bs
base_url = self._base_url
encoding = self._encoding
for ch in bs.recursiveChildGenerator():
if (isinstance(ch, _beautifulsoup.Tag) and
ch.name in self.urltags.keys()+["base"]):
link = ch
attrs = bs.unescape_attrs(link.attrs)
attrs_dict = dict(attrs)
if link.name == "base":
base_href = attrs_dict.get("href")
if base_href is not None:
base_url = base_href
continue
url_attr = self.urltags[link.name]
url = attrs_dict.get(url_attr)
if not url:
continue
url = _rfc3986.clean_url(url, encoding)
text = link.fetchText(lambda t: True)
if not text:
# follow _pullparser's weird behaviour rigidly
if link.name == "a":
text = ""
else:
text = None
else:
text = self.compress_re.sub(" ", " ".join(text).strip())
yield Link(base_url, url, text, link.name, attrs)
class RobustFormsFactory(FormsFactory):
def __init__(self, *args, **kwds):
args = form_parser_args(*args, **kwds)
if args.form_parser_class is None:
args.form_parser_class = _form.RobustFormParser
FormsFactory.__init__(self, **args.dictionary)
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
class RobustTitleFactory:
def __init__(self):
self._bs = self._encoding = None
def set_soup(self, soup, encoding):
self._bs = soup
self._encoding = encoding
def title(self):
title = self._bs.first("title")
if title == _beautifulsoup.Null:
return None
else:
inner_html = "".join([str(node) for node in title.contents])
return COMPRESS_RE.sub(" ", inner_html.strip())
class Factory:
"""Factory for forms, links, etc.
This interface may expand in future.
Public methods:
set_request_class(request_class)
set_response(response)
forms()
links()
Public attributes:
Note that accessing these attributes may raise ParseError.
encoding: string specifying the encoding of response if it contains a text
document (this value is left unspecified for documents that do not have
an encoding, e.g. an image file)
is_html: true if response contains an HTML document (XHTML may be
regarded as HTML too)
title: page title, or None if no title or not HTML
global_form: form object containing all controls that are not descendants
of any FORM element, or None if the forms_factory does not support
supplying a global form
"""
LAZY_ATTRS = ["encoding", "is_html", "title", "global_form"]
def __init__(self, forms_factory, links_factory, title_factory,
encoding_finder=EncodingFinder(DEFAULT_ENCODING),
response_type_finder=ResponseTypeFinder(allow_xhtml=False),
):
"""
Pass keyword arguments only.
default_encoding: character encoding to use if encoding cannot be
determined (or guessed) from the response. You should turn on
HTTP-EQUIV handling if you want the best chance of getting this right
without resorting to this default. The default value of this
parameter (currently latin-1) may change in future.
"""
self._forms_factory = forms_factory
self._links_factory = links_factory
self._title_factory = title_factory
self._encoding_finder = encoding_finder
self._response_type_finder = response_type_finder
self.set_response(None)
def set_request_class(self, request_class):
"""Set request class (mechanize.Request by default).
HTMLForm instances returned by .forms() will return instances of this
class when .click()ed.
"""
self._forms_factory.request_class = request_class
def set_response(self, response):
"""Set response.
The response must either be None or implement the same interface as
objects returned by mechanize.urlopen().
"""
self._response = response
self._forms_genf = self._links_genf = None
self._get_title = None
for name in self.LAZY_ATTRS:
try:
delattr(self, name)
except AttributeError:
pass
def __getattr__(self, name):
if name not in self.LAZY_ATTRS:
return getattr(self.__class__, name)
if name == "encoding":
self.encoding = self._encoding_finder.encoding(
copy.copy(self._response))
return self.encoding
elif name == "is_html":
self.is_html = self._response_type_finder.is_html(
copy.copy(self._response), self.encoding)
return self.is_html
elif name == "title":
if self.is_html:
self.title = self._title_factory.title()
else:
self.title = None
return self.title
elif name == "global_form":
self.forms()
return self.global_form
def forms(self):
"""Return iterable over HTMLForm-like objects.
Raises mechanize.ParseError on failure.
"""
# this implementation sets .global_form as a side-effect, for benefit
# of __getattr__ impl
if self._forms_genf is None:
try:
self._forms_genf = CachingGeneratorFunction(
self._forms_factory.forms())
except: # XXXX define exception!
self.set_response(self._response)
raise
self.global_form = getattr(
self._forms_factory, "global_form", None)
return self._forms_genf()
def links(self):
"""Return iterable over mechanize.Link-like objects.
Raises mechanize.ParseError on failure.
"""
if self._links_genf is None:
try:
self._links_genf = CachingGeneratorFunction(
self._links_factory.links())
except: # XXXX define exception!
self.set_response(self._response)
raise
return self._links_genf()
class DefaultFactory(Factory):
"""Based on sgmllib."""
def __init__(self, i_want_broken_xhtml_support=False):
Factory.__init__(
self,
forms_factory=FormsFactory(),
links_factory=LinksFactory(),
title_factory=TitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_response(
copy.copy(response), response.geturl(), self.encoding)
self._title_factory.set_response(
copy.copy(response), self.encoding)
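# Illustrative sketch (not part of the original mechanize module): typical
# Factory lifecycle.  The "response" parameter is an assumption -- callers
# must pass an object implementing the interface returned by
# mechanize.urlopen(); everything else uses the classes defined above.
def _example_default_factory(response):
    factory = DefaultFactory()
    factory.set_response(response)
    if factory.is_html:                  # lazy attribute, computed on first use
        title = factory.title            # None when the page has no <title>
        forms = list(factory.forms())    # cached, so iterating twice is safe
        links = list(factory.links())
        return title, len(forms), len(links)
    return None, 0, 0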
class RobustFactory(Factory):
"""Based on BeautifulSoup, hopefully a bit more robust to bad HTML than is
DefaultFactory.
"""
def __init__(self, i_want_broken_xhtml_support=False,
soup_class=None):
Factory.__init__(
self,
forms_factory=RobustFormsFactory(),
links_factory=RobustLinksFactory(),
title_factory=RobustTitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
if soup_class is None:
soup_class = MechanizeBs
self._soup_class = soup_class
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
data = response.read()
soup = self._soup_class(self.encoding, data)
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_soup(
soup, response.geturl(), self.encoding)
self._title_factory.set_soup(soup, self.encoding)
| gpl-2.0 |
PoornimaNayak/autotest-client-tests | linux-tools/perl_IO_Socket_SSL/perl_IO_Socket_SSL.py | 4 | 1280 | #!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class perl_IO_Socket_SSL(test.test):
"""
Autotest module for testing basic functionality
of perl_IO_Socket_SSL
    @author Athira Rajeev <[email protected]>
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./perl-IO-Socket-SSL.sh'], cwd="%s/perl_IO_Socket_SSL" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
| gpl-2.0 |
HackerTool/vivisect | vstruct/defs/windows/win_5_1_i386/ntoskrnl.py | 7 | 243293 | # Version: 5.1
# Architecture: i386
import vstruct
from vstruct.primitives import *
POLICY_AUDIT_EVENT_TYPE = v_enum()
POLICY_AUDIT_EVENT_TYPE.AuditCategorySystem = 0
POLICY_AUDIT_EVENT_TYPE.AuditCategoryLogon = 1
POLICY_AUDIT_EVENT_TYPE.AuditCategoryObjectAccess = 2
POLICY_AUDIT_EVENT_TYPE.AuditCategoryPrivilegeUse = 3
POLICY_AUDIT_EVENT_TYPE.AuditCategoryDetailedTracking = 4
POLICY_AUDIT_EVENT_TYPE.AuditCategoryPolicyChange = 5
POLICY_AUDIT_EVENT_TYPE.AuditCategoryAccountManagement = 6
POLICY_AUDIT_EVENT_TYPE.AuditCategoryDirectoryServiceAccess = 7
POLICY_AUDIT_EVENT_TYPE.AuditCategoryAccountLogon = 8
KINTERRUPT_MODE = v_enum()
KINTERRUPT_MODE.LevelSensitive = 0
KINTERRUPT_MODE.Latched = 1
ARBITER_REQUEST_SOURCE = v_enum()
ARBITER_REQUEST_SOURCE.ArbiterRequestUndefined = -1
ARBITER_REQUEST_SOURCE.ArbiterRequestLegacyReported = 0
ARBITER_REQUEST_SOURCE.ArbiterRequestHalReported = 1
ARBITER_REQUEST_SOURCE.ArbiterRequestLegacyAssigned = 2
ARBITER_REQUEST_SOURCE.ArbiterRequestPnpDetected = 3
ARBITER_REQUEST_SOURCE.ArbiterRequestPnpEnumerated = 4
DEVICE_RELATION_TYPE = v_enum()
DEVICE_RELATION_TYPE.BusRelations = 0
DEVICE_RELATION_TYPE.EjectionRelations = 1
DEVICE_RELATION_TYPE.PowerRelations = 2
DEVICE_RELATION_TYPE.RemovalRelations = 3
DEVICE_RELATION_TYPE.TargetDeviceRelation = 4
DEVICE_RELATION_TYPE.SingleBusRelations = 5
IO_ALLOCATION_ACTION = v_enum()
IO_ALLOCATION_ACTION.KeepObject = 1
IO_ALLOCATION_ACTION.DeallocateObject = 2
IO_ALLOCATION_ACTION.DeallocateObjectKeepRegisters = 3
BUS_QUERY_ID_TYPE = v_enum()
BUS_QUERY_ID_TYPE.BusQueryDeviceID = 0
BUS_QUERY_ID_TYPE.BusQueryHardwareIDs = 1
BUS_QUERY_ID_TYPE.BusQueryCompatibleIDs = 2
BUS_QUERY_ID_TYPE.BusQueryInstanceID = 3
BUS_QUERY_ID_TYPE.BusQueryDeviceSerialNumber = 4
MMSYSTEM_PTE_POOL_TYPE = v_enum()
MMSYSTEM_PTE_POOL_TYPE.SystemPteSpace = 0
MMSYSTEM_PTE_POOL_TYPE.NonPagedPoolExpansion = 1
MMSYSTEM_PTE_POOL_TYPE.MaximumPtePoolTypes = 2
POP_POLICY_DEVICE_TYPE = v_enum()
POP_POLICY_DEVICE_TYPE.PolicyDeviceSystemButton = 0
POP_POLICY_DEVICE_TYPE.PolicyDeviceThermalZone = 1
POP_POLICY_DEVICE_TYPE.PolicyDeviceBattery = 2
POP_POLICY_DEVICE_TYPE.PolicyInitiatePowerActionAPI = 3
POP_POLICY_DEVICE_TYPE.PolicySetPowerStateAPI = 4
POP_POLICY_DEVICE_TYPE.PolicyImmediateDozeS4 = 5
POP_POLICY_DEVICE_TYPE.PolicySystemIdle = 6
MEMORY_CACHING_TYPE = v_enum()
MEMORY_CACHING_TYPE.MmNonCached = 0
MEMORY_CACHING_TYPE.MmCached = 1
MEMORY_CACHING_TYPE.MmWriteCombined = 2
MEMORY_CACHING_TYPE.MmHardwareCoherentCached = 3
MEMORY_CACHING_TYPE.MmNonCachedUnordered = 4
MEMORY_CACHING_TYPE.MmUSWCCached = 5
MEMORY_CACHING_TYPE.MmMaximumCacheType = 6
NT_PRODUCT_TYPE = v_enum()
NT_PRODUCT_TYPE.NtProductWinNt = 1
NT_PRODUCT_TYPE.NtProductLanManNt = 2
NT_PRODUCT_TYPE.NtProductServer = 3
DEVICE_POWER_STATE = v_enum()
DEVICE_POWER_STATE.PowerDeviceUnspecified = 0
DEVICE_POWER_STATE.PowerDeviceD0 = 1
DEVICE_POWER_STATE.PowerDeviceD1 = 2
DEVICE_POWER_STATE.PowerDeviceD2 = 3
DEVICE_POWER_STATE.PowerDeviceD3 = 4
DEVICE_POWER_STATE.PowerDeviceMaximum = 5
PF_SCENARIO_TYPE = v_enum()
PF_SCENARIO_TYPE.PfApplicationLaunchScenarioType = 0
PF_SCENARIO_TYPE.PfSystemBootScenarioType = 1
PF_SCENARIO_TYPE.PfMaxScenarioType = 2
TOKEN_TYPE = v_enum()
TOKEN_TYPE.TokenPrimary = 1
TOKEN_TYPE.TokenImpersonation = 2
VI_DEADLOCK_RESOURCE_TYPE = v_enum()
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockUnknown = 0
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockMutex = 1
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockFastMutex = 2
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockFastMutexUnsafe = 3
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockSpinLock = 4
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockQueuedSpinLock = 5
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockTypeMaximum = 6
FSINFOCLASS = v_enum()
FSINFOCLASS.FileFsVolumeInformation = 1
FSINFOCLASS.FileFsLabelInformation = 2
FSINFOCLASS.FileFsSizeInformation = 3
FSINFOCLASS.FileFsDeviceInformation = 4
FSINFOCLASS.FileFsAttributeInformation = 5
FSINFOCLASS.FileFsControlInformation = 6
FSINFOCLASS.FileFsFullSizeInformation = 7
FSINFOCLASS.FileFsObjectIdInformation = 8
FSINFOCLASS.FileFsDriverPathInformation = 9
FSINFOCLASS.FileFsMaximumInformation = 10
ARBITER_ACTION = v_enum()
ARBITER_ACTION.ArbiterActionTestAllocation = 0
ARBITER_ACTION.ArbiterActionRetestAllocation = 1
ARBITER_ACTION.ArbiterActionCommitAllocation = 2
ARBITER_ACTION.ArbiterActionRollbackAllocation = 3
ARBITER_ACTION.ArbiterActionQueryAllocatedResources = 4
ARBITER_ACTION.ArbiterActionWriteReservedResources = 5
ARBITER_ACTION.ArbiterActionQueryConflict = 6
ARBITER_ACTION.ArbiterActionQueryArbitrate = 7
ARBITER_ACTION.ArbiterActionAddReserved = 8
ARBITER_ACTION.ArbiterActionBootAllocation = 9
POOL_TYPE = v_enum()
POOL_TYPE.NonPagedPool = 0
POOL_TYPE.PagedPool = 1
POOL_TYPE.NonPagedPoolMustSucceed = 2
POOL_TYPE.DontUseThisType = 3
POOL_TYPE.NonPagedPoolCacheAligned = 4
POOL_TYPE.PagedPoolCacheAligned = 5
POOL_TYPE.NonPagedPoolCacheAlignedMustS = 6
POOL_TYPE.MaxPoolType = 7
POOL_TYPE.NonPagedPoolSession = 32
POOL_TYPE.PagedPoolSession = 33
POOL_TYPE.NonPagedPoolMustSucceedSession = 34
POOL_TYPE.DontUseThisTypeSession = 35
POOL_TYPE.NonPagedPoolCacheAlignedSession = 36
POOL_TYPE.PagedPoolCacheAlignedSession = 37
POOL_TYPE.NonPagedPoolCacheAlignedMustSSession = 38
PCI_DISPATCH_STYLE = v_enum()
PCI_DISPATCH_STYLE.IRP_COMPLETE = 0
PCI_DISPATCH_STYLE.IRP_DOWNWARD = 1
PCI_DISPATCH_STYLE.IRP_UPWARD = 2
PCI_DISPATCH_STYLE.IRP_DISPATCH = 3
MODE = v_enum()
MODE.KernelMode = 0
MODE.UserMode = 1
MODE.MaximumMode = 2
FS_FILTER_SECTION_SYNC_TYPE = v_enum()
FS_FILTER_SECTION_SYNC_TYPE.SyncTypeOther = 0
FS_FILTER_SECTION_SYNC_TYPE.SyncTypeCreateSection = 1
OB_OPEN_REASON = v_enum()
OB_OPEN_REASON.ObCreateHandle = 0
OB_OPEN_REASON.ObOpenHandle = 1
OB_OPEN_REASON.ObDuplicateHandle = 2
OB_OPEN_REASON.ObInheritHandle = 3
OB_OPEN_REASON.ObMaxOpenReason = 4
CPU_VENDORS = v_enum()
CPU_VENDORS.CPU_NONE = 0
CPU_VENDORS.CPU_INTEL = 1
CPU_VENDORS.CPU_AMD = 2
CPU_VENDORS.CPU_CYRIX = 3
CPU_VENDORS.CPU_TRANSMETA = 4
CPU_VENDORS.CPU_CENTAUR = 5
CPU_VENDORS.CPU_RISE = 6
CPU_VENDORS.CPU_UNKNOWN = 7
DEVICE_TEXT_TYPE = v_enum()
DEVICE_TEXT_TYPE.DeviceTextDescription = 0
DEVICE_TEXT_TYPE.DeviceTextLocationInformation = 1
POWER_STATE_TYPE = v_enum()
POWER_STATE_TYPE.SystemPowerState = 0
POWER_STATE_TYPE.DevicePowerState = 1
BUS_DATA_TYPE = v_enum()
BUS_DATA_TYPE.ConfigurationSpaceUndefined = -1
BUS_DATA_TYPE.Cmos = 0
BUS_DATA_TYPE.EisaConfiguration = 1
BUS_DATA_TYPE.Pos = 2
BUS_DATA_TYPE.CbusConfiguration = 3
BUS_DATA_TYPE.PCIConfiguration = 4
BUS_DATA_TYPE.VMEConfiguration = 5
BUS_DATA_TYPE.NuBusConfiguration = 6
BUS_DATA_TYPE.PCMCIAConfiguration = 7
BUS_DATA_TYPE.MPIConfiguration = 8
BUS_DATA_TYPE.MPSAConfiguration = 9
BUS_DATA_TYPE.PNPISAConfiguration = 10
BUS_DATA_TYPE.SgiInternalConfiguration = 11
BUS_DATA_TYPE.MaximumBusDataType = 12
LSA_FOREST_TRUST_RECORD_TYPE = v_enum()
LSA_FOREST_TRUST_RECORD_TYPE.ForestTrustTopLevelName = 0
LSA_FOREST_TRUST_RECORD_TYPE.ForestTrustTopLevelNameEx = 1
LSA_FOREST_TRUST_RECORD_TYPE.ForestTrustDomainInfo = 2
LSA_FOREST_TRUST_RECORD_TYPE.ForestTrustRecordTypeLast = 2
FILE_INFORMATION_CLASS = v_enum()
FILE_INFORMATION_CLASS.FileDirectoryInformation = 1
FILE_INFORMATION_CLASS.FileFullDirectoryInformation = 2
FILE_INFORMATION_CLASS.FileBothDirectoryInformation = 3
FILE_INFORMATION_CLASS.FileBasicInformation = 4
FILE_INFORMATION_CLASS.FileStandardInformation = 5
FILE_INFORMATION_CLASS.FileInternalInformation = 6
FILE_INFORMATION_CLASS.FileEaInformation = 7
FILE_INFORMATION_CLASS.FileAccessInformation = 8
FILE_INFORMATION_CLASS.FileNameInformation = 9
FILE_INFORMATION_CLASS.FileRenameInformation = 10
FILE_INFORMATION_CLASS.FileLinkInformation = 11
FILE_INFORMATION_CLASS.FileNamesInformation = 12
FILE_INFORMATION_CLASS.FileDispositionInformation = 13
FILE_INFORMATION_CLASS.FilePositionInformation = 14
FILE_INFORMATION_CLASS.FileFullEaInformation = 15
FILE_INFORMATION_CLASS.FileModeInformation = 16
FILE_INFORMATION_CLASS.FileAlignmentInformation = 17
FILE_INFORMATION_CLASS.FileAllInformation = 18
FILE_INFORMATION_CLASS.FileAllocationInformation = 19
FILE_INFORMATION_CLASS.FileEndOfFileInformation = 20
FILE_INFORMATION_CLASS.FileAlternateNameInformation = 21
FILE_INFORMATION_CLASS.FileStreamInformation = 22
FILE_INFORMATION_CLASS.FilePipeInformation = 23
FILE_INFORMATION_CLASS.FilePipeLocalInformation = 24
FILE_INFORMATION_CLASS.FilePipeRemoteInformation = 25
FILE_INFORMATION_CLASS.FileMailslotQueryInformation = 26
FILE_INFORMATION_CLASS.FileMailslotSetInformation = 27
FILE_INFORMATION_CLASS.FileCompressionInformation = 28
FILE_INFORMATION_CLASS.FileObjectIdInformation = 29
FILE_INFORMATION_CLASS.FileCompletionInformation = 30
FILE_INFORMATION_CLASS.FileMoveClusterInformation = 31
FILE_INFORMATION_CLASS.FileQuotaInformation = 32
FILE_INFORMATION_CLASS.FileReparsePointInformation = 33
FILE_INFORMATION_CLASS.FileNetworkOpenInformation = 34
FILE_INFORMATION_CLASS.FileAttributeTagInformation = 35
FILE_INFORMATION_CLASS.FileTrackingInformation = 36
FILE_INFORMATION_CLASS.FileIdBothDirectoryInformation = 37
FILE_INFORMATION_CLASS.FileIdFullDirectoryInformation = 38
FILE_INFORMATION_CLASS.FileValidDataLengthInformation = 39
FILE_INFORMATION_CLASS.FileShortNameInformation = 40
FILE_INFORMATION_CLASS.FileMaximumInformation = 41
EXCEPTION_DISPOSITION = v_enum()
EXCEPTION_DISPOSITION.ExceptionContinueExecution = 0
EXCEPTION_DISPOSITION.ExceptionContinueSearch = 1
EXCEPTION_DISPOSITION.ExceptionNestedException = 2
EXCEPTION_DISPOSITION.ExceptionCollidedUnwind = 3
PNP_VETO_TYPE = v_enum()
PNP_VETO_TYPE.PNP_VetoTypeUnknown = 0
PNP_VETO_TYPE.PNP_VetoLegacyDevice = 1
PNP_VETO_TYPE.PNP_VetoPendingClose = 2
PNP_VETO_TYPE.PNP_VetoWindowsApp = 3
PNP_VETO_TYPE.PNP_VetoWindowsService = 4
PNP_VETO_TYPE.PNP_VetoOutstandingOpen = 5
PNP_VETO_TYPE.PNP_VetoDevice = 6
PNP_VETO_TYPE.PNP_VetoDriver = 7
PNP_VETO_TYPE.PNP_VetoIllegalDeviceRequest = 8
PNP_VETO_TYPE.PNP_VetoInsufficientPower = 9
PNP_VETO_TYPE.PNP_VetoNonDisableable = 10
PNP_VETO_TYPE.PNP_VetoLegacyDriver = 11
PNP_VETO_TYPE.PNP_VetoInsufficientRights = 12
PCI_SIGNATURE = v_enum()
PCI_SIGNATURE.PciPdoExtensionType = 1768116272
PCI_SIGNATURE.PciFdoExtensionType = 1768116273
PCI_SIGNATURE.PciArb_Io = 1768116274
PCI_SIGNATURE.PciArb_Memory = 1768116275
PCI_SIGNATURE.PciArb_Interrupt = 1768116276
PCI_SIGNATURE.PciArb_BusNumber = 1768116277
PCI_SIGNATURE.PciTrans_Interrupt = 1768116278
PCI_SIGNATURE.PciInterface_BusHandler = 1768116279
PCI_SIGNATURE.PciInterface_IntRouteHandler = 1768116280
PCI_SIGNATURE.PciInterface_PciCb = 1768116281
PCI_SIGNATURE.PciInterface_LegacyDeviceDetection = 1768116282
PCI_SIGNATURE.PciInterface_PmeHandler = 1768116283
PCI_SIGNATURE.PciInterface_DevicePresent = 1768116284
PCI_SIGNATURE.PciInterface_NativeIde = 1768116285
PCI_SIGNATURE.PciInterface_AgpTarget = 1768116286
SECURITY_OPERATION_CODE = v_enum()
SECURITY_OPERATION_CODE.SetSecurityDescriptor = 0
SECURITY_OPERATION_CODE.QuerySecurityDescriptor = 1
SECURITY_OPERATION_CODE.DeleteSecurityDescriptor = 2
SECURITY_OPERATION_CODE.AssignSecurityDescriptor = 3
PP_NPAGED_LOOKASIDE_NUMBER = v_enum()
PP_NPAGED_LOOKASIDE_NUMBER.LookasideSmallIrpList = 0
PP_NPAGED_LOOKASIDE_NUMBER.LookasideLargeIrpList = 1
PP_NPAGED_LOOKASIDE_NUMBER.LookasideMdlList = 2
PP_NPAGED_LOOKASIDE_NUMBER.LookasideCreateInfoList = 3
PP_NPAGED_LOOKASIDE_NUMBER.LookasideNameBufferList = 4
PP_NPAGED_LOOKASIDE_NUMBER.LookasideTwilightList = 5
PP_NPAGED_LOOKASIDE_NUMBER.LookasideCompletionList = 6
PP_NPAGED_LOOKASIDE_NUMBER.LookasideMaximumList = 7
SECURITY_IMPERSONATION_LEVEL = v_enum()
SECURITY_IMPERSONATION_LEVEL.SecurityAnonymous = 0
SECURITY_IMPERSONATION_LEVEL.SecurityIdentification = 1
SECURITY_IMPERSONATION_LEVEL.SecurityImpersonation = 2
SECURITY_IMPERSONATION_LEVEL.SecurityDelegation = 3
DEVICE_USAGE_NOTIFICATION_TYPE = v_enum()
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypeUndefined = 0
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypePaging = 1
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypeHibernation = 2
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypeDumpFile = 3
PROXY_CLASS = v_enum()
PROXY_CLASS.ProxyFull = 0
PROXY_CLASS.ProxyService = 1
PROXY_CLASS.ProxyTree = 2
PROXY_CLASS.ProxyDirectory = 3
PLUGPLAY_EVENT_CATEGORY = v_enum()
PLUGPLAY_EVENT_CATEGORY.HardwareProfileChangeEvent = 0
PLUGPLAY_EVENT_CATEGORY.TargetDeviceChangeEvent = 1
PLUGPLAY_EVENT_CATEGORY.DeviceClassChangeEvent = 2
PLUGPLAY_EVENT_CATEGORY.CustomDeviceEvent = 3
PLUGPLAY_EVENT_CATEGORY.DeviceInstallEvent = 4
PLUGPLAY_EVENT_CATEGORY.DeviceArrivalEvent = 5
PLUGPLAY_EVENT_CATEGORY.PowerEvent = 6
PLUGPLAY_EVENT_CATEGORY.VetoEvent = 7
PLUGPLAY_EVENT_CATEGORY.BlockedDriverEvent = 8
PLUGPLAY_EVENT_CATEGORY.MaxPlugEventCategory = 9
INTERFACE_TYPE = v_enum()
INTERFACE_TYPE.InterfaceTypeUndefined = -1
INTERFACE_TYPE.Internal = 0
INTERFACE_TYPE.Isa = 1
INTERFACE_TYPE.Eisa = 2
INTERFACE_TYPE.MicroChannel = 3
INTERFACE_TYPE.TurboChannel = 4
INTERFACE_TYPE.PCIBus = 5
INTERFACE_TYPE.VMEBus = 6
INTERFACE_TYPE.NuBus = 7
INTERFACE_TYPE.PCMCIABus = 8
INTERFACE_TYPE.CBus = 9
INTERFACE_TYPE.MPIBus = 10
INTERFACE_TYPE.MPSABus = 11
INTERFACE_TYPE.ProcessorInternal = 12
INTERFACE_TYPE.InternalPowerBus = 13
INTERFACE_TYPE.PNPISABus = 14
INTERFACE_TYPE.PNPBus = 15
INTERFACE_TYPE.MaximumInterfaceType = 16
KWAIT_REASON = v_enum()
KWAIT_REASON.Executive = 0
KWAIT_REASON.FreePage = 1
KWAIT_REASON.PageIn = 2
KWAIT_REASON.PoolAllocation = 3
KWAIT_REASON.DelayExecution = 4
KWAIT_REASON.Suspended = 5
KWAIT_REASON.UserRequest = 6
KWAIT_REASON.WrExecutive = 7
KWAIT_REASON.WrFreePage = 8
KWAIT_REASON.WrPageIn = 9
KWAIT_REASON.WrPoolAllocation = 10
KWAIT_REASON.WrDelayExecution = 11
KWAIT_REASON.WrSuspended = 12
KWAIT_REASON.WrUserRequest = 13
KWAIT_REASON.WrEventPair = 14
KWAIT_REASON.WrQueue = 15
KWAIT_REASON.WrLpcReceive = 16
KWAIT_REASON.WrLpcReply = 17
KWAIT_REASON.WrVirtualMemory = 18
KWAIT_REASON.WrPageOut = 19
KWAIT_REASON.WrRendezvous = 20
KWAIT_REASON.Spare2 = 21
KWAIT_REASON.Spare3 = 22
KWAIT_REASON.Spare4 = 23
KWAIT_REASON.Spare5 = 24
KWAIT_REASON.Spare6 = 25
KWAIT_REASON.WrKernel = 26
KWAIT_REASON.MaximumWaitReason = 27
ALTERNATIVE_ARCHITECTURE_TYPE = v_enum()
ALTERNATIVE_ARCHITECTURE_TYPE.StandardDesign = 0
ALTERNATIVE_ARCHITECTURE_TYPE.NEC98x86 = 1
ALTERNATIVE_ARCHITECTURE_TYPE.EndAlternatives = 2
MMLISTS = v_enum()
MMLISTS.ZeroedPageList = 0
MMLISTS.FreePageList = 1
MMLISTS.StandbyPageList = 2
MMLISTS.ModifiedPageList = 3
MMLISTS.ModifiedNoWritePageList = 4
MMLISTS.BadPageList = 5
MMLISTS.ActiveAndValid = 6
MMLISTS.TransitionPage = 7
MEMORY_TYPE = v_enum()
MEMORY_TYPE.MemoryExceptionBlock = 0
MEMORY_TYPE.MemorySystemBlock = 1
MEMORY_TYPE.MemoryFree = 2
MEMORY_TYPE.MemoryBad = 3
MEMORY_TYPE.MemoryLoadedProgram = 4
MEMORY_TYPE.MemoryFirmwareTemporary = 5
MEMORY_TYPE.MemoryFirmwarePermanent = 6
MEMORY_TYPE.MemoryFreeContiguous = 7
MEMORY_TYPE.MemorySpecialMemory = 8
MEMORY_TYPE.MemoryMaximum = 9
PS_QUOTA_TYPE = v_enum()
PS_QUOTA_TYPE.PsNonPagedPool = 0
PS_QUOTA_TYPE.PsPagedPool = 1
PS_QUOTA_TYPE.PsPageFile = 2
PS_QUOTA_TYPE.PsQuotaTypes = 3
ReplacesCorHdrNumericDefines = v_enum()
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_ILONLY = 1
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_32BITREQUIRED = 2
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_IL_LIBRARY = 4
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_STRONGNAMESIGNED = 8
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_TRACKDEBUGDATA = 65536
ReplacesCorHdrNumericDefines.COR_VERSION_MAJOR_V2 = 2
ReplacesCorHdrNumericDefines.COR_VERSION_MAJOR = 2
ReplacesCorHdrNumericDefines.COR_VERSION_MINOR = 0
ReplacesCorHdrNumericDefines.COR_DELETED_NAME_LENGTH = 8
ReplacesCorHdrNumericDefines.COR_VTABLEGAP_NAME_LENGTH = 8
ReplacesCorHdrNumericDefines.NATIVE_TYPE_MAX_CB = 1
ReplacesCorHdrNumericDefines.COR_ILMETHOD_SECT_SMALL_MAX_DATASIZE = 255
ReplacesCorHdrNumericDefines.IMAGE_COR_MIH_METHODRVA = 1
ReplacesCorHdrNumericDefines.IMAGE_COR_MIH_EHRVA = 2
ReplacesCorHdrNumericDefines.IMAGE_COR_MIH_BASICBLOCK = 8
ReplacesCorHdrNumericDefines.COR_VTABLE_32BIT = 1
ReplacesCorHdrNumericDefines.COR_VTABLE_64BIT = 2
ReplacesCorHdrNumericDefines.COR_VTABLE_FROM_UNMANAGED = 4
ReplacesCorHdrNumericDefines.COR_VTABLE_CALL_MOST_DERIVED = 16
ReplacesCorHdrNumericDefines.IMAGE_COR_EATJ_THUNK_SIZE = 32
ReplacesCorHdrNumericDefines.MAX_CLASS_NAME = 1024
ReplacesCorHdrNumericDefines.MAX_PACKAGE_NAME = 1024
ARBITER_RESULT = v_enum()
ARBITER_RESULT.ArbiterResultUndefined = -1
ARBITER_RESULT.ArbiterResultSuccess = 0
ARBITER_RESULT.ArbiterResultExternalConflict = 1
ARBITER_RESULT.ArbiterResultNullRequest = 2
SYSTEM_POWER_STATE = v_enum()
SYSTEM_POWER_STATE.PowerSystemUnspecified = 0
SYSTEM_POWER_STATE.PowerSystemWorking = 1
SYSTEM_POWER_STATE.PowerSystemSleeping1 = 2
SYSTEM_POWER_STATE.PowerSystemSleeping2 = 3
SYSTEM_POWER_STATE.PowerSystemSleeping3 = 4
SYSTEM_POWER_STATE.PowerSystemHibernate = 5
SYSTEM_POWER_STATE.PowerSystemShutdown = 6
SYSTEM_POWER_STATE.PowerSystemMaximum = 7
MEMORY_CACHING_TYPE_ORIG = v_enum()
MEMORY_CACHING_TYPE_ORIG.MmFrameBufferCached = 2
POWER_ACTION = v_enum()
POWER_ACTION.PowerActionNone = 0
POWER_ACTION.PowerActionReserved = 1
POWER_ACTION.PowerActionSleep = 2
POWER_ACTION.PowerActionHibernate = 3
POWER_ACTION.PowerActionShutdown = 4
POWER_ACTION.PowerActionShutdownReset = 5
POWER_ACTION.PowerActionShutdownOff = 6
POWER_ACTION.PowerActionWarmEject = 7
PNP_DEVNODE_STATE = v_enum()
PNP_DEVNODE_STATE.DeviceNodeUnspecified = 768
PNP_DEVNODE_STATE.DeviceNodeUninitialized = 769
PNP_DEVNODE_STATE.DeviceNodeInitialized = 770
PNP_DEVNODE_STATE.DeviceNodeDriversAdded = 771
PNP_DEVNODE_STATE.DeviceNodeResourcesAssigned = 772
PNP_DEVNODE_STATE.DeviceNodeStartPending = 773
PNP_DEVNODE_STATE.DeviceNodeStartCompletion = 774
PNP_DEVNODE_STATE.DeviceNodeStartPostWork = 775
PNP_DEVNODE_STATE.DeviceNodeStarted = 776
PNP_DEVNODE_STATE.DeviceNodeQueryStopped = 777
PNP_DEVNODE_STATE.DeviceNodeStopped = 778
PNP_DEVNODE_STATE.DeviceNodeRestartCompletion = 779
PNP_DEVNODE_STATE.DeviceNodeEnumeratePending = 780
PNP_DEVNODE_STATE.DeviceNodeEnumerateCompletion = 781
PNP_DEVNODE_STATE.DeviceNodeAwaitingQueuedDeletion = 782
PNP_DEVNODE_STATE.DeviceNodeAwaitingQueuedRemoval = 783
PNP_DEVNODE_STATE.DeviceNodeQueryRemoved = 784
PNP_DEVNODE_STATE.DeviceNodeRemovePendingCloses = 785
PNP_DEVNODE_STATE.DeviceNodeRemoved = 786
PNP_DEVNODE_STATE.DeviceNodeDeletePendingCloses = 787
PNP_DEVNODE_STATE.DeviceNodeDeleted = 788
PROFILE_STATUS = v_enum()
PROFILE_STATUS.DOCK_NOTDOCKDEVICE = 0
PROFILE_STATUS.DOCK_QUIESCENT = 1
PROFILE_STATUS.DOCK_ARRIVING = 2
PROFILE_STATUS.DOCK_DEPARTING = 3
PROFILE_STATUS.DOCK_EJECTIRP_COMPLETED = 4
MI_PFN_CACHE_ATTRIBUTE = v_enum()
MI_PFN_CACHE_ATTRIBUTE.MiNonCached = 0
MI_PFN_CACHE_ATTRIBUTE.MiCached = 1
MI_PFN_CACHE_ATTRIBUTE.MiWriteCombined = 2
MI_PFN_CACHE_ATTRIBUTE.MiNotMapped = 3
class KEXECUTE_OPTIONS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExecuteDisable = v_uint8()
class PCI_PMC(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint8()
self.Support = PM_SUPPORT()
class _unnamed_14487(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = LIST_ENTRY()
class _unnamed_14486(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.UserData = v_ptr32()
self.Owner = v_ptr32()
class _unnamed_16779(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.EndingOffset = v_ptr32()
self.ResourceToRelease = v_ptr32()
class SEGMENT_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseAddress = v_ptr32()
self.TotalNumberOfPtes = v_uint32()
self.SizeOfSegment = LARGE_INTEGER()
self.NonExtendedPtes = v_uint32()
self.ImageCommitment = v_uint32()
self.ControlArea = v_ptr32()
self.Subsection = v_ptr32()
self.LargeControlArea = v_ptr32()
self.MmSectionFlags = v_ptr32()
self.MmSubSectionFlags = v_ptr32()
self._pad0030 = v_bytes(size=4)
class DUAL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.Map = v_ptr32()
self.SmallDir = v_ptr32()
self.Guard = v_uint32()
self.FreeDisplay = vstruct.VArray([ RTL_BITMAP() for i in xrange(24) ])
self.FreeSummary = v_uint32()
self.FreeBins = LIST_ENTRY()
class SID(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Revision = v_uint8()
self.SubAuthorityCount = v_uint8()
self.IdentifierAuthority = SID_IDENTIFIER_AUTHORITY()
self.SubAuthority = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class MMPTE_HARDWARE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class PCI_FUNCTION_RESOURCES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Limit = vstruct.VArray([ IO_RESOURCE_DESCRIPTOR() for i in xrange(7) ])
self.Current = vstruct.VArray([ CM_PARTIAL_RESOURCE_DESCRIPTOR() for i in xrange(7) ])
class _unnamed_13153(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.EntireFrame = v_uint32()
class DBGKD_SET_SPECIAL_CALL64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SpecialCall = v_uint64()
class _unnamed_13092(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Bytes = _unnamed_14544()
class KTSS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Backlink = v_uint16()
self.Reserved0 = v_uint16()
self.Esp0 = v_uint32()
self.Ss0 = v_uint16()
self.Reserved1 = v_uint16()
self.NotUsed1 = vstruct.VArray([ v_uint32() for i in xrange(4) ])
self.CR3 = v_uint32()
self.Eip = v_uint32()
self.EFlags = v_uint32()
self.Eax = v_uint32()
self.Ecx = v_uint32()
self.Edx = v_uint32()
self.Ebx = v_uint32()
self.Esp = v_uint32()
self.Ebp = v_uint32()
self.Esi = v_uint32()
self.Edi = v_uint32()
self.Es = v_uint16()
self.Reserved2 = v_uint16()
self.Cs = v_uint16()
self.Reserved3 = v_uint16()
self.Ss = v_uint16()
self.Reserved4 = v_uint16()
self.Ds = v_uint16()
self.Reserved5 = v_uint16()
self.Fs = v_uint16()
self.Reserved6 = v_uint16()
self.Gs = v_uint16()
self.Reserved7 = v_uint16()
self.LDT = v_uint16()
self.Reserved8 = v_uint16()
self.Flags = v_uint16()
self.IoMapBase = v_uint16()
self.IoMaps = vstruct.VArray([ KiIoAccessMap() for i in xrange(1) ])
self.IntDirectionMap = vstruct.VArray([ v_uint8() for i in xrange(32) ])
class CURDIR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DosPath = UNICODE_STRING()
self.Handle = v_ptr32()
class DBGKD_GET_INTERNAL_BREAKPOINT32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakpointAddress = v_uint32()
self.Flags = v_uint32()
self.Calls = v_uint32()
self.MaxCallsPerPeriod = v_uint32()
self.MinInstructions = v_uint32()
self.MaxInstructions = v_uint32()
self.TotalInstructions = v_uint32()
class DBGKD_MANIPULATE_STATE32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ApiNumber = v_uint32()
self.ProcessorLevel = v_uint16()
self.Processor = v_uint16()
self.ReturnStatus = v_uint32()
self.u = _unnamed_11882()
class _unnamed_11075(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = LIST_ENTRY()
self._pad0028 = v_bytes(size=32)
class PROCESSOR_POWER_POLICY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Revision = v_uint32()
self.DynamicThrottle = v_uint8()
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.DisableCStates = v_uint32()
self.PolicyCount = v_uint32()
self.Policy = vstruct.VArray([ PROCESSOR_POWER_POLICY_INFO() for i in xrange(3) ])
class _unnamed_11597(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Long = v_uint32()
class _unnamed_12520(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LongFlags = v_uint32()
class BITMAP_RANGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Links = LIST_ENTRY()
self.BasePage = v_uint64()
self.FirstDirtyPage = v_uint32()
self.LastDirtyPage = v_uint32()
self.DirtyPages = v_uint32()
self.Bitmap = v_ptr32()
class HARDWARE_PTE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class HANDLE_TABLE_ENTRY_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AuditMask = v_uint32()
class DBGKD_WRITE_MEMORY32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TargetBaseAddress = v_uint32()
self.TransferCount = v_uint32()
self.ActualBytesWritten = v_uint32()
class _unnamed_13252(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VirtualAddress = v_ptr32()
class PCI_INTERFACE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InterfaceType = v_ptr32()
self.MinSize = v_uint16()
self.MinVersion = v_uint16()
self.MaxVersion = v_uint16()
self.Flags = v_uint16()
self.ReferenceCount = v_uint32()
self.Signature = v_uint32()
self.Constructor = v_ptr32()
self.Initializer = v_ptr32()
class _unnamed_16629(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceId = vstruct.VArray([ v_uint16() for i in xrange(1) ])
class MMWSLENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class _unnamed_12976(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AsynchronousParameters = _unnamed_14745()
class CM_PARTIAL_RESOURCE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint16()
self.Revision = v_uint16()
self.Count = v_uint32()
self.PartialDescriptors = vstruct.VArray([ CM_PARTIAL_RESOURCE_DESCRIPTOR() for i in xrange(1) ])
class DBGKD_RESTORE_BREAKPOINT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakPointHandle = v_uint32()
class DEVICE_CAPABILITIES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.Version = v_uint16()
self.DeviceD1 = v_uint32()
self.Address = v_uint32()
self.UINumber = v_uint32()
self.DeviceState = vstruct.VArray([ DEVICE_POWER_STATE() for i in xrange(7) ])
self.SystemWake = v_uint32()
self.DeviceWake = v_uint32()
self.D1Latency = v_uint32()
self.D2Latency = v_uint32()
self.D3Latency = v_uint32()
class _unnamed_12973(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MasterIrp = v_ptr32()
class _unnamed_16624(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ClassGuid = GUID()
self.SymbolicLinkName = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0014 = v_bytes(size=2)
class _unnamed_16310(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.MinBusNumber = v_uint32()
self.MaxBusNumber = v_uint32()
self.Reserved = v_uint32()
class _unnamed_16315(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Priority = v_uint32()
self.Reserved1 = v_uint32()
self.Reserved2 = v_uint32()
class EXCEPTION_RECORD64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionCode = v_uint32()
self.ExceptionFlags = v_uint32()
self.ExceptionRecord = v_uint64()
self.ExceptionAddress = v_uint64()
self.NumberParameters = v_uint32()
self.unusedAlignment = v_uint32()
self.ExceptionInformation = vstruct.VArray([ v_uint64() for i in xrange(15) ])
class _unnamed_16250(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ProviderId = v_uint32()
self.DataPath = v_ptr32()
self.BufferSize = v_uint32()
self.Buffer = v_ptr32()
class KPROCESS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.ProfileListHead = LIST_ENTRY()
self.DirectoryTableBase = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.LdtDescriptor = KGDTENTRY()
self.Int21Descriptor = KIDTENTRY()
self.IopmOffset = v_uint16()
self.Iopl = v_uint8()
self.Unused = v_uint8()
self.ActiveProcessors = v_uint32()
self.KernelTime = v_uint32()
self.UserTime = v_uint32()
self.ReadyListHead = LIST_ENTRY()
self.SwapListEntry = SINGLE_LIST_ENTRY()
self.VdmTrapcHandler = v_ptr32()
self.ThreadListHead = LIST_ENTRY()
self.ProcessLock = v_uint32()
self.Affinity = v_uint32()
self.StackCount = v_uint16()
self.BasePriority = v_uint8()
self.ThreadQuantum = v_uint8()
self.AutoAlignment = v_uint8()
self.State = v_uint8()
self.ThreadSeed = v_uint8()
self.DisableBoost = v_uint8()
self.PowerState = v_uint8()
self.DisableQuantum = v_uint8()
self.IdealNode = v_uint8()
self.Flags = KEXECUTE_OPTIONS()
class DEVICE_OBJECT_POWER_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IdleCount = v_uint32()
self.ConservationIdleTime = v_uint32()
self.PerformanceIdleTime = v_uint32()
self.DeviceObject = v_ptr32()
self.IdleList = LIST_ENTRY()
self.DeviceType = v_uint8()
self._pad001c = v_bytes(size=3)
self.State = v_uint32()
self.NotifySourceList = LIST_ENTRY()
self.NotifyTargetList = LIST_ENTRY()
self.PowerChannelSummary = POWER_CHANNEL_SUMMARY()
self.Volume = LIST_ENTRY()
class MMPTE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class HEAP_TAG_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Allocs = v_uint32()
self.Frees = v_uint32()
self.Size = v_uint32()
self.TagIndex = v_uint16()
self.CreatorBackTraceIndex = v_uint16()
self.TagName = vstruct.VArray([ v_uint16() for i in xrange(24) ])
class VI_POOL_ENTRY_INUSE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VirtualAddress = v_ptr32()
self.CallingAddress = v_ptr32()
self.NumberOfBytes = v_uint32()
self.Tag = v_uint32()
class HEAP_LOOKASIDE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = SLIST_HEADER()
self.Depth = v_uint16()
self.MaximumDepth = v_uint16()
self.TotalAllocates = v_uint32()
self.AllocateMisses = v_uint32()
self.TotalFrees = v_uint32()
self.FreeMisses = v_uint32()
self.LastTotalAllocates = v_uint32()
self.LastAllocateMisses = v_uint32()
self.Counters = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self._pad0030 = v_bytes(size=4)
class MMPTE_TRANSITION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class _unnamed_16247(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AllocatedResources = v_ptr32()
self.AllocatedResourcesTranslated = v_ptr32()
class OBJECT_HANDLE_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HandleAttributes = v_uint32()
self.GrantedAccess = v_uint32()
class OWNER_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OwnerThread = v_uint32()
self.OwnerCount = v_uint32()
class DEVOBJ_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.DeviceObject = v_ptr32()
self.PowerFlags = v_uint32()
self.Dope = v_ptr32()
self.ExtensionFlags = v_uint32()
self.DeviceNode = v_ptr32()
self.AttachedTo = v_ptr32()
self.StartIoCount = v_uint32()
self.StartIoKey = v_uint32()
self.StartIoFlags = v_uint32()
self.Vpb = v_ptr32()
class _unnamed_14357(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.bits = _unnamed_16509()
class ARBITER_ALLOCATION_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint64()
self.End = v_uint64()
self.CurrentMinimum = v_uint64()
self.CurrentMaximum = v_uint64()
self.Entry = v_ptr32()
self.CurrentAlternative = v_ptr32()
self.AlternativeCount = v_uint32()
self.Alternatives = v_ptr32()
self.Flags = v_uint16()
self.RangeAttributes = v_uint8()
self.RangeAvailableAttributes = v_uint8()
self.WorkSpace = v_uint32()
class DBGKD_SET_INTERNAL_BREAKPOINT64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakpointAddress = v_uint64()
self.Flags = v_uint32()
self._pad0010 = v_bytes(size=4)
class _unnamed_16089(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.EaList = v_ptr32()
self.EaListLength = v_uint32()
self.EaIndex = v_uint32()
class MM_DRIVER_VERIFIER_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Level = v_uint32()
self.RaiseIrqls = v_uint32()
self.AcquireSpinLocks = v_uint32()
self.SynchronizeExecutions = v_uint32()
self.AllocationsAttempted = v_uint32()
self.AllocationsSucceeded = v_uint32()
self.AllocationsSucceededSpecialPool = v_uint32()
self.AllocationsWithNoTag = v_uint32()
self.TrimRequests = v_uint32()
self.Trims = v_uint32()
self.AllocationsFailed = v_uint32()
self.AllocationsFailedDeliberately = v_uint32()
self.Loads = v_uint32()
self.Unloads = v_uint32()
self.UnTrackedPool = v_uint32()
self.UserTrims = v_uint32()
self.CurrentPagedPoolAllocations = v_uint32()
self.CurrentNonPagedPoolAllocations = v_uint32()
self.PeakPagedPoolAllocations = v_uint32()
self.PeakNonPagedPoolAllocations = v_uint32()
self.PagedBytes = v_uint32()
self.NonPagedBytes = v_uint32()
self.PeakPagedBytes = v_uint32()
self.PeakNonPagedBytes = v_uint32()
self.BurstAllocationsFailedDeliberately = v_uint32()
self.SessionTrims = v_uint32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(2) ])
class PI_BUS_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint32()
self.NumberCSNs = v_uint32()
self.ReadDataPort = v_ptr32()
self.DataPortMapped = v_uint8()
self._pad0010 = v_bytes(size=3)
self.AddressPort = v_ptr32()
self.AddrPortMapped = v_uint8()
self._pad0018 = v_bytes(size=3)
self.CommandPort = v_ptr32()
self.CmdPortMapped = v_uint8()
self._pad0020 = v_bytes(size=3)
self.NextSlotNumber = v_uint32()
self.DeviceList = SINGLE_LIST_ENTRY()
self.CardList = SINGLE_LIST_ENTRY()
self.PhysicalBusDevice = v_ptr32()
self.FunctionalBusDevice = v_ptr32()
self.AttachedDevice = v_ptr32()
self.BusNumber = v_uint32()
self.SystemPowerState = v_uint32()
self.DevicePowerState = v_uint32()
class MAILSLOT_CREATE_PARAMETERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MailslotQuota = v_uint32()
self.MaximumMessageSize = v_uint32()
self.ReadTimeout = LARGE_INTEGER()
self.TimeoutSpecified = v_uint8()
self._pad0018 = v_bytes(size=7)
class FS_FILTER_CALLBACK_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SizeOfFsFilterCallbackData = v_uint32()
self.Operation = v_uint8()
self.Reserved = v_uint8()
self._pad0008 = v_bytes(size=2)
self.DeviceObject = v_ptr32()
self.FileObject = v_ptr32()
self.Parameters = FS_FILTER_PARAMETERS()
class ACCESS_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OperationID = LUID()
self.SecurityEvaluated = v_uint8()
self.GenerateAudit = v_uint8()
self.GenerateOnClose = v_uint8()
self.PrivilegesAllocated = v_uint8()
self.Flags = v_uint32()
self.RemainingDesiredAccess = v_uint32()
self.PreviouslyGrantedAccess = v_uint32()
self.OriginalDesiredAccess = v_uint32()
self.SubjectSecurityContext = SECURITY_SUBJECT_CONTEXT()
self.SecurityDescriptor = v_ptr32()
self.AuxData = v_ptr32()
self.Privileges = _unnamed_14065()
self.AuditPrivileges = v_uint8()
self._pad0064 = v_bytes(size=3)
self.ObjectName = UNICODE_STRING()
self.ObjectTypeName = UNICODE_STRING()
class FILE_STANDARD_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AllocationSize = LARGE_INTEGER()
self.EndOfFile = LARGE_INTEGER()
self.NumberOfLinks = v_uint32()
self.DeletePending = v_uint8()
self.Directory = v_uint8()
self._pad0018 = v_bytes(size=2)
class EX_PUSH_LOCK_CACHE_AWARE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Locks = vstruct.VArray([ v_ptr32() for i in xrange(1) ])
class POOL_BLOCK_HEAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = POOL_HEADER()
self.List = LIST_ENTRY()
class DBGKD_SET_SPECIAL_CALL32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SpecialCall = v_uint32()
class SYSTEM_POWER_LEVEL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Enable = v_uint8()
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.BatteryLevel = v_uint32()
self.PowerPolicy = POWER_ACTION_POLICY()
self.MinSystemState = v_uint32()
class DBGKD_LOAD_SYMBOLS32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PathNameLength = v_uint32()
self.BaseOfDll = v_uint32()
self.ProcessId = v_uint32()
self.CheckSum = v_uint32()
self.SizeOfImage = v_uint32()
self.UnloadSymbols = v_uint8()
self._pad0018 = v_bytes(size=3)
class DBGKM_EXCEPTION32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionRecord = EXCEPTION_RECORD32()
self.FirstChance = v_uint32()
class PAGEFAULT_HISTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CurrentIndex = v_uint32()
self.MaxIndex = v_uint32()
self.SpinLock = v_uint32()
self.Reserved = v_ptr32()
self.WatchInfo = vstruct.VArray([ PROCESS_WS_WATCH_INFORMATION() for i in xrange(1) ])
class _unnamed_16107(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.FsInformationClass = v_uint32()
class WNODE_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BufferSize = v_uint32()
self.ProviderId = v_uint32()
self.HistoricalContext = v_uint64()
self.CountLost = v_uint32()
self._pad0018 = v_bytes(size=4)
self.Guid = GUID()
self.ClientContext = v_uint32()
self.Flags = v_uint32()
class PROCESS_WS_WATCH_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FaultingPc = v_ptr32()
self.FaultingVa = v_ptr32()
class SECTION_OBJECT_POINTERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSectionObject = v_ptr32()
self.SharedCacheMap = v_ptr32()
self.ImageSectionObject = v_ptr32()
class MDL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Size = v_uint16()
self.MdlFlags = v_uint16()
self.Process = v_ptr32()
self.MappedSystemVa = v_ptr32()
self.StartVa = v_ptr32()
self.ByteCount = v_uint32()
self.ByteOffset = v_uint32()
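# Illustrative sketch (not part of the generated vivisect definitions): the
# hypothetical helper below assumes the standard vstruct API (vsParse to fill
# fields from raw bytes) and shows how one of these auto-generated structures
# might be read out of a memory dump.
def _example_parse_mdl(raw_bytes):
    mdl = MDL()
    mdl.vsParse(raw_bytes)           # populate the fields from the byte buffer
    return mdl.ByteCount, mdl.ByteOffset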
class KTRAP_FRAME(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DbgEbp = v_uint32()
self.DbgEip = v_uint32()
self.DbgArgMark = v_uint32()
self.DbgArgPointer = v_uint32()
self.TempSegCs = v_uint32()
self.TempEsp = v_uint32()
self.Dr0 = v_uint32()
self.Dr1 = v_uint32()
self.Dr2 = v_uint32()
self.Dr3 = v_uint32()
self.Dr6 = v_uint32()
self.Dr7 = v_uint32()
self.SegGs = v_uint32()
self.SegEs = v_uint32()
self.SegDs = v_uint32()
self.Edx = v_uint32()
self.Ecx = v_uint32()
self.Eax = v_uint32()
self.PreviousPreviousMode = v_uint32()
self.ExceptionList = v_ptr32()
self.SegFs = v_uint32()
self.Edi = v_uint32()
self.Esi = v_uint32()
self.Ebx = v_uint32()
self.Ebp = v_uint32()
self.ErrCode = v_uint32()
self.Eip = v_uint32()
self.SegCs = v_uint32()
self.EFlags = v_uint32()
self.HardwareEsp = v_uint32()
self.HardwareSegSs = v_uint32()
self.V86Es = v_uint32()
self.V86Ds = v_uint32()
self.V86Fs = v_uint32()
self.V86Gs = v_uint32()
class CM_INDEX_HINT_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.HashKey = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class SEP_AUDIT_POLICY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PolicyElements = SEP_AUDIT_POLICY_CATEGORIES()
class MMPTE_SOFTWARE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class IO_TIMER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.TimerFlag = v_uint16()
self.TimerList = LIST_ENTRY()
self.TimerRoutine = v_ptr32()
self.Context = v_ptr32()
self.DeviceObject = v_ptr32()
class Wx86ThreadState(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CallBx86Eip = v_ptr32()
self.DeallocationCpu = v_ptr32()
self.UseKnownWx86Dll = v_uint8()
self.OleStubInvoked = v_uint8()
self._pad000c = v_bytes(size=2)
class _unnamed_12112(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FreeListsInUseTerminate = v_uint16()
class _unnamed_12111(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FreeListsInUseUlong = vstruct.VArray([ v_uint32() for i in xrange(4) ])
class _unnamed_16218(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceTextType = v_uint32()
self.LocaleId = v_uint32()
class MM_SESSION_SPACE_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Initialized = v_uint32()
class _unnamed_14629(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.type0 = PCI_HEADER_TYPE_0()
class EVENT_COUNTER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = SINGLE_LIST_ENTRY()
self.RefCount = v_uint32()
self.Event = KEVENT()
class SECURITY_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Revision = v_uint8()
self.Sbz1 = v_uint8()
self.Control = v_uint16()
self.Owner = v_ptr32()
self.Group = v_ptr32()
self.Sacl = v_ptr32()
self.Dacl = v_ptr32()
class SECURITY_TOKEN_AUDIT_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.GrantMask = v_uint32()
self.DenyMask = v_uint32()
class EX_WORK_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WorkerQueue = KQUEUE()
self.DynamicThreadCount = v_uint32()
self.WorkItemsProcessed = v_uint32()
self.WorkItemsProcessedLastPass = v_uint32()
self.QueueDepthLastPass = v_uint32()
self.Info = EX_QUEUE_WORKER_INFO()
class OBJECT_TYPE_INITIALIZER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint16()
self.UseDefaultObject = v_uint8()
self.CaseInsensitive = v_uint8()
self.InvalidAttributes = v_uint32()
self.GenericMapping = GENERIC_MAPPING()
self.ValidAccessMask = v_uint32()
self.SecurityRequired = v_uint8()
self.MaintainHandleCount = v_uint8()
self.MaintainTypeList = v_uint8()
self._pad0020 = v_bytes(size=1)
self.PoolType = v_uint32()
self.DefaultPagedPoolCharge = v_uint32()
self.DefaultNonPagedPoolCharge = v_uint32()
self.DumpProcedure = v_ptr32()
self.OpenProcedure = v_ptr32()
self.CloseProcedure = v_ptr32()
self.DeleteProcedure = v_ptr32()
self.ParseProcedure = v_ptr32()
self.SecurityProcedure = v_ptr32()
self.QueryNameProcedure = v_ptr32()
self.OkayToCloseProcedure = v_ptr32()
class VACB_LEVEL_REFERENCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Reference = v_uint32()
self.SpecialReference = v_uint32()
class _unnamed_16627(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceIds = vstruct.VArray([ v_uint16() for i in xrange(1) ])
class HEAP_ENTRY_EXTRA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AllocatorBackTraceIndex = v_uint16()
self.TagIndex = v_uint16()
self.Settable = v_uint32()
class POP_DEVICE_SYS_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IrpMinor = v_uint8()
self._pad0004 = v_bytes(size=3)
self.SystemState = v_uint32()
self.Event = KEVENT()
self.SpinLock = v_uint32()
self.Thread = v_ptr32()
self.GetNewDeviceList = v_uint8()
self._pad0024 = v_bytes(size=3)
self.Order = PO_DEVICE_NOTIFY_ORDER()
self.Status = v_uint32()
self.FailedDevice = v_ptr32()
self.Waking = v_uint8()
self.Cancelled = v_uint8()
self.IgnoreErrors = v_uint8()
self.IgnoreNotImplemented = v_uint8()
self.WaitAny = v_uint8()
self.WaitAll = v_uint8()
self._pad027c = v_bytes(size=2)
self.PresentIrpQueue = LIST_ENTRY()
self.Head = POP_DEVICE_POWER_IRP()
self.PowerIrpState = vstruct.VArray([ POP_DEVICE_POWER_IRP() for i in xrange(20) ])
class VI_DEADLOCK_RESOURCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint32()
self.NodeCount = v_uint32()
self.ResourceAddress = v_ptr32()
self.ThreadOwner = v_ptr32()
self.ResourceList = LIST_ENTRY()
self.HashChainList = LIST_ENTRY()
self.StackTrace = vstruct.VArray([ v_ptr32() for i in xrange(8) ])
self.LastAcquireTrace = vstruct.VArray([ v_ptr32() for i in xrange(8) ])
self.LastReleaseTrace = vstruct.VArray([ v_ptr32() for i in xrange(8) ])
class HEAP_PSEUDO_TAG_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Allocs = v_uint32()
self.Frees = v_uint32()
self.Size = v_uint32()
class _unnamed_13834(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Generic = _unnamed_14637()
class CM_KEY_REFERENCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.KeyCell = v_uint32()
self.KeyHive = v_ptr32()
class MMSECTION_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BeingDeleted = v_uint32()
class IA64_DBGKD_CONTROL_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Continue = v_uint32()
self.CurrentSymbolStart = v_uint64()
self.CurrentSymbolEnd = v_uint64()
class DBGKD_GET_INTERNAL_BREAKPOINT64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakpointAddress = v_uint64()
self.Flags = v_uint32()
self.Calls = v_uint32()
self.MaxCallsPerPeriod = v_uint32()
self.MinInstructions = v_uint32()
self.MaxInstructions = v_uint32()
self.TotalInstructions = v_uint32()
class PROCESSOR_POWER_POLICY_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TimeCheck = v_uint32()
self.DemoteLimit = v_uint32()
self.PromoteLimit = v_uint32()
self.DemotePercent = v_uint8()
self.PromotePercent = v_uint8()
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(2) ])
self.AllowDemotion = v_uint32()
class _unnamed_16213(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IdType = v_uint32()
class POP_POWER_ACTION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Updates = v_uint8()
self.State = v_uint8()
self.Shutdown = v_uint8()
self._pad0004 = v_bytes(size=1)
self.Action = v_uint32()
self.LightestState = v_uint32()
self.Flags = v_uint32()
self.Status = v_uint32()
self.IrpMinor = v_uint8()
self._pad0018 = v_bytes(size=3)
self.SystemState = v_uint32()
self.NextSystemState = v_uint32()
self.ShutdownBugCode = v_ptr32()
self.DevState = v_ptr32()
self.HiberContext = v_ptr32()
self.LastWakeState = v_uint32()
self.WakeTime = v_uint64()
self.SleepTime = v_uint64()
class OBJECT_CREATE_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Attributes = v_uint32()
self.RootDirectory = v_ptr32()
self.ParseContext = v_ptr32()
self.ProbeMode = v_uint8()
self._pad0010 = v_bytes(size=3)
self.PagedPoolCharge = v_uint32()
self.NonPagedPoolCharge = v_uint32()
self.SecurityDescriptorCharge = v_uint32()
self.SecurityDescriptor = v_ptr32()
self.SecurityQos = v_ptr32()
self.SecurityQualityOfService = SECURITY_QUALITY_OF_SERVICE()
class OBJECT_HEADER_CREATOR_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TypeList = LIST_ENTRY()
self.CreatorUniqueProcess = v_ptr32()
self.CreatorBackTraceIndex = v_uint16()
self.Reserved = v_uint16()
class PAGED_LOOKASIDE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.L = GENERAL_LOOKASIDE()
self.Lock__ObsoleteButDoNotDelete = FAST_MUTEX()
self._pad0100 = v_bytes(size=96)
class HEAP_STOP_ON_TAG(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HeapAndTagIndex = v_uint32()
class PO_NOTIFY_ORDER_LEVEL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LevelReady = KEVENT()
self.DeviceCount = v_uint32()
self.ActiveCount = v_uint32()
self.WaitSleep = LIST_ENTRY()
self.ReadySleep = LIST_ENTRY()
self.Pending = LIST_ENTRY()
self.Complete = LIST_ENTRY()
self.ReadyS0 = LIST_ENTRY()
self.WaitS0 = LIST_ENTRY()
class RTL_BITMAP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SizeOfBitMap = v_uint32()
self.Buffer = v_ptr32()
class LARGE_INTEGER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
class _unnamed_12162(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CriticalSection = RTL_CRITICAL_SECTION()
self._pad0038 = v_bytes(size=32)
class NPAGED_LOOKASIDE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.L = GENERAL_LOOKASIDE()
self.Lock__ObsoleteButDoNotDelete = v_uint32()
self._pad0100 = v_bytes(size=124)
class _unnamed_11794(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReadMemory = DBGKD_READ_MEMORY64()
self._pad0028 = v_bytes(size=24)
class KLOCK_QUEUE_HANDLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LockQueue = KSPIN_LOCK_QUEUE()
self.OldIrql = v_uint8()
self._pad000c = v_bytes(size=3)
class VPB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.Flags = v_uint16()
self.VolumeLabelLength = v_uint16()
self.DeviceObject = v_ptr32()
self.RealDevice = v_ptr32()
self.SerialNumber = v_uint32()
self.ReferenceCount = v_uint32()
self.VolumeLabel = vstruct.VArray([ v_uint16() for i in xrange(32) ])
class SEGMENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ControlArea = v_ptr32()
self.TotalNumberOfPtes = v_uint32()
self.NonExtendedPtes = v_uint32()
self.WritableUserReferences = v_uint32()
self.SizeOfSegment = v_uint64()
self.SegmentPteTemplate = MMPTE()
self.NumberOfCommittedPages = v_uint32()
self.ExtendInfo = v_ptr32()
self.SystemImageBase = v_ptr32()
self.BasedAddress = v_ptr32()
self.u1 = _unnamed_12605()
self.u2 = _unnamed_12606()
self.PrototypePte = v_ptr32()
self.ThePtes = vstruct.VArray([ MMPTE() for i in xrange(1) ])
self._pad0040 = v_bytes(size=4)
class _unnamed_15247(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TestAllocation = _unnamed_16554()
self._pad0010 = v_bytes(size=4)
class PP_LOOKASIDE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.P = v_ptr32()
self.L = v_ptr32()
class OBJECT_NAME_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Name = UNICODE_STRING()
class IO_RESOURCE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint16()
self.Revision = v_uint16()
self.Count = v_uint32()
self.Descriptors = vstruct.VArray([ IO_RESOURCE_DESCRIPTOR() for i in xrange(1) ])
class _unnamed_16445(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PageNo = v_uint32()
self.StartPage = v_uint32()
self.EndPage = v_uint32()
self.CheckSum = v_uint32()
class _unnamed_16446(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.NextTable = v_uint32()
self.CheckSum = v_uint32()
self.EntryCount = v_uint32()
class PRIVATE_CACHE_MAP_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DontUse = v_uint32()
class FS_FILTER_PARAMETERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AcquireForModifiedPageWriter = _unnamed_16779()
self._pad0014 = v_bytes(size=12)
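# HEAP: user-mode NT heap header (RtlCreateHeap) in its 32-bit layout: segment
# array, per-size free lists, tag/pseudo-tag bookkeeping, commit routine and the
# optional front-end heap selected by FrontEndHeapType.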
class HEAP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Entry = HEAP_ENTRY()
self.Signature = v_uint32()
self.Flags = v_uint32()
self.ForceFlags = v_uint32()
self.VirtualMemoryThreshold = v_uint32()
self.SegmentReserve = v_uint32()
self.SegmentCommit = v_uint32()
self.DeCommitFreeBlockThreshold = v_uint32()
self.DeCommitTotalFreeThreshold = v_uint32()
self.TotalFreeSize = v_uint32()
self.MaximumAllocationSize = v_uint32()
self.ProcessHeapsListIndex = v_uint16()
self.HeaderValidateLength = v_uint16()
self.HeaderValidateCopy = v_ptr32()
self.NextAvailableTagIndex = v_uint16()
self.MaximumTagIndex = v_uint16()
self.TagEntries = v_ptr32()
self.UCRSegments = v_ptr32()
self.UnusedUnCommittedRanges = v_ptr32()
self.AlignRound = v_uint32()
self.AlignMask = v_uint32()
self.VirtualAllocdBlocks = LIST_ENTRY()
self.Segments = vstruct.VArray([ v_ptr32() for i in xrange(64) ])
self.u = _unnamed_12111()
self.u2 = _unnamed_12112()
self.AllocatorBackTraceIndex = v_uint16()
self.NonDedicatedListLength = v_uint32()
self.LargeBlocksIndex = v_ptr32()
self.PseudoTagEntries = v_ptr32()
self.FreeLists = vstruct.VArray([ LIST_ENTRY() for i in xrange(128) ])
self.LockVariable = v_ptr32()
self.CommitRoutine = v_ptr32()
self.FrontEndHeap = v_ptr32()
self.FrontHeapLockCount = v_uint16()
self.FrontEndHeapType = v_uint8()
self.LastSegmentIndex = v_uint8()
class HANDLE_TRACE_DEBUG_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CurrentStackIndex = v_uint32()
self.TraceDb = vstruct.VArray([ HANDLE_TRACE_DB_ENTRY() for i in xrange(4096) ])
class PRIVILEGE_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PrivilegeCount = v_uint32()
self.Control = v_uint32()
self.Privilege = vstruct.VArray([ LUID_AND_ATTRIBUTES() for i in xrange(1) ])
class CM_RESOURCE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.List = vstruct.VArray([ CM_FULL_RESOURCE_DESCRIPTOR() for i in xrange(1) ])
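# EPROCESS: executive process object (this 32-bit layout appears to match an
# NT 5.x-era build): embeds the kernel process block (Pcb) and carries quota and
# VM counters, the handle table (ObjectTable), the primary token, the PEB
# pointer and the 16-byte ImageFileName.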
class EPROCESS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Pcb = KPROCESS()
self.ProcessLock = EX_PUSH_LOCK()
self.CreateTime = LARGE_INTEGER()
self.ExitTime = LARGE_INTEGER()
self.RundownProtect = EX_RUNDOWN_REF()
self.UniqueProcessId = v_ptr32()
self.ActiveProcessLinks = LIST_ENTRY()
self.QuotaUsage = vstruct.VArray([ v_uint32() for i in xrange(3) ])
self.QuotaPeak = vstruct.VArray([ v_uint32() for i in xrange(3) ])
self.CommitCharge = v_uint32()
self.PeakVirtualSize = v_uint32()
self.VirtualSize = v_uint32()
self.SessionProcessLinks = LIST_ENTRY()
self.DebugPort = v_ptr32()
self.ExceptionPort = v_ptr32()
self.ObjectTable = v_ptr32()
self.Token = EX_FAST_REF()
self.WorkingSetLock = FAST_MUTEX()
self.WorkingSetPage = v_uint32()
self.AddressCreationLock = FAST_MUTEX()
self.HyperSpaceLock = v_uint32()
self.ForkInProgress = v_ptr32()
self.HardwareTrigger = v_uint32()
self.VadRoot = v_ptr32()
self.VadHint = v_ptr32()
self.CloneRoot = v_ptr32()
self.NumberOfPrivatePages = v_uint32()
self.NumberOfLockedPages = v_uint32()
self.Win32Process = v_ptr32()
self.Job = v_ptr32()
self.SectionObject = v_ptr32()
self.SectionBaseAddress = v_ptr32()
self.QuotaBlock = v_ptr32()
self.WorkingSetWatch = v_ptr32()
self.Win32WindowStation = v_ptr32()
self.InheritedFromUniqueProcessId = v_ptr32()
self.LdtInformation = v_ptr32()
self.VadFreeHint = v_ptr32()
self.VdmObjects = v_ptr32()
self.DeviceMap = v_ptr32()
self.PhysicalVadList = LIST_ENTRY()
self.PageDirectoryPte = HARDWARE_PTE()
self._pad0170 = v_bytes(size=4)
self.Session = v_ptr32()
self.ImageFileName = vstruct.VArray([ v_uint8() for i in xrange(16) ])
self.JobLinks = LIST_ENTRY()
self.LockedPagesList = v_ptr32()
self.ThreadListHead = LIST_ENTRY()
self.SecurityPort = v_ptr32()
self.PaeTop = v_ptr32()
self.ActiveThreads = v_uint32()
self.GrantedAccess = v_uint32()
self.DefaultHardErrorProcessing = v_uint32()
self.LastThreadExitStatus = v_uint32()
self.Peb = v_ptr32()
self.PrefetchTrace = EX_FAST_REF()
self.ReadOperationCount = LARGE_INTEGER()
self.WriteOperationCount = LARGE_INTEGER()
self.OtherOperationCount = LARGE_INTEGER()
self.ReadTransferCount = LARGE_INTEGER()
self.WriteTransferCount = LARGE_INTEGER()
self.OtherTransferCount = LARGE_INTEGER()
self.CommitChargeLimit = v_uint32()
self.CommitChargePeak = v_uint32()
self.AweInfo = v_ptr32()
self.SeAuditProcessCreationInfo = SE_AUDIT_PROCESS_CREATION_INFO()
self.Vm = MMSUPPORT()
self.LastFaultCount = v_uint32()
self.ModifiedPageCount = v_uint32()
self.NumberOfVads = v_uint32()
self.JobStatus = v_uint32()
self.Flags = v_uint32()
self.ExitStatus = v_uint32()
self.NextPageColor = v_uint16()
self.SubSystemMinorVersion = v_uint8()
self.SubSystemMajorVersion = v_uint8()
self.PriorityClass = v_uint8()
self.WorkingSetAcquiredUnsafe = v_uint8()
self._pad0258 = v_bytes(size=2)
self.Cookie = v_uint32()
self._pad0260 = v_bytes(size=4)
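# Usage sketch (illustrative, not part of the generated definitions): these
# classes are plain vstruct.VStruct subclasses, so a structure can be decoded
# from raw target memory with the standard vstruct API, e.g.:
#
#     eproc = EPROCESS()
#     eproc.vsParse(buf)            # buf: bytes read at an EPROCESS address
#     pid  = eproc.UniqueProcessId  # fields are read as attributes
#     name = eproc.ImageFileName    # VArray of 16 bytes holding the image name
#
# len(eproc) gives the total size implied by the field layout above.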
class PHYSICAL_MEMORY_RUN(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BasePage = v_uint32()
self.PageCount = v_uint32()
class CM_KEY_BODY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint32()
self.KeyControlBlock = v_ptr32()
self.NotifyBlock = v_ptr32()
self.ProcessID = v_ptr32()
self.Callers = v_uint32()
self.CallerAddress = vstruct.VArray([ v_ptr32() for i in xrange(10) ])
self.KeyBodyList = LIST_ENTRY()
class KMUTANT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.MutantListEntry = LIST_ENTRY()
self.OwnerThread = v_ptr32()
self.Abandoned = v_uint8()
self.ApcDisable = v_uint8()
self._pad0020 = v_bytes(size=2)
class FX_SAVE_AREA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.U = _unnamed_10880()
self.NpxSavedCpu = v_uint32()
self.Cr0NpxState = v_uint32()
class POWER_SEQUENCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SequenceD1 = v_uint32()
self.SequenceD2 = v_uint32()
self.SequenceD3 = v_uint32()
class KTIMER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.DueTime = ULARGE_INTEGER()
self.TimerListEntry = LIST_ENTRY()
self.Dpc = v_ptr32()
self.Period = v_uint32()
class MM_PAGED_POOL_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PagedPoolAllocationMap = v_ptr32()
self.EndOfPagedPoolBitmap = v_ptr32()
self.PagedPoolLargeSessionAllocationMap = v_ptr32()
self.FirstPteForPagedPool = v_ptr32()
self.LastPteForPagedPool = v_ptr32()
self.NextPdeForPagedPoolExpansion = v_ptr32()
self.PagedPoolHint = v_uint32()
self.PagedPoolCommit = v_uint32()
self.AllocatedPagedPool = v_uint32()
class HIVE_LIST_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Name = v_ptr32()
self.BaseName = v_ptr32()
self.CmHive = v_ptr32()
self.Flags = v_uint32()
self.CmHive2 = v_ptr32()
self.ThreadFinished = v_uint8()
self.ThreadStarted = v_uint8()
self.Allocate = v_uint8()
self._pad0018 = v_bytes(size=1)
class CM_PARTIAL_RESOURCE_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint8()
self.ShareDisposition = v_uint8()
self.Flags = v_uint16()
self.u = _unnamed_13834()
class RTLP_RANGE_LIST_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint64()
self.End = v_uint64()
self.Allocated = _unnamed_14486()
self.Attributes = v_uint8()
self.PublicFlags = v_uint8()
self.PrivateFlags = v_uint16()
self.ListEntry = LIST_ENTRY()
self._pad0028 = v_bytes(size=4)
class _unnamed_14765(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceQueueEntry = KDEVICE_QUEUE_ENTRY()
self.Thread = v_ptr32()
self.AuxiliaryBuffer = v_ptr32()
self.ListEntry = LIST_ENTRY()
self.CurrentStackLocation = v_ptr32()
self.OriginalFileObject = v_ptr32()
class _unnamed_14762(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Create = _unnamed_15988()
class _unnamed_13383(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CellData = CELL_DATA()
class MMVAD_LONG(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartingVpn = v_uint32()
self.EndingVpn = v_uint32()
self.Parent = v_ptr32()
self.LeftChild = v_ptr32()
self.RightChild = v_ptr32()
self.u = _unnamed_14102()
self.ControlArea = v_ptr32()
self.FirstPrototypePte = v_ptr32()
self.LastContiguousPte = v_ptr32()
self.u2 = _unnamed_14103()
self.u3 = _unnamed_14104()
self.u4 = _unnamed_14105()
class CM_VIEW_OF_FILE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LRUViewList = LIST_ENTRY()
self.PinViewList = LIST_ENTRY()
self.FileOffset = v_uint32()
self.Size = v_uint32()
self.ViewAddress = v_ptr32()
self.Bcb = v_ptr32()
self.UseCount = v_uint32()
class _unnamed_16143(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.StartSid = v_ptr32()
self.SidList = v_ptr32()
self.SidListLength = v_uint32()
class CM_FULL_RESOURCE_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InterfaceType = v_uint32()
self.BusNumber = v_uint32()
self.PartialResourceList = CM_PARTIAL_RESOURCE_LIST()
class DBGKD_WRITE_MEMORY64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TargetBaseAddress = v_uint64()
self.TransferCount = v_uint32()
self.ActualBytesWritten = v_uint32()
class DBGKD_GET_VERSION64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.ProtocolVersion = v_uint16()
self.Flags = v_uint16()
self.MachineType = v_uint16()
self.MaxPacketType = v_uint8()
self.MaxStateChange = v_uint8()
self.MaxManipulate = v_uint8()
self.Simulation = v_uint8()
self.Unused = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self.KernBase = v_uint64()
self.PsLoadedModuleList = v_uint64()
self.DebuggerDataList = v_uint64()
class _unnamed_16069(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.FileName = v_ptr32()
self.FileInformationClass = v_uint32()
self.FileIndex = v_uint32()
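# FAST_IO_DISPATCH: table of fast-I/O entry points a file system or filter
# driver exposes via DRIVER_OBJECT.FastIoDispatch, letting the I/O manager
# bypass the IRP path for common operations when possible.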
class FAST_IO_DISPATCH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SizeOfFastIoDispatch = v_uint32()
self.FastIoCheckIfPossible = v_ptr32()
self.FastIoRead = v_ptr32()
self.FastIoWrite = v_ptr32()
self.FastIoQueryBasicInfo = v_ptr32()
self.FastIoQueryStandardInfo = v_ptr32()
self.FastIoLock = v_ptr32()
self.FastIoUnlockSingle = v_ptr32()
self.FastIoUnlockAll = v_ptr32()
self.FastIoUnlockAllByKey = v_ptr32()
self.FastIoDeviceControl = v_ptr32()
self.AcquireFileForNtCreateSection = v_ptr32()
self.ReleaseFileForNtCreateSection = v_ptr32()
self.FastIoDetachDevice = v_ptr32()
self.FastIoQueryNetworkOpenInfo = v_ptr32()
self.AcquireForModWrite = v_ptr32()
self.MdlRead = v_ptr32()
self.MdlReadComplete = v_ptr32()
self.PrepareMdlWrite = v_ptr32()
self.MdlWriteComplete = v_ptr32()
self.FastIoReadCompressed = v_ptr32()
self.FastIoWriteCompressed = v_ptr32()
self.MdlReadCompleteCompressed = v_ptr32()
self.MdlWriteCompleteCompressed = v_ptr32()
self.FastIoQueryOpen = v_ptr32()
self.ReleaseForModWrite = v_ptr32()
self.AcquireForCcFlush = v_ptr32()
self.ReleaseForCcFlush = v_ptr32()
class CM_KEY_CONTROL_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.RefCount = v_uint32()
self.ExtFlags = v_uint32()
self.KeyHash = CM_KEY_HASH()
self.ParentKcb = v_ptr32()
self.NameBlock = v_ptr32()
self.CachedSecurity = v_ptr32()
self.ValueCache = CACHED_CHILD_LIST()
self.IndexHint = v_ptr32()
self.KeyBodyListHead = LIST_ENTRY()
self.KcbLastWriteTime = LARGE_INTEGER()
self.KcbMaxNameLen = v_uint16()
self.KcbMaxValueNameLen = v_uint16()
self.KcbMaxValueDataLen = v_uint32()
self.KcbUserFlags = v_uint32()
self._pad0050 = v_bytes(size=4)
class MMVAD_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CommitCharge = v_uint32()
class MMWSL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Quota = v_uint32()
self.FirstFree = v_uint32()
self.FirstDynamic = v_uint32()
self.LastEntry = v_uint32()
self.NextSlot = v_uint32()
self.Wsle = v_ptr32()
self.LastInitializedWsle = v_uint32()
self.NonDirectCount = v_uint32()
self.HashTable = v_ptr32()
self.HashTableSize = v_uint32()
self.NumberOfCommittedPageTables = v_uint32()
self.HashTableStart = v_ptr32()
self.HighestPermittedHashAddress = v_ptr32()
self.NumberOfImageWaiters = v_uint32()
self.VadBitMapHint = v_uint32()
self.UsedPageTableEntries = vstruct.VArray([ v_uint16() for i in xrange(768) ])
self.CommittedPageTables = vstruct.VArray([ v_uint32() for i in xrange(24) ])
class DBGKD_CONTINUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ContinueStatus = v_uint32()
class _unnamed_14102(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LongFlags = v_uint32()
class _unnamed_14103(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LongFlags2 = v_uint32()
class SUPPORTED_RANGES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint16()
self.Sorted = v_uint8()
self.Reserved = v_uint8()
self.NoIO = v_uint32()
self.IO = SUPPORTED_RANGE()
self.NoMemory = v_uint32()
self._pad0030 = v_bytes(size=4)
self.Memory = SUPPORTED_RANGE()
self.NoPrefetchMemory = v_uint32()
self._pad0058 = v_bytes(size=4)
self.PrefetchMemory = SUPPORTED_RANGE()
self.NoDma = v_uint32()
self._pad0080 = v_bytes(size=4)
self.Dma = SUPPORTED_RANGE()
class WORK_QUEUE_ITEM(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.List = LIST_ENTRY()
self.WorkerRoutine = v_ptr32()
self.Parameter = v_ptr32()
class _unnamed_14104(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.List = LIST_ENTRY()
class _unnamed_14105(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Banked = v_ptr32()
class EPROCESS_QUOTA_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Usage = v_uint32()
self.Limit = v_uint32()
self.Peak = v_uint32()
self.Return = v_uint32()
class KSPECIAL_REGISTERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Cr0 = v_uint32()
self.Cr2 = v_uint32()
self.Cr3 = v_uint32()
self.Cr4 = v_uint32()
self.KernelDr0 = v_uint32()
self.KernelDr1 = v_uint32()
self.KernelDr2 = v_uint32()
self.KernelDr3 = v_uint32()
self.KernelDr6 = v_uint32()
self.KernelDr7 = v_uint32()
self.Gdtr = DESCRIPTOR()
self.Idtr = DESCRIPTOR()
self.Tr = v_uint16()
self.Ldtr = v_uint16()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(6) ])
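# KINTERRUPT: kernel interrupt object binding ServiceRoutine/ServiceContext to
# a vector and IRQL; DispatchCode holds the small dispatch thunk used when the
# interrupt is connected.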
class KINTERRUPT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.InterruptListEntry = LIST_ENTRY()
self.ServiceRoutine = v_ptr32()
self.ServiceContext = v_ptr32()
self.SpinLock = v_uint32()
self.TickCount = v_uint32()
self.ActualLock = v_ptr32()
self.DispatchAddress = v_ptr32()
self.Vector = v_uint32()
self.Irql = v_uint8()
self.SynchronizeIrql = v_uint8()
self.FloatingSave = v_uint8()
self.Connected = v_uint8()
self.Number = v_uint8()
self.ShareVector = v_uint8()
self._pad0030 = v_bytes(size=2)
self.Mode = v_uint32()
self.ServiceCount = v_uint32()
self.DispatchCount = v_uint32()
self.DispatchCode = vstruct.VArray([ v_uint32() for i in xrange(106) ])
class RTL_CRITICAL_SECTION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DebugInfo = v_ptr32()
self.LockCount = v_uint32()
self.RecursionCount = v_uint32()
self.OwningThread = v_ptr32()
self.LockSemaphore = v_ptr32()
self.SpinCount = v_uint32()
class _unnamed_16782(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Argument1 = v_ptr32()
self.Argument2 = v_ptr32()
self.Argument3 = v_ptr32()
self.Argument4 = v_ptr32()
self.Argument5 = v_ptr32()
class _unnamed_16780(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ResourceToRelease = v_ptr32()
class _unnamed_16781(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SyncType = v_uint32()
self.PageProtection = v_uint32()
class KSYSTEM_TIME(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.High1Time = v_uint32()
self.High2Time = v_uint32()
class PO_DEVICE_NOTIFY_ORDER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DevNodeSequence = v_uint32()
self.WarmEjectPdoPointer = v_ptr32()
self.OrderLevel = vstruct.VArray([ PO_NOTIFY_ORDER_LEVEL() for i in xrange(8) ])
class _unnamed_11882(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReadMemory = DBGKD_READ_MEMORY32()
self._pad0028 = v_bytes(size=28)
class FLOATING_SAVE_AREA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ControlWord = v_uint32()
self.StatusWord = v_uint32()
self.TagWord = v_uint32()
self.ErrorOffset = v_uint32()
self.ErrorSelector = v_uint32()
self.DataOffset = v_uint32()
self.DataSelector = v_uint32()
self.RegisterArea = vstruct.VArray([ v_uint8() for i in xrange(80) ])
self.Cr0NpxState = v_uint32()
class WMI_LOGGER_MODE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SequentialFile = v_uint32()
class KQUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.EntryListHead = LIST_ENTRY()
self.CurrentCount = v_uint32()
self.MaximumCount = v_uint32()
self.ThreadListHead = LIST_ENTRY()
class POOL_TRACKER_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Key = v_uint32()
self.NonPagedAllocs = v_uint32()
self.NonPagedFrees = v_uint32()
self.NonPagedBytes = v_uint32()
self.PagedAllocs = v_uint32()
self.PagedFrees = v_uint32()
self.PagedBytes = v_uint32()
class _unnamed_16666(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DiskId = GUID()
class WMI_BUFFER_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Free = v_uint32()
class LUID_AND_ATTRIBUTES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Luid = LUID()
self.Attributes = v_uint32()
class _unnamed_15560(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Base = v_uint32()
self.Limit = v_uint32()
class MMMOD_WRITER_MDL_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Links = LIST_ENTRY()
self.WriteOffset = LARGE_INTEGER()
self.u = _unnamed_15130()
self.Irp = v_ptr32()
self.LastPageToWrite = v_uint32()
self.PagingListHead = v_ptr32()
self.CurrentList = v_ptr32()
self.PagingFile = v_ptr32()
self.File = v_ptr32()
self.ControlArea = v_ptr32()
self.FileResource = v_ptr32()
self.Mdl = MDL()
self.Page = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class CACHED_CHILD_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.ValueList = v_uint32()
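# KTHREAD: kernel thread control block (the first member of ETHREAD):
# scheduler state, wait blocks, APC state, kernel stack pointers and
# priority/affinity information.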
class KTHREAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.MutantListHead = LIST_ENTRY()
self.InitialStack = v_ptr32()
self.StackLimit = v_ptr32()
self.Teb = v_ptr32()
self.TlsArray = v_ptr32()
self.KernelStack = v_ptr32()
self.DebugActive = v_uint8()
self.State = v_uint8()
self.Alerted = vstruct.VArray([ v_uint8() for i in xrange(2) ])
self.Iopl = v_uint8()
self.NpxState = v_uint8()
self.Saturation = v_uint8()
self.Priority = v_uint8()
self.ApcState = KAPC_STATE()
self.ContextSwitches = v_uint32()
self.IdleSwapBlock = v_uint8()
self.Spare0 = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.WaitStatus = v_uint32()
self.WaitIrql = v_uint8()
self.WaitMode = v_uint8()
self.WaitNext = v_uint8()
self.WaitReason = v_uint8()
self.WaitBlockList = v_ptr32()
self.WaitListEntry = LIST_ENTRY()
self.WaitTime = v_uint32()
self.BasePriority = v_uint8()
self.DecrementCount = v_uint8()
self.PriorityDecrement = v_uint8()
self.Quantum = v_uint8()
self.WaitBlock = vstruct.VArray([ KWAIT_BLOCK() for i in xrange(4) ])
self.LegoData = v_ptr32()
self.KernelApcDisable = v_uint32()
self.UserAffinity = v_uint32()
self.SystemAffinityActive = v_uint8()
self.PowerState = v_uint8()
self.NpxIrql = v_uint8()
self.InitialNode = v_uint8()
self.ServiceTable = v_ptr32()
self.Queue = v_ptr32()
self.ApcQueueLock = v_uint32()
self._pad00f0 = v_bytes(size=4)
self.Timer = KTIMER()
self.QueueListEntry = LIST_ENTRY()
self.SoftAffinity = v_uint32()
self.Affinity = v_uint32()
self.Preempted = v_uint8()
self.ProcessReadyQueue = v_uint8()
self.KernelStackResident = v_uint8()
self.NextProcessor = v_uint8()
self.CallbackStack = v_ptr32()
self.Win32Thread = v_ptr32()
self.TrapFrame = v_ptr32()
self.ApcStatePointer = vstruct.VArray([ v_ptr32() for i in xrange(2) ])
self.PreviousMode = v_uint8()
self.EnableStackSwap = v_uint8()
self.LargeStack = v_uint8()
self.ResourceIndex = v_uint8()
self.KernelTime = v_uint32()
self.UserTime = v_uint32()
self.SavedApcState = KAPC_STATE()
self.Alertable = v_uint8()
self.ApcStateIndex = v_uint8()
self.ApcQueueable = v_uint8()
self.AutoAlignment = v_uint8()
self.StackBase = v_ptr32()
self.SuspendApc = KAPC()
self.SuspendSemaphore = KSEMAPHORE()
self.ThreadListEntry = LIST_ENTRY()
self.FreezeCount = v_uint8()
self.SuspendCount = v_uint8()
self.IdealProcessor = v_uint8()
self.DisableBoost = v_uint8()
self._pad01c0 = v_bytes(size=4)
class _unnamed_12531(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LongFlags = v_uint32()
class ADAPTER_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
class _unnamed_10508(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
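# CONTEXT: x86 register context used for thread contexts and exception
# dispatch: debug registers, FPU save area, segment and general-purpose
# registers, and the 512-byte extended (FXSAVE) register area.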
class CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ContextFlags = v_uint32()
self.Dr0 = v_uint32()
self.Dr1 = v_uint32()
self.Dr2 = v_uint32()
self.Dr3 = v_uint32()
self.Dr6 = v_uint32()
self.Dr7 = v_uint32()
self.FloatSave = FLOATING_SAVE_AREA()
self.SegGs = v_uint32()
self.SegFs = v_uint32()
self.SegEs = v_uint32()
self.SegDs = v_uint32()
self.Edi = v_uint32()
self.Esi = v_uint32()
self.Ebx = v_uint32()
self.Edx = v_uint32()
self.Ecx = v_uint32()
self.Eax = v_uint32()
self.Ebp = v_uint32()
self.Eip = v_uint32()
self.SegCs = v_uint32()
self.EFlags = v_uint32()
self.Esp = v_uint32()
self.SegSs = v_uint32()
self.ExtendedRegisters = vstruct.VArray([ v_uint8() for i in xrange(512) ])
class DBGKD_GET_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Unused = v_uint32()
class GENERIC_MAPPING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.GenericRead = v_uint32()
self.GenericWrite = v_uint32()
self.GenericExecute = v_uint32()
self.GenericAll = v_uint32()
class DEVICE_NODE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Sibling = v_ptr32()
self.Child = v_ptr32()
self.Parent = v_ptr32()
self.LastChild = v_ptr32()
self.Level = v_uint32()
self.Notify = v_ptr32()
self.State = v_uint32()
self.PreviousState = v_uint32()
self.StateHistory = vstruct.VArray([ PNP_DEVNODE_STATE() for i in xrange(20) ])
self.StateHistoryEntry = v_uint32()
self.CompletionStatus = v_uint32()
self.PendingIrp = v_ptr32()
self.Flags = v_uint32()
self.UserFlags = v_uint32()
self.Problem = v_uint32()
self.PhysicalDeviceObject = v_ptr32()
self.ResourceList = v_ptr32()
self.ResourceListTranslated = v_ptr32()
self.InstancePath = UNICODE_STRING()
self.ServiceName = UNICODE_STRING()
self.DuplicatePDO = v_ptr32()
self.ResourceRequirements = v_ptr32()
self.InterfaceType = v_uint32()
self.BusNumber = v_uint32()
self.ChildInterfaceType = v_uint32()
self.ChildBusNumber = v_uint32()
self.ChildBusTypeIndex = v_uint16()
self.RemovalPolicy = v_uint8()
self.HardwareRemovalPolicy = v_uint8()
self.TargetDeviceNotify = LIST_ENTRY()
self.DeviceArbiterList = LIST_ENTRY()
self.DeviceTranslatorList = LIST_ENTRY()
self.NoTranslatorMask = v_uint16()
self.QueryTranslatorMask = v_uint16()
self.NoArbiterMask = v_uint16()
self.QueryArbiterMask = v_uint16()
self.OverUsed1 = _unnamed_12916()
self.OverUsed2 = _unnamed_12917()
self.BootResources = v_ptr32()
self.CapabilityFlags = v_uint32()
self.DockInfo = _unnamed_12918()
self.DisableableDepends = v_uint32()
self.PendedSetInterfaceState = LIST_ENTRY()
self.LegacyBusListEntry = LIST_ENTRY()
self.DriverUnloadRetryCount = v_uint32()
class RTL_ATOM_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.CriticalSection = RTL_CRITICAL_SECTION()
self.RtlHandleTable = RTL_HANDLE_TABLE()
self.NumberOfBuckets = v_uint32()
self.Buckets = vstruct.VArray([ v_ptr32() for i in xrange(1) ])
class _unnamed_15130(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IoStatus = IO_STATUS_BLOCK()
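# KUSER_SHARED_DATA: read-only page the kernel shares with every user process
# (mapped at 0x7FFE0000 in user mode on x86): tick counts, system time and time
# zone bias, NT version numbers, ProcessorFeatures and the system cookie.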
class KUSER_SHARED_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TickCountLow = v_uint32()
self.TickCountMultiplier = v_uint32()
self.InterruptTime = KSYSTEM_TIME()
self.SystemTime = KSYSTEM_TIME()
self.TimeZoneBias = KSYSTEM_TIME()
self.ImageNumberLow = v_uint16()
self.ImageNumberHigh = v_uint16()
self.NtSystemRoot = vstruct.VArray([ v_uint16() for i in xrange(260) ])
self.MaxStackTraceDepth = v_uint32()
self.CryptoExponent = v_uint32()
self.TimeZoneId = v_uint32()
self.Reserved2 = vstruct.VArray([ v_uint32() for i in xrange(8) ])
self.NtProductType = v_uint32()
self.ProductTypeIsValid = v_uint8()
self._pad026c = v_bytes(size=3)
self.NtMajorVersion = v_uint32()
self.NtMinorVersion = v_uint32()
self.ProcessorFeatures = vstruct.VArray([ v_uint8() for i in xrange(64) ])
self.Reserved1 = v_uint32()
self.Reserved3 = v_uint32()
self.TimeSlip = v_uint32()
self.AlternativeArchitecture = v_uint32()
self._pad02c8 = v_bytes(size=4)
self.SystemExpirationDate = LARGE_INTEGER()
self.SuiteMask = v_uint32()
self.KdDebuggerEnabled = v_uint8()
self.NXSupportPolicy = v_uint8()
self._pad02d8 = v_bytes(size=2)
self.ActiveConsoleId = v_uint32()
self.DismountCount = v_uint32()
self.ComPlusPackage = v_uint32()
self.LastSystemRITEventTickCount = v_uint32()
self.NumberOfPhysicalPages = v_uint32()
self.SafeBootMode = v_uint8()
self._pad02f0 = v_bytes(size=3)
self.TraceLogging = v_uint32()
self._pad02f8 = v_bytes(size=4)
self.TestRetInstruction = v_uint64()
self.SystemCall = v_uint32()
self.SystemCallReturn = v_uint32()
self.SystemCallPad = vstruct.VArray([ v_uint64() for i in xrange(3) ])
self.TickCount = KSYSTEM_TIME()
self._pad0330 = v_bytes(size=4)
self.Cookie = v_uint32()
self._pad0338 = v_bytes(size=4)
class IMAGE_ROM_OPTIONAL_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Magic = v_uint16()
self.MajorLinkerVersion = v_uint8()
self.MinorLinkerVersion = v_uint8()
self.SizeOfCode = v_uint32()
self.SizeOfInitializedData = v_uint32()
self.SizeOfUninitializedData = v_uint32()
self.AddressOfEntryPoint = v_uint32()
self.BaseOfCode = v_uint32()
self.BaseOfData = v_uint32()
self.BaseOfBss = v_uint32()
self.GprMask = v_uint32()
self.CprMask = vstruct.VArray([ v_uint32() for i in xrange(4) ])
self.GpValue = v_uint32()
class _unnamed_16242(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SystemContext = v_uint32()
self.Type = v_uint32()
self.State = POWER_STATE()
self.ShutdownType = v_uint32()
class HEAP_FREE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.PreviousSize = v_uint16()
self.SmallTagIndex = v_uint8()
self.Flags = v_uint8()
self.UnusedBytes = v_uint8()
self.SegmentIndex = v_uint8()
self.FreeList = LIST_ENTRY()
class LDR_DATA_TABLE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InLoadOrderLinks = LIST_ENTRY()
self.InMemoryOrderLinks = LIST_ENTRY()
self.InInitializationOrderLinks = LIST_ENTRY()
self.DllBase = v_ptr32()
self.EntryPoint = v_ptr32()
self.SizeOfImage = v_uint32()
self.FullDllName = UNICODE_STRING()
self.BaseDllName = UNICODE_STRING()
self.Flags = v_uint32()
self.LoadCount = v_uint16()
self.TlsIndex = v_uint16()
self.HashLinks = LIST_ENTRY()
self.TimeDateStamp = v_uint32()
self.EntryPointActivationContext = v_ptr32()
self.PatchInformation = v_ptr32()
class MMADDRESS_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartVpn = v_uint32()
self.EndVpn = v_uint32()
class _unnamed_15988(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityContext = v_ptr32()
self.Options = v_uint32()
self.FileAttributes = v_uint16()
self.ShareAccess = v_uint16()
self.EaLength = v_uint32()
class DBGKD_READ_MEMORY64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TargetBaseAddress = v_uint64()
self.TransferCount = v_uint32()
self.ActualBytesRead = v_uint32()
class PO_MEMORY_IMAGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.Version = v_uint32()
self.CheckSum = v_uint32()
self.LengthSelf = v_uint32()
self.PageSelf = v_uint32()
self.PageSize = v_uint32()
self.ImageType = v_uint32()
self._pad0020 = v_bytes(size=4)
self.SystemTime = LARGE_INTEGER()
self.InterruptTime = v_uint64()
self.FeatureFlags = v_uint32()
self.HiberFlags = v_uint8()
self.spare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.NoHiberPtes = v_uint32()
self.HiberVa = v_uint32()
self.HiberPte = LARGE_INTEGER()
self.NoFreePages = v_uint32()
self.FreeMapCheck = v_uint32()
self.WakeCheck = v_uint32()
self.TotalPages = v_uint32()
self.FirstTablePage = v_uint32()
self.LastFilePage = v_uint32()
self.PerfInfo = PO_HIBER_PERF()
class HEAP_UCR_SEGMENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.ReservedSize = v_uint32()
self.CommittedSize = v_uint32()
self.filler = v_uint32()
class HHIVE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.GetCellRoutine = v_ptr32()
self.ReleaseCellRoutine = v_ptr32()
self.Allocate = v_ptr32()
self.Free = v_ptr32()
self.FileSetSize = v_ptr32()
self.FileWrite = v_ptr32()
self.FileRead = v_ptr32()
self.FileFlush = v_ptr32()
self.BaseBlock = v_ptr32()
self.DirtyVector = RTL_BITMAP()
self.DirtyCount = v_uint32()
self.DirtyAlloc = v_uint32()
self.RealWrites = v_uint8()
self._pad003c = v_bytes(size=3)
self.Cluster = v_uint32()
self.Flat = v_uint8()
self.ReadOnly = v_uint8()
self.Log = v_uint8()
self._pad0044 = v_bytes(size=1)
self.HiveFlags = v_uint32()
self.LogSize = v_uint32()
self.RefreshCount = v_uint32()
self.StorageTypeCount = v_uint32()
self.Version = v_uint32()
self.Storage = vstruct.VArray([ DUAL() for i in xrange(2) ])
class TEB_ACTIVE_FRAME_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint32()
self.FrameName = v_ptr32()
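# TEB: Thread Environment Block, the per-thread user-mode block reachable
# through the FS segment on x86: TIB, ClientId, PEB pointer, LastErrorValue,
# TLS slots and assorted GDI/OpenGL/Winsock reserved areas.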
class TEB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NtTib = NT_TIB()
self.EnvironmentPointer = v_ptr32()
self.ClientId = CLIENT_ID()
self.ActiveRpcHandle = v_ptr32()
self.ThreadLocalStoragePointer = v_ptr32()
self.ProcessEnvironmentBlock = v_ptr32()
self.LastErrorValue = v_uint32()
self.CountOfOwnedCriticalSections = v_uint32()
self.CsrClientThread = v_ptr32()
self.Win32ThreadInfo = v_ptr32()
self.User32Reserved = vstruct.VArray([ v_uint32() for i in xrange(26) ])
self.UserReserved = vstruct.VArray([ v_uint32() for i in xrange(5) ])
self.WOW32Reserved = v_ptr32()
self.CurrentLocale = v_uint32()
self.FpSoftwareStatusRegister = v_uint32()
self.SystemReserved1 = vstruct.VArray([ v_ptr32() for i in xrange(54) ])
self.ExceptionCode = v_uint32()
self.ActivationContextStack = ACTIVATION_CONTEXT_STACK()
self.SpareBytes1 = vstruct.VArray([ v_uint8() for i in xrange(24) ])
self.GdiTebBatch = GDI_TEB_BATCH()
self.RealClientId = CLIENT_ID()
self.GdiCachedProcessHandle = v_ptr32()
self.GdiClientPID = v_uint32()
self.GdiClientTID = v_uint32()
self.GdiThreadLocalInfo = v_ptr32()
self.Win32ClientInfo = vstruct.VArray([ v_uint32() for i in xrange(62) ])
self.glDispatchTable = vstruct.VArray([ v_ptr32() for i in xrange(233) ])
self.glReserved1 = vstruct.VArray([ v_uint32() for i in xrange(29) ])
self.glReserved2 = v_ptr32()
self.glSectionInfo = v_ptr32()
self.glSection = v_ptr32()
self.glTable = v_ptr32()
self.glCurrentRC = v_ptr32()
self.glContext = v_ptr32()
self.LastStatusValue = v_uint32()
self.StaticUnicodeString = UNICODE_STRING()
self.StaticUnicodeBuffer = vstruct.VArray([ v_uint16() for i in xrange(261) ])
self._pad0e0c = v_bytes(size=2)
self.DeallocationStack = v_ptr32()
self.TlsSlots = vstruct.VArray([ v_ptr32() for i in xrange(64) ])
self.TlsLinks = LIST_ENTRY()
self.Vdm = v_ptr32()
self.ReservedForNtRpc = v_ptr32()
self.DbgSsReserved = vstruct.VArray([ v_ptr32() for i in xrange(2) ])
self.HardErrorsAreDisabled = v_uint32()
self.Instrumentation = vstruct.VArray([ v_ptr32() for i in xrange(16) ])
self.WinSockData = v_ptr32()
self.GdiBatchCount = v_uint32()
self.InDbgPrint = v_uint8()
self.FreeStackOnTermination = v_uint8()
self.HasFiberData = v_uint8()
self.IdealProcessor = v_uint8()
self.Spare3 = v_uint32()
self.ReservedForPerf = v_ptr32()
self.ReservedForOle = v_ptr32()
self.WaitingOnLoaderLock = v_uint32()
self.Wx86Thread = Wx86ThreadState()
self.TlsExpansionSlots = v_ptr32()
self.ImpersonationLocale = v_uint32()
self.IsImpersonating = v_uint32()
self.NlsCache = v_ptr32()
self.pShimData = v_ptr32()
self.HeapVirtualAffinity = v_uint32()
self.CurrentTransactionHandle = v_ptr32()
self.ActiveFrame = v_ptr32()
self.SafeThunkCall = v_uint8()
self.BooleanSpare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
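# DRIVER_OBJECT: represents a loaded kernel-mode driver: image range,
# DriverInit/DriverUnload pointers and the MajorFunction[] IRP dispatch table.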
class DRIVER_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.DeviceObject = v_ptr32()
self.Flags = v_uint32()
self.DriverStart = v_ptr32()
self.DriverSize = v_uint32()
self.DriverSection = v_ptr32()
self.DriverExtension = v_ptr32()
self.DriverName = UNICODE_STRING()
self.HardwareDatabase = v_ptr32()
self.FastIoDispatch = v_ptr32()
self.DriverInit = v_ptr32()
self.DriverStartIo = v_ptr32()
self.DriverUnload = v_ptr32()
self.MajorFunction = vstruct.VArray([ v_ptr32() for i in xrange(28) ])
class OBJECT_SYMBOLIC_LINK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CreationTime = LARGE_INTEGER()
self.LinkTarget = UNICODE_STRING()
self.LinkTargetRemaining = UNICODE_STRING()
self.LinkTargetObject = v_ptr32()
self.DosDeviceDriveIndex = v_uint32()
class EJOB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Event = KEVENT()
self.JobLinks = LIST_ENTRY()
self.ProcessListHead = LIST_ENTRY()
self.JobLock = ERESOURCE()
self.TotalUserTime = LARGE_INTEGER()
self.TotalKernelTime = LARGE_INTEGER()
self.ThisPeriodTotalUserTime = LARGE_INTEGER()
self.ThisPeriodTotalKernelTime = LARGE_INTEGER()
self.TotalPageFaultCount = v_uint32()
self.TotalProcesses = v_uint32()
self.ActiveProcesses = v_uint32()
self.TotalTerminatedProcesses = v_uint32()
self.PerProcessUserTimeLimit = LARGE_INTEGER()
self.PerJobUserTimeLimit = LARGE_INTEGER()
self.LimitFlags = v_uint32()
self.MinimumWorkingSetSize = v_uint32()
self.MaximumWorkingSetSize = v_uint32()
self.ActiveProcessLimit = v_uint32()
self.Affinity = v_uint32()
self.PriorityClass = v_uint8()
self._pad00b0 = v_bytes(size=3)
self.UIRestrictionsClass = v_uint32()
self.SecurityLimitFlags = v_uint32()
self.Token = v_ptr32()
self.Filter = v_ptr32()
self.EndOfJobTimeAction = v_uint32()
self.CompletionPort = v_ptr32()
self.CompletionKey = v_ptr32()
self.SessionId = v_uint32()
self.SchedulingClass = v_uint32()
self._pad00d8 = v_bytes(size=4)
self.ReadOperationCount = v_uint64()
self.WriteOperationCount = v_uint64()
self.OtherOperationCount = v_uint64()
self.ReadTransferCount = v_uint64()
self.WriteTransferCount = v_uint64()
self.OtherTransferCount = v_uint64()
self.IoInfo = IO_COUNTERS()
self.ProcessMemoryLimit = v_uint32()
self.JobMemoryLimit = v_uint32()
self.PeakProcessMemoryUsed = v_uint32()
self.PeakJobMemoryUsed = v_uint32()
self.CurrentJobMemoryUsed = v_uint32()
self.MemoryLimitsLock = FAST_MUTEX()
self.JobSetLinks = LIST_ENTRY()
self.MemberLevel = v_uint32()
self.JobFlags = v_uint32()
self._pad0180 = v_bytes(size=4)
class _unnamed_16023(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.Key = v_uint32()
self.ByteOffset = LARGE_INTEGER()
class DBGKD_READ_WRITE_IO_EXTENDED64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSize = v_uint32()
self.InterfaceType = v_uint32()
self.BusNumber = v_uint32()
self.AddressSpace = v_uint32()
self.IoAddress = v_uint64()
self.DataValue = v_uint32()
self._pad0020 = v_bytes(size=4)
class IO_STATUS_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Status = v_uint32()
self.Information = v_uint32()
class KPROCESSOR_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ContextFrame = CONTEXT()
self.SpecialRegisters = KSPECIAL_REGISTERS()
class KiIoAccessMap(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DirectionMap = vstruct.VArray([ v_uint8() for i in xrange(32) ])
self.IoMap = vstruct.VArray([ v_uint8() for i in xrange(8196) ])
class KAPC(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.Spare0 = v_uint32()
self.Thread = v_ptr32()
self.ApcListEntry = LIST_ENTRY()
self.KernelRoutine = v_ptr32()
self.RundownRoutine = v_ptr32()
self.NormalRoutine = v_ptr32()
self.NormalContext = v_ptr32()
self.SystemArgument1 = v_ptr32()
self.SystemArgument2 = v_ptr32()
self.ApcStateIndex = v_uint8()
self.ApcMode = v_uint8()
self.Inserted = v_uint8()
self._pad0030 = v_bytes(size=1)
class POOL_TRACKER_BIG_PAGES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Va = v_ptr32()
self.Key = v_uint32()
self.NumberOfPages = v_uint32()
class SID_IDENTIFIER_AUTHORITY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Value = vstruct.VArray([ v_uint8() for i in xrange(6) ])
class RTL_RANGE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = LIST_ENTRY()
self.Flags = v_uint32()
self.Count = v_uint32()
self.Stamp = v_uint32()
class LARGE_CONTROL_AREA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Segment = v_ptr32()
self.DereferenceList = LIST_ENTRY()
self.NumberOfSectionReferences = v_uint32()
self.NumberOfPfnReferences = v_uint32()
self.NumberOfMappedViews = v_uint32()
self.NumberOfSubsections = v_uint16()
self.FlushInProgressCount = v_uint16()
self.NumberOfUserReferences = v_uint32()
self.u = _unnamed_12520()
self.FilePointer = v_ptr32()
self.WaitingForDeletion = v_ptr32()
self.ModifiedWriteCount = v_uint16()
self.NumberOfSystemCacheViews = v_uint16()
self.StartingFrame = v_uint32()
self.UserGlobalList = LIST_ENTRY()
self.SessionId = v_uint32()
class VI_POOL_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InUse = VI_POOL_ENTRY_INUSE()
class POOL_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PreviousSize = v_uint16()
self.BlockSize = v_uint16()
self.ProcessBilled = v_ptr32()
class SHARED_CACHE_MAP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NodeTypeCode = v_uint16()
self.NodeByteSize = v_uint16()
self.OpenCount = v_uint32()
self.FileSize = LARGE_INTEGER()
self.BcbList = LIST_ENTRY()
self.SectionSize = LARGE_INTEGER()
self.ValidDataLength = LARGE_INTEGER()
self.ValidDataGoal = LARGE_INTEGER()
self.InitialVacbs = vstruct.VArray([ v_ptr32() for i in xrange(4) ])
self.Vacbs = v_ptr32()
self.FileObject = v_ptr32()
self.ActiveVacb = v_ptr32()
self.NeedToZero = v_ptr32()
self.ActivePage = v_uint32()
self.NeedToZeroPage = v_uint32()
self.ActiveVacbSpinLock = v_uint32()
self.VacbActiveCount = v_uint32()
self.DirtyPages = v_uint32()
self.SharedCacheMapLinks = LIST_ENTRY()
self.Flags = v_uint32()
self.Status = v_uint32()
self.Mbcb = v_ptr32()
self.Section = v_ptr32()
self.CreateEvent = v_ptr32()
self.WaitOnActiveCount = v_ptr32()
self.PagesToWrite = v_uint32()
self.BeyondLastFlush = v_uint64()
self.Callbacks = v_ptr32()
self.LazyWriteContext = v_ptr32()
self.PrivateList = LIST_ENTRY()
self.LogHandle = v_ptr32()
self.FlushToLsnRoutine = v_ptr32()
self.DirtyPageThreshold = v_uint32()
self.LazyWritePassCount = v_uint32()
self.UninitializeEvent = v_ptr32()
self.NeedToZeroVacb = v_ptr32()
self.BcbSpinLock = v_uint32()
self.Reserved = v_ptr32()
self.Event = KEVENT()
self.VacbPushLock = EX_PUSH_LOCK()
self._pad00d8 = v_bytes(size=4)
self.PrivateCacheMap = PRIVATE_CACHE_MAP()
class TRACE_ENABLE_FLAG_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Offset = v_uint16()
self.Length = v_uint8()
self.Flag = v_uint8()
class MI_VERIFIER_POOL_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListIndex = v_uint32()
self.Verifier = v_ptr32()
class MMBANKED_SECTION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BasePhysicalPage = v_uint32()
self.BasedPte = v_ptr32()
self.BankSize = v_uint32()
self.BankShift = v_uint32()
self.BankedRoutine = v_ptr32()
self.Context = v_ptr32()
self.CurrentMappedPte = v_ptr32()
self.BankTemplate = vstruct.VArray([ MMPTE() for i in xrange(1) ])
class PCI_POWER_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CurrentSystemState = v_uint32()
self.CurrentDeviceState = v_uint32()
self.SystemWakeLevel = v_uint32()
self.DeviceWakeLevel = v_uint32()
self.SystemStateMapping = vstruct.VArray([ DEVICE_POWER_STATE() for i in xrange(7) ])
self.WaitWakeIrp = v_ptr32()
self.SavedCancelRoutine = v_ptr32()
self.Paging = v_uint32()
self.Hibernate = v_uint32()
self.CrashDump = v_uint32()
class RTL_CRITICAL_SECTION_DEBUG(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.CreatorBackTraceIndex = v_uint16()
self.CriticalSection = v_ptr32()
self.ProcessLocksList = LIST_ENTRY()
self.EntryCount = v_uint32()
self.ContentionCount = v_uint32()
self.Spare = vstruct.VArray([ v_uint32() for i in xrange(2) ])
class PNP_DEVICE_EVENT_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = LIST_ENTRY()
self.Argument = v_uint32()
self.CallerEvent = v_ptr32()
self.Callback = v_ptr32()
self.Context = v_ptr32()
self.VetoType = v_ptr32()
self.VetoName = v_ptr32()
self.Data = PLUGPLAY_EVENT_BLOCK()
class ARBITER_CONFLICT_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OwningObject = v_ptr32()
self._pad0008 = v_bytes(size=4)
self.Start = v_uint64()
self.End = v_uint64()
class SID_AND_ATTRIBUTES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Sid = v_ptr32()
self.Attributes = v_uint32()
class VI_DEADLOCK_GLOBALS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Nodes = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.Resources = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.Threads = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.TimeAcquire = v_uint64()
self.TimeRelease = v_uint64()
self.BytesAllocated = v_uint32()
self.ResourceDatabase = v_ptr32()
self.ThreadDatabase = v_ptr32()
self.AllocationFailures = v_uint32()
self.NodesTrimmedBasedOnAge = v_uint32()
self.NodesTrimmedBasedOnCount = v_uint32()
self.NodesSearched = v_uint32()
self.MaxNodesSearched = v_uint32()
self.SequenceNumber = v_uint32()
self.RecursionDepthLimit = v_uint32()
self.SearchedNodesLimit = v_uint32()
self.DepthLimitHits = v_uint32()
self.SearchLimitHits = v_uint32()
self.ABC_ACB_Skipped = v_uint32()
self.FreeResourceList = LIST_ENTRY()
self.FreeThreadList = LIST_ENTRY()
self.FreeNodeList = LIST_ENTRY()
self.FreeResourceCount = v_uint32()
self.FreeThreadCount = v_uint32()
self.FreeNodeCount = v_uint32()
self.Instigator = v_ptr32()
self.NumberOfParticipants = v_uint32()
self.Participant = vstruct.VArray([ v_ptr32() for i in xrange(32) ])
self.CacheReductionInProgress = v_uint32()
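# TOKEN: access token object backing a process or thread security context:
# token and authentication LUIDs, user/group SIDs, privileges, default DACL,
# token type and impersonation level.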
class TOKEN(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TokenSource = TOKEN_SOURCE()
self.TokenId = LUID()
self.AuthenticationId = LUID()
self.ParentTokenId = LUID()
self.ExpirationTime = LARGE_INTEGER()
self.TokenLock = v_ptr32()
self._pad0038 = v_bytes(size=4)
self.AuditPolicy = SEP_AUDIT_POLICY()
self.ModifiedId = LUID()
self.SessionId = v_uint32()
self.UserAndGroupCount = v_uint32()
self.RestrictedSidCount = v_uint32()
self.PrivilegeCount = v_uint32()
self.VariableLength = v_uint32()
self.DynamicCharged = v_uint32()
self.DynamicAvailable = v_uint32()
self.DefaultOwnerIndex = v_uint32()
self.UserAndGroups = v_ptr32()
self.RestrictedSids = v_ptr32()
self.PrimaryGroup = v_ptr32()
self.Privileges = v_ptr32()
self.DynamicPart = v_ptr32()
self.DefaultDacl = v_ptr32()
self.TokenType = v_uint32()
self.ImpersonationLevel = v_uint32()
self.TokenFlags = v_uint32()
self.TokenInUse = v_uint8()
self._pad0090 = v_bytes(size=3)
self.ProxyData = v_ptr32()
self.AuditData = v_ptr32()
self.OriginatingLogonSession = LUID()
self.VariablePart = v_uint32()
self._pad00a8 = v_bytes(size=4)
class MMCOLOR_TABLES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flink = v_uint32()
self.Blink = v_ptr32()
self.Count = v_uint32()
class DISPATCHER_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint8()
self.Absolute = v_uint8()
self.Size = v_uint8()
self.Inserted = v_uint8()
self.SignalState = v_uint32()
self.WaitListHead = LIST_ENTRY()
class _unnamed_16509(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceNumber = v_uint32()
class _unnamed_16110(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OutputBufferLength = v_uint32()
self.InputBufferLength = v_uint32()
self.FsControlCode = v_uint32()
self.Type3InputBuffer = v_ptr32()
class _unnamed_16505(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Mbr = _unnamed_16663()
self._pad0010 = v_bytes(size=8)
class DBGKD_READ_WRITE_IO64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IoAddress = v_uint64()
self.DataSize = v_uint32()
self.DataValue = v_uint32()
class PROCESSOR_POWER_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IdleFunction = v_ptr32()
self.Idle0KernelTimeLimit = v_uint32()
self.Idle0LastTime = v_uint32()
self.IdleHandlers = v_ptr32()
self.IdleState = v_ptr32()
self.IdleHandlersCount = v_uint32()
self.LastCheck = v_uint64()
self.IdleTimes = PROCESSOR_IDLE_TIMES()
self.IdleTime1 = v_uint32()
self.PromotionCheck = v_uint32()
self.IdleTime2 = v_uint32()
self.CurrentThrottle = v_uint8()
self.ThermalThrottleLimit = v_uint8()
self.CurrentThrottleIndex = v_uint8()
self.ThermalThrottleIndex = v_uint8()
self.LastKernelUserTime = v_uint32()
self.LastIdleThreadKernelTime = v_uint32()
self.PackageIdleStartTime = v_uint32()
self.PackageIdleTime = v_uint32()
self.DebugCount = v_uint32()
self.LastSysTime = v_uint32()
self.TotalIdleStateTime = vstruct.VArray([ v_uint64() for i in xrange(3) ])
self.TotalIdleTransitions = vstruct.VArray([ v_uint32() for i in xrange(3) ])
self._pad0090 = v_bytes(size=4)
self.PreviousC3StateTime = v_uint64()
self.KneeThrottleIndex = v_uint8()
self.ThrottleLimitIndex = v_uint8()
self.PerfStatesCount = v_uint8()
self.ProcessorMinThrottle = v_uint8()
self.ProcessorMaxThrottle = v_uint8()
self.EnableIdleAccounting = v_uint8()
self.LastC3Percentage = v_uint8()
self.LastAdjustedBusyPercentage = v_uint8()
self.PromotionCount = v_uint32()
self.DemotionCount = v_uint32()
self.ErrorCount = v_uint32()
self.RetryCount = v_uint32()
self.Flags = v_uint32()
self._pad00b8 = v_bytes(size=4)
self.PerfCounterFrequency = LARGE_INTEGER()
self.PerfTickCount = v_uint32()
self._pad00c8 = v_bytes(size=4)
self.PerfTimer = KTIMER()
self.PerfDpc = KDPC()
self.PerfStates = v_ptr32()
self.PerfSetThrottle = v_ptr32()
self.LastC3KernelUserTime = v_uint32()
self.LastPackageIdleTime = v_uint32()
class SECURITY_CLIENT_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityQos = SECURITY_QUALITY_OF_SERVICE()
self.ClientToken = v_ptr32()
self.DirectlyAccessClientToken = v_uint8()
self.DirectAccessEffectiveOnly = v_uint8()
self.ServerIsRemote = v_uint8()
self._pad0014 = v_bytes(size=1)
self.ClientTokenControl = TOKEN_CONTROL()
class DBGKD_SEARCH_MEMORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SearchAddress = v_uint64()
self.SearchLength = v_uint64()
self.PatternLength = v_uint32()
self._pad0018 = v_bytes(size=4)
class DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Pad = v_uint16()
self.Limit = v_uint16()
self.Base = v_uint32()
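# DBGKD_MANIPULATE_STATE64: 64-bit kernel-debugger (KD protocol)
# manipulate-state packet; ApiNumber selects which member of the union u is
# meaningful for a given request/response.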
class DBGKD_MANIPULATE_STATE64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ApiNumber = v_uint32()
self.ProcessorLevel = v_uint16()
self.Processor = v_uint16()
self.ReturnStatus = v_uint32()
self._pad0010 = v_bytes(size=4)
self.u = _unnamed_11794()
class LPCP_PORT_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NonPagedPortQueue = v_ptr32()
self.Semaphore = v_ptr32()
self.ReceiveHead = LIST_ENTRY()
class DBGKD_LOAD_SYMBOLS64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PathNameLength = v_uint32()
self._pad0008 = v_bytes(size=4)
self.BaseOfDll = v_uint64()
self.ProcessId = v_uint64()
self.CheckSum = v_uint32()
self.SizeOfImage = v_uint32()
self.UnloadSymbols = v_uint8()
self._pad0028 = v_bytes(size=7)
class CACHE_UNINITIALIZE_EVENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Event = KEVENT()
class SECURITY_QUALITY_OF_SERVICE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.ImpersonationLevel = v_uint32()
self.ContextTrackingMode = v_uint8()
self.EffectiveOnly = v_uint8()
self._pad000c = v_bytes(size=2)
class COMPRESSED_DATA_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CompressionFormatAndEngine = v_uint16()
self.CompressionUnitShift = v_uint8()
self.ChunkShift = v_uint8()
self.ClusterShift = v_uint8()
self.Reserved = v_uint8()
self.NumberOfChunks = v_uint16()
self.CompressedChunkSizes = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class _unnamed_14650(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint32()
self.Length = v_uint32()
self.Reserved = v_uint32()
class RTL_HANDLE_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MaximumNumberOfHandles = v_uint32()
self.SizeOfHandleTableEntry = v_uint32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.FreeHandles = v_ptr32()
self.CommittedHandles = v_ptr32()
self.UnCommittedHandles = v_ptr32()
self.MaxReservedHandles = v_ptr32()
class _unnamed_14654(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSize = v_uint32()
self.Reserved1 = v_uint32()
self.Reserved2 = v_uint32()
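# CMHIVE: Configuration Manager (registry) hive object wrapping the raw HHIVE.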
class CMHIVE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Hive = HHIVE()
self.FileHandles = vstruct.VArray([ v_ptr32() for i in xrange(3) ])
self.NotifyList = LIST_ENTRY()
self.HiveList = LIST_ENTRY()
self.HiveLock = v_ptr32()
self.ViewLock = v_ptr32()
self.LRUViewListHead = LIST_ENTRY()
self.PinViewListHead = LIST_ENTRY()
self.FileObject = v_ptr32()
self.FileFullPath = UNICODE_STRING()
self.FileUserName = UNICODE_STRING()
self.MappedViews = v_uint16()
self.PinnedViews = v_uint16()
self.UseCount = v_uint32()
self.SecurityCount = v_uint32()
self.SecurityCacheSize = v_uint32()
self.SecurityHitHint = v_uint32()
self.SecurityCache = v_ptr32()
self.SecurityHash = vstruct.VArray([ LIST_ENTRY() for i in xrange(64) ])
self.UnloadEvent = v_ptr32()
self.RootKcb = v_ptr32()
self.Frozen = v_uint8()
self._pad047c = v_bytes(size=3)
self.UnloadWorkItem = v_ptr32()
self.GrowOnlyMode = v_uint8()
self._pad0484 = v_bytes(size=3)
self.GrowOffset = v_uint32()
self.KcbConvertListHead = LIST_ENTRY()
self.KnodeConvertListHead = LIST_ENTRY()
self.CellRemapArray = v_ptr32()
class POP_SHUTDOWN_BUG_CHECK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Code = v_uint32()
self.Parameter1 = v_uint32()
self.Parameter2 = v_uint32()
self.Parameter3 = v_uint32()
self.Parameter4 = v_uint32()
class SECTION_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartingVa = v_ptr32()
self.EndingVa = v_ptr32()
self.Parent = v_ptr32()
self.LeftChild = v_ptr32()
self.RightChild = v_ptr32()
self.Segment = v_ptr32()
class LUID(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
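# OBJECT_HEADER precedes every Object Manager object; Body marks where the
# object itself (e.g. an event or process) begins.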
class OBJECT_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PointerCount = v_uint32()
self.HandleCount = v_uint32()
self.Type = v_ptr32()
self.NameInfoOffset = v_uint8()
self.HandleInfoOffset = v_uint8()
self.QuotaInfoOffset = v_uint8()
self.Flags = v_uint8()
self.ObjectCreateInfo = v_ptr32()
self.SecurityDescriptor = v_ptr32()
self.Body = QUAD()
class PCI_MN_DISPATCH_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DispatchStyle = v_uint32()
self.DispatchFunction = v_ptr32()
class PCI_HEADER_TYPE_2(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SocketRegistersBaseAddress = v_uint32()
self.CapabilitiesPtr = v_uint8()
self.Reserved = v_uint8()
self.SecondaryStatus = v_uint16()
self.PrimaryBus = v_uint8()
self.SecondaryBus = v_uint8()
self.SubordinateBus = v_uint8()
self.SecondaryLatency = v_uint8()
self.Range = vstruct.VArray([ _unnamed_15560() for i in xrange(4) ])
self.InterruptLine = v_uint8()
self.InterruptPin = v_uint8()
self.BridgeControl = v_uint16()
class PCI_HEADER_TYPE_1(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseAddresses = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.PrimaryBus = v_uint8()
self.SecondaryBus = v_uint8()
self.SubordinateBus = v_uint8()
self.SecondaryLatency = v_uint8()
self.IOBase = v_uint8()
self.IOLimit = v_uint8()
self.SecondaryStatus = v_uint16()
self.MemoryBase = v_uint16()
self.MemoryLimit = v_uint16()
self.PrefetchBase = v_uint16()
self.PrefetchLimit = v_uint16()
self.PrefetchBaseUpper32 = v_uint32()
self.PrefetchLimitUpper32 = v_uint32()
self.IOBaseUpper16 = v_uint16()
self.IOLimitUpper16 = v_uint16()
self.CapabilitiesPtr = v_uint8()
self.Reserved1 = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.ROMBaseAddress = v_uint32()
self.InterruptLine = v_uint8()
self.InterruptPin = v_uint8()
self.BridgeControl = v_uint16()
class PCI_HEADER_TYPE_0(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseAddresses = vstruct.VArray([ v_uint32() for i in xrange(6) ])
self.CIS = v_uint32()
self.SubVendorID = v_uint16()
self.SubSystemID = v_uint16()
self.ROMBaseAddress = v_uint32()
self.CapabilitiesPtr = v_uint8()
self.Reserved1 = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.Reserved2 = v_uint32()
self.InterruptLine = v_uint8()
self.InterruptPin = v_uint8()
self.MinimumGrant = v_uint8()
self.MaximumLatency = v_uint8()
class MMPFN(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u1 = _unnamed_13150()
self.PteAddress = v_ptr32()
self.u2 = _unnamed_13151()
self.u3 = _unnamed_13152()
self.OriginalPte = MMPTE()
self.u4 = _unnamed_13153()
class OBJECT_DUMP_CONTROL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Stream = v_ptr32()
self.Detail = v_uint32()
class CACHE_MANAGER_CALLBACKS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AcquireForLazyWrite = v_ptr32()
self.ReleaseFromLazyWrite = v_ptr32()
self.AcquireForReadAhead = v_ptr32()
self.ReleaseFromReadAhead = v_ptr32()
class DBGKD_CONTINUE2(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ContinueStatus = v_uint32()
self.ControlSet = X86_DBGKD_CONTROL_SET()
self._pad0020 = v_bytes(size=12)
class HANDLE_TRACE_DB_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ClientId = CLIENT_ID()
self.Handle = v_ptr32()
self.Type = v_uint32()
self.StackTrace = vstruct.VArray([ v_ptr32() for i in xrange(16) ])
class LPCP_NONPAGED_PORT_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Semaphore = KSEMAPHORE()
self.BackPointer = v_ptr32()
class DEVICE_RELATIONS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.Objects = vstruct.VArray([ v_ptr32() for i in xrange(1) ])
class _unnamed_14532(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Port = _unnamed_16299()
class BATTERY_REPORTING_SCALE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Granularity = v_uint32()
self.Capacity = v_uint32()
class MMPAGING_FILE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint32()
self.MaximumSize = v_uint32()
self.MinimumSize = v_uint32()
self.FreeSpace = v_uint32()
self.CurrentUsage = v_uint32()
self.PeakUsage = v_uint32()
self.Hint = v_uint32()
self.HighestPage = v_uint32()
self.Entry = vstruct.VArray([ v_ptr32() for i in xrange(2) ])
self.Bitmap = v_ptr32()
self.File = v_ptr32()
self.PageFileName = UNICODE_STRING()
self.PageFileNumber = v_uint32()
self.Extended = v_uint8()
self.HintSetToZero = v_uint8()
self.BootPartition = v_uint8()
self._pad0040 = v_bytes(size=1)
self.FileHandle = v_ptr32()
class _unnamed_16200(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WhichSpace = v_uint32()
self.Buffer = v_ptr32()
self.Offset = v_uint32()
self.Length = v_uint32()
class STRING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint16()
self.MaximumLength = v_uint16()
self.Buffer = v_ptr32()
class _unnamed_16205(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Lock = v_uint8()
class FNSAVE_FORMAT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ControlWord = v_uint32()
self.StatusWord = v_uint32()
self.TagWord = v_uint32()
self.ErrorOffset = v_uint32()
self.ErrorSelector = v_uint32()
self.DataOffset = v_uint32()
self.DataSelector = v_uint32()
self.RegisterArea = vstruct.VArray([ v_uint8() for i in xrange(80) ])
class CMP_OFFSET_ARRAY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FileOffset = v_uint32()
self.DataBuffer = v_ptr32()
self.DataLength = v_uint32()
class CM_KEY_VALUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint16()
self.NameLength = v_uint16()
self.DataLength = v_uint32()
self.Data = v_uint32()
self.Type = v_uint32()
self.Flags = v_uint16()
self.Spare = v_uint16()
self.Name = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0018 = v_bytes(size=2)
class MMVAD_FLAGS2(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FileOffset = v_uint32()
class LIST_ENTRY32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flink = v_uint32()
self.Blink = v_uint32()
class MMWSLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u1 = _unnamed_13252()
class DBGKD_BREAKPOINTEX(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakPointCount = v_uint32()
self.ContinueStatus = v_uint32()
class FILE_NETWORK_OPEN_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CreationTime = LARGE_INTEGER()
self.LastAccessTime = LARGE_INTEGER()
self.LastWriteTime = LARGE_INTEGER()
self.ChangeTime = LARGE_INTEGER()
self.AllocationSize = LARGE_INTEGER()
self.EndOfFile = LARGE_INTEGER()
self.FileAttributes = v_uint32()
self._pad0038 = v_bytes(size=4)
class PCI_SECONDARY_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.List = SINGLE_LIST_ENTRY()
self.ExtensionType = v_uint32()
self.Destructor = v_ptr32()
class DBGKD_QUERY_MEMORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Address = v_uint64()
self.Reserved = v_uint64()
self.AddressSpace = v_uint32()
self.Flags = v_uint32()
class PCI_SLOT_NUMBER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u = _unnamed_14357()
class _unnamed_16115(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_ptr32()
self.Key = v_uint32()
self.ByteOffset = LARGE_INTEGER()
class KDEVICE_QUEUE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceListEntry = LIST_ENTRY()
self.SortKey = v_uint32()
self.Inserted = v_uint8()
self._pad0010 = v_bytes(size=3)
class LIST_ENTRY64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flink = v_uint64()
self.Blink = v_uint64()
class MMPTE_SUBSECTION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class PO_DEVICE_NOTIFY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Link = LIST_ENTRY()
self.TargetDevice = v_ptr32()
self.WakeNeeded = v_uint8()
self.OrderLevel = v_uint8()
self._pad0010 = v_bytes(size=2)
self.DeviceObject = v_ptr32()
self.Node = v_ptr32()
self.DeviceName = v_ptr32()
self.DriverName = v_ptr32()
self.ChildCount = v_uint32()
self.ActiveChild = v_uint32()
class HMAP_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Directory = vstruct.VArray([ v_ptr32() for i in xrange(1024) ])
class _unnamed_13150(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flink = v_uint32()
class _unnamed_13151(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Blink = v_uint32()
class _unnamed_13152(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.e1 = MMPFNENTRY()
class HEAP_STOP_ON_VALUES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AllocAddress = v_uint32()
self.AllocTag = HEAP_STOP_ON_TAG()
self.ReAllocAddress = v_uint32()
self.ReAllocTag = HEAP_STOP_ON_TAG()
self.FreeAddress = v_uint32()
self.FreeTag = HEAP_STOP_ON_TAG()
class WMI_BUFFER_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Wnode = WNODE_HEADER()
self.Offset = v_uint32()
self.EventsLost = v_uint32()
self.InstanceGuid = GUID()
class RTL_HANDLE_TABLE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint32()
class ARBITER_ALTERNATIVE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Minimum = v_uint64()
self.Maximum = v_uint64()
self.Length = v_uint32()
self.Alignment = v_uint32()
self.Priority = v_uint32()
self.Flags = v_uint32()
self.Descriptor = v_ptr32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(3) ])
class EX_FAST_REF(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Object = v_ptr32()
class INTERLOCK_SEQ(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Depth = v_uint16()
self.FreeEntryOffset = v_uint16()
self.Sequence = v_uint32()
class HMAP_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Table = vstruct.VArray([ HMAP_ENTRY() for i in xrange(512) ])
class KSPIN_LOCK_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Lock = v_ptr32()
class _unnamed_12918(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DockStatus = v_uint32()
self.ListEntry = LIST_ENTRY()
self.SerialNumber = v_ptr32()
class FS_FILTER_CALLBACKS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SizeOfFsFilterCallbacks = v_uint32()
self.Reserved = v_uint32()
self.PreAcquireForSectionSynchronization = v_ptr32()
self.PostAcquireForSectionSynchronization = v_ptr32()
self.PreReleaseForSectionSynchronization = v_ptr32()
self.PostReleaseForSectionSynchronization = v_ptr32()
self.PreAcquireForCcFlush = v_ptr32()
self.PostAcquireForCcFlush = v_ptr32()
self.PreReleaseForCcFlush = v_ptr32()
self.PostReleaseForCcFlush = v_ptr32()
self.PreAcquireForModifiedPageWriter = v_ptr32()
self.PostAcquireForModifiedPageWriter = v_ptr32()
self.PreReleaseForModifiedPageWriter = v_ptr32()
self.PostReleaseForModifiedPageWriter = v_ptr32()
class HANDLE_TABLE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Object = v_ptr32()
self.GrantedAccess = v_uint32()
class IO_RESOURCE_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Option = v_uint8()
self.Type = v_uint8()
self.ShareDisposition = v_uint8()
self.Spare1 = v_uint8()
self.Flags = v_uint16()
self.Spare2 = v_uint16()
self.u = _unnamed_14532()
class _unnamed_12917(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NextResourceDeviceNode = v_ptr32()
class _unnamed_12916(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LegacyDeviceNode = v_ptr32()
class THERMAL_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ThermalStamp = v_uint32()
self.ThermalConstant1 = v_uint32()
self.ThermalConstant2 = v_uint32()
self.Processors = v_uint32()
self.SamplingPeriod = v_uint32()
self.CurrentTemperature = v_uint32()
self.PassiveTripPoint = v_uint32()
self.CriticalTripPoint = v_uint32()
self.ActiveTripPointCount = v_uint8()
self._pad0024 = v_bytes(size=3)
self.ActiveTripPoint = vstruct.VArray([ v_uint32() for i in xrange(10) ])
class IMAGE_OPTIONAL_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Magic = v_uint16()
self.MajorLinkerVersion = v_uint8()
self.MinorLinkerVersion = v_uint8()
self.SizeOfCode = v_uint32()
self.SizeOfInitializedData = v_uint32()
self.SizeOfUninitializedData = v_uint32()
self.AddressOfEntryPoint = v_uint32()
self.BaseOfCode = v_uint32()
self.BaseOfData = v_uint32()
self.ImageBase = v_uint32()
self.SectionAlignment = v_uint32()
self.FileAlignment = v_uint32()
self.MajorOperatingSystemVersion = v_uint16()
self.MinorOperatingSystemVersion = v_uint16()
self.MajorImageVersion = v_uint16()
self.MinorImageVersion = v_uint16()
self.MajorSubsystemVersion = v_uint16()
self.MinorSubsystemVersion = v_uint16()
self.Win32VersionValue = v_uint32()
self.SizeOfImage = v_uint32()
self.SizeOfHeaders = v_uint32()
self.CheckSum = v_uint32()
self.Subsystem = v_uint16()
self.DllCharacteristics = v_uint16()
self.SizeOfStackReserve = v_uint32()
self.SizeOfStackCommit = v_uint32()
self.SizeOfHeapReserve = v_uint32()
self.SizeOfHeapCommit = v_uint32()
self.LoaderFlags = v_uint32()
self.NumberOfRvaAndSizes = v_uint32()
self.DataDirectory = vstruct.VArray([ IMAGE_DATA_DIRECTORY() for i in xrange(16) ])
class SCSI_REQUEST_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
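        # NOTE: SCSI_REQUEST_BLOCK is defined here with no members.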
class OBJECT_ATTRIBUTES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.RootDirectory = v_ptr32()
self.ObjectName = v_ptr32()
self.Attributes = v_uint32()
self.SecurityDescriptor = v_ptr32()
self.SecurityQualityOfService = v_ptr32()
class SUBSECTION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ControlArea = v_ptr32()
self.u = _unnamed_12531()
self.StartingSector = v_uint32()
self.NumberOfFullSectors = v_uint32()
self.SubsectionBase = v_ptr32()
self.UnusedPtes = v_uint32()
self.PtesInSubsection = v_uint32()
self.NextSubsection = v_ptr32()
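# ETHREAD: executive thread object; Tcb embeds the scheduler-level KTHREAD.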
class ETHREAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Tcb = KTHREAD()
self.CreateTime = LARGE_INTEGER()
self.ExitTime = LARGE_INTEGER()
self.ExitStatus = v_uint32()
self.PostBlockList = LIST_ENTRY()
self.TerminationPort = v_ptr32()
self.ActiveTimerListLock = v_uint32()
self.ActiveTimerListHead = LIST_ENTRY()
self.Cid = CLIENT_ID()
self.LpcReplySemaphore = KSEMAPHORE()
self.LpcReplyMessage = v_ptr32()
self.ImpersonationInfo = v_ptr32()
self.IrpList = LIST_ENTRY()
self.TopLevelIrp = v_uint32()
self.DeviceToVerify = v_ptr32()
self.ThreadsProcess = v_ptr32()
self.StartAddress = v_ptr32()
self.Win32StartAddress = v_ptr32()
self.ThreadListEntry = LIST_ENTRY()
self.RundownProtect = EX_RUNDOWN_REF()
self.ThreadLock = EX_PUSH_LOCK()
self.LpcReplyMessageId = v_uint32()
self.ReadClusterSize = v_uint32()
self.GrantedAccess = v_uint32()
self.CrossThreadFlags = v_uint32()
self.SameThreadPassiveFlags = v_uint32()
self.SameThreadApcFlags = v_uint32()
self.ForwardClusterOnly = v_uint8()
self.DisablePageFaultClustering = v_uint8()
self._pad0258 = v_bytes(size=2)
class _unnamed_16158(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InterfaceType = v_ptr32()
self.Size = v_uint16()
self.Version = v_uint16()
self.Interface = v_ptr32()
self.InterfaceSpecificData = v_ptr32()
class FAST_MUTEX(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.Owner = v_ptr32()
self.Contention = v_uint32()
self.Event = KEVENT()
self.OldIrql = v_uint32()
class _unnamed_16156(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint32()
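# MM_SESSION_SPACE: memory-manager state shared by the processes of one session.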
class MM_SESSION_SPACE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReferenceCount = v_uint32()
self.u = _unnamed_13227()
self.SessionId = v_uint32()
self.SessionPageDirectoryIndex = v_uint32()
self.GlobalVirtualAddress = v_ptr32()
self.ProcessList = LIST_ENTRY()
self.NonPagedPoolBytes = v_uint32()
self.PagedPoolBytes = v_uint32()
self.NonPagedPoolAllocations = v_uint32()
self.PagedPoolAllocations = v_uint32()
self.NonPagablePages = v_uint32()
self.CommittedPages = v_uint32()
self._pad0038 = v_bytes(size=4)
self.LastProcessSwappedOutTime = LARGE_INTEGER()
self.PageTables = v_ptr32()
self.PagedPoolMutex = FAST_MUTEX()
self.PagedPoolStart = v_ptr32()
self.PagedPoolEnd = v_ptr32()
self.PagedPoolBasePde = v_ptr32()
self.PagedPoolInfo = MM_PAGED_POOL_INFO()
self.Color = v_uint32()
self.ProcessOutSwapCount = v_uint32()
self.ImageList = LIST_ENTRY()
self.GlobalPteEntry = v_ptr32()
self.CopyOnWriteCount = v_uint32()
self.SessionPoolAllocationFailures = vstruct.VArray([ v_uint32() for i in xrange(4) ])
self.AttachCount = v_uint32()
self.AttachEvent = KEVENT()
self.LastProcess = v_ptr32()
self._pad00d8 = v_bytes(size=4)
self.Vm = MMSUPPORT()
self.Wsle = v_ptr32()
self.WsLock = ERESOURCE()
self.WsListEntry = LIST_ENTRY()
self.Session = MMSESSION()
self.Win32KDriverObject = DRIVER_OBJECT()
self.WorkingSetLockOwner = v_ptr32()
self.PagedPool = POOL_DESCRIPTOR()
self.ProcessReferenceToSession = v_uint32()
self.LocaleId = v_uint32()
self._pad1278 = v_bytes(size=4)
class CM_NAME_CONTROL_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Compressed = v_uint8()
self._pad0002 = v_bytes(size=1)
self.RefCount = v_uint16()
self.NameHash = CM_NAME_HASH()
class _unnamed_16016(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityContext = v_ptr32()
self.Options = v_uint32()
self.Reserved = v_uint16()
self.ShareAccess = v_uint16()
self.Parameters = v_ptr32()
class _unnamed_13534(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Level = v_uint32()
class KDEVICE_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.DeviceListHead = LIST_ENTRY()
self.Lock = v_uint32()
self.Busy = v_uint8()
self._pad0014 = v_bytes(size=3)
class IO_COUNTERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReadOperationCount = v_uint64()
self.WriteOperationCount = v_uint64()
self.OtherOperationCount = v_uint64()
self.ReadTransferCount = v_uint64()
self.WriteTransferCount = v_uint64()
self.OtherTransferCount = v_uint64()
class _unnamed_16380(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataLength = v_uint16()
self.TotalLength = v_uint16()
class PCI_BUS_INTERFACE_STANDARD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.Version = v_uint16()
self.Context = v_ptr32()
self.InterfaceReference = v_ptr32()
self.InterfaceDereference = v_ptr32()
self.ReadConfig = v_ptr32()
self.WriteConfig = v_ptr32()
self.PinToLine = v_ptr32()
self.LineToPin = v_ptr32()
class PORT_MESSAGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u1 = _unnamed_15734()
self.u2 = _unnamed_15735()
self.ClientId = CLIENT_ID()
self.MessageId = v_uint32()
self.ClientViewSize = v_uint32()
class _unnamed_16385(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.DataInfoOffset = v_uint16()
class PCI_COMMON_CONFIG(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VendorID = v_uint16()
self.DeviceID = v_uint16()
self.Command = v_uint16()
self.Status = v_uint16()
self.RevisionID = v_uint8()
self.ProgIf = v_uint8()
self.SubClass = v_uint8()
self.BaseClass = v_uint8()
self.CacheLineSize = v_uint8()
self.LatencyTimer = v_uint8()
self.HeaderType = v_uint8()
self.BIST = v_uint8()
self.u = _unnamed_14629()
self.DeviceSpecific = vstruct.VArray([ v_uint8() for i in xrange(192) ])
class IO_SECURITY_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityQos = v_ptr32()
self.AccessState = v_ptr32()
self.DesiredAccess = v_uint32()
self.FullCreateOptions = v_uint32()
class TERMINATION_PORT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Port = v_ptr32()
class IO_CLIENT_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NextExtension = v_ptr32()
self.ClientIdentificationAddress = v_ptr32()
class INITIAL_PRIVILEGE_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PrivilegeCount = v_uint32()
self.Control = v_uint32()
self.Privilege = vstruct.VArray([ LUID_AND_ATTRIBUTES() for i in xrange(3) ])
class PCI_LOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Atom = v_uint32()
self.OldIrql = v_uint8()
self._pad0008 = v_bytes(size=3)
class POOL_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PoolType = v_uint32()
self.PoolIndex = v_uint32()
self.RunningAllocs = v_uint32()
self.RunningDeAllocs = v_uint32()
self.TotalPages = v_uint32()
self.TotalBigPages = v_uint32()
self.Threshold = v_uint32()
self.LockAddress = v_ptr32()
self.PendingFrees = v_ptr32()
self.PendingFreeDepth = v_uint32()
self.ListHeads = vstruct.VArray([ LIST_ENTRY() for i in xrange(512) ])
class DBGKD_QUERY_SPECIAL_CALLS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NumberOfSpecialCalls = v_uint32()
class HEAP_UNCOMMMTTED_RANGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Address = v_uint32()
self.Size = v_uint32()
self.filler = v_uint32()
class HMAP_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BlockAddress = v_uint32()
self.BinAddress = v_uint32()
self.CmView = v_ptr32()
self.MemAlloc = v_uint32()
class DUMP_STACK_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Init = DUMP_INITIALIZATION_CONTEXT()
self.PartitionOffset = LARGE_INTEGER()
self.DumpPointers = v_ptr32()
self.PointersLength = v_uint32()
self.ModulePrefix = v_ptr32()
self.DriverList = LIST_ENTRY()
self.InitMsg = STRING()
self.ProgMsg = STRING()
self.DoneMsg = STRING()
self.FileObject = v_ptr32()
self.UsageType = v_uint32()
self._pad00b0 = v_bytes(size=4)
class PNP_DEVICE_EVENT_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Status = v_uint32()
self.EventQueueMutex = KMUTANT()
self.Lock = FAST_MUTEX()
self.List = LIST_ENTRY()
class PROCESSOR_IDLE_TIMES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartTime = v_uint64()
self.EndTime = v_uint64()
self.IdleHandlerReserved = vstruct.VArray([ v_uint32() for i in xrange(4) ])
class KWAIT_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WaitListEntry = LIST_ENTRY()
self.Thread = v_ptr32()
self.Object = v_ptr32()
self.NextWaitBlock = v_ptr32()
self.WaitKey = v_uint16()
self.WaitType = v_uint16()
class DBGKD_READ_WRITE_IO32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSize = v_uint32()
self.IoAddress = v_uint32()
self.DataValue = v_uint32()
class POP_HIBER_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WriteToFile = v_uint8()
self.ReserveLoaderMemory = v_uint8()
self.ReserveFreeMemory = v_uint8()
self.VerifyOnWake = v_uint8()
self.Reset = v_uint8()
self.HiberFlags = v_uint8()
self.LinkFile = v_uint8()
self._pad0008 = v_bytes(size=1)
self.LinkFileHandle = v_ptr32()
self.Lock = v_uint32()
self.MapFrozen = v_uint8()
self._pad0014 = v_bytes(size=3)
self.MemoryMap = RTL_BITMAP()
self.ClonedRanges = LIST_ENTRY()
self.ClonedRangeCount = v_uint32()
self.NextCloneRange = v_ptr32()
self.NextPreserve = v_uint32()
self.LoaderMdl = v_ptr32()
self.Clones = v_ptr32()
self.NextClone = v_ptr32()
self.NoClones = v_uint32()
self.Spares = v_ptr32()
self._pad0048 = v_bytes(size=4)
self.PagesOut = v_uint64()
self.IoPage = v_ptr32()
self.CurrentMcb = v_ptr32()
self.DumpStack = v_ptr32()
self.WakeState = v_ptr32()
self.NoRanges = v_uint32()
self.HiberVa = v_uint32()
self.HiberPte = LARGE_INTEGER()
self.Status = v_uint32()
self.MemoryImage = v_ptr32()
self.TableHead = v_ptr32()
self.CompressionWorkspace = v_ptr32()
self.CompressedWriteBuffer = v_ptr32()
self.PerformanceStats = v_ptr32()
self.CompressionBlock = v_ptr32()
self.DmaIO = v_ptr32()
self.TemporaryHeap = v_ptr32()
self._pad0098 = v_bytes(size=4)
self.PerfInfo = PO_HIBER_PERF()
class _unnamed_16128(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityInformation = v_uint32()
self.SecurityDescriptor = v_ptr32()
class PS_JOB_TOKEN_FILTER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CapturedSidCount = v_uint32()
self.CapturedSids = v_ptr32()
self.CapturedSidsLength = v_uint32()
self.CapturedGroupCount = v_uint32()
self.CapturedGroups = v_ptr32()
self.CapturedGroupsLength = v_uint32()
self.CapturedPrivilegeCount = v_uint32()
self.CapturedPrivileges = v_ptr32()
self.CapturedPrivilegesLength = v_uint32()
class CALL_HASH_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = LIST_ENTRY()
self.CallersAddress = v_ptr32()
self.CallersCaller = v_ptr32()
self.CallCount = v_uint32()
class _unnamed_16125(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityInformation = v_uint32()
self.Length = v_uint32()
class TOKEN_CONTROL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TokenId = LUID()
self.AuthenticationId = LUID()
self.ModifiedId = LUID()
self.TokenSource = TOKEN_SOURCE()
class _unnamed_16120(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OutputBufferLength = v_uint32()
self.InputBufferLength = v_uint32()
self.IoControlCode = v_uint32()
self.Type3InputBuffer = v_ptr32()
class _unnamed_16554(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ArbitrationList = v_ptr32()
self.AllocateFromCount = v_uint32()
self.AllocateFrom = v_ptr32()
class PCI_COMMON_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.ExtensionType = v_uint32()
self.IrpDispatchTable = v_ptr32()
self.DeviceState = v_uint8()
self.TentativeNextState = v_uint8()
self._pad0010 = v_bytes(size=2)
self.SecondaryExtLock = KEVENT()
class HEAP_USERDATA_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SFreeListEntry = SINGLE_LIST_ENTRY()
self.HeapHandle = v_ptr32()
self.SizeIndex = v_uint32()
self.Signature = v_uint32()
class _unnamed_16559(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ArbitrationList = v_ptr32()
class RTL_DRIVE_LETTER_CURDIR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint16()
self.Length = v_uint16()
self.TimeStamp = v_uint32()
self.DosPath = STRING()
class ULARGE_INTEGER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
class _unnamed_15734(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.s1 = _unnamed_16380()
class _unnamed_15735(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.s2 = _unnamed_16385()
class TEB_ACTIVE_FRAME(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint32()
self.Previous = v_ptr32()
self.Context = v_ptr32()
class ETIMER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.KeTimer = KTIMER()
self.TimerApc = KAPC()
self.TimerDpc = KDPC()
self.ActiveTimerListEntry = LIST_ENTRY()
self.Lock = v_uint32()
self.Period = v_uint32()
self.ApcAssociated = v_uint8()
self.WakeTimer = v_uint8()
self._pad008c = v_bytes(size=2)
self.WakeTimerListEntry = LIST_ENTRY()
self._pad0098 = v_bytes(size=4)
class GENERAL_LOOKASIDE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = SLIST_HEADER()
self.Depth = v_uint16()
self.MaximumDepth = v_uint16()
self.TotalAllocates = v_uint32()
self.AllocateMisses = v_uint32()
self.TotalFrees = v_uint32()
self.FreeMisses = v_uint32()
self.Type = v_uint32()
self.Tag = v_uint32()
self.Size = v_uint32()
self.Allocate = v_ptr32()
self.Free = v_ptr32()
self.ListEntry = LIST_ENTRY()
self.LastTotalAllocates = v_uint32()
self.LastAllocateMisses = v_uint32()
self.Future = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self._pad0080 = v_bytes(size=56)
class PHYSICAL_MEMORY_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NumberOfRuns = v_uint32()
self.NumberOfPages = v_uint32()
self.Run = vstruct.VArray([ PHYSICAL_MEMORY_RUN() for i in xrange(1) ])
class ARBITER_ORDERING_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint16()
self.Maximum = v_uint16()
self.Orderings = v_ptr32()
class OBJECT_DIRECTORY_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ChainLink = v_ptr32()
self.Object = v_ptr32()
class CM_KEY_HASH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ConvKey = v_uint32()
self.NextHash = v_ptr32()
self.KeyHive = v_ptr32()
self.KeyCell = v_uint32()
class ARBITER_LIST_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = LIST_ENTRY()
self.AlternativeCount = v_uint32()
self.Alternatives = v_ptr32()
self.PhysicalDeviceObject = v_ptr32()
self.RequestSource = v_uint32()
self.Flags = v_uint32()
self.WorkSpace = v_uint32()
self.InterfaceType = v_uint32()
self.SlotNumber = v_uint32()
self.BusNumber = v_uint32()
self.Assignment = v_ptr32()
self.SelectedAlternative = v_ptr32()
self.Result = v_uint32()
class PROCESSOR_PERF_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PercentFrequency = v_uint8()
self.MinCapacity = v_uint8()
self.Power = v_uint16()
self.IncreaseLevel = v_uint8()
self.DecreaseLevel = v_uint8()
self.Flags = v_uint16()
self.IncreaseTime = v_uint32()
self.DecreaseTime = v_uint32()
self.IncreaseCount = v_uint32()
self.DecreaseCount = v_uint32()
self.PerformanceTime = v_uint64()
class KGDTENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LimitLow = v_uint16()
self.BaseLow = v_uint16()
self.HighWord = _unnamed_13092()
class MMPFNENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Modified = v_uint32()
class NT_TIB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionList = v_ptr32()
self.StackBase = v_ptr32()
self.StackLimit = v_ptr32()
self.SubSystemTib = v_ptr32()
self.FiberData = v_ptr32()
self.ArbitraryUserPointer = v_ptr32()
self.Self = v_ptr32()
class POWER_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SystemState = v_uint32()
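# UNICODE_STRING: counted UTF-16 string; Length and MaximumLength are in bytes.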
class UNICODE_STRING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint16()
self.MaximumLength = v_uint16()
self.Buffer = v_ptr32()
class CELL_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u = u()
class MMSESSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SystemSpaceViewLock = FAST_MUTEX()
self.SystemSpaceViewLockPointer = v_ptr32()
self.SystemSpaceViewStart = v_ptr32()
self.SystemSpaceViewTable = v_ptr32()
self.SystemSpaceHashSize = v_uint32()
self.SystemSpaceHashEntries = v_uint32()
self.SystemSpaceHashKey = v_uint32()
self.SystemSpaceBitMap = v_ptr32()
class _unnamed_16230(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PowerState = v_uint32()
class _unnamed_16236(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PowerSequence = v_ptr32()
class PEB_FREE_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Size = v_uint32()
class MMFREE_POOL_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.List = LIST_ENTRY()
self.Size = v_uint32()
self.Signature = v_uint32()
self.Owner = v_ptr32()
class EPROCESS_QUOTA_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.QuotaEntry = vstruct.VArray([ EPROCESS_QUOTA_ENTRY() for i in xrange(3) ])
self.QuotaList = LIST_ENTRY()
self.ReferenceCount = v_uint32()
self.ProcessCount = v_uint32()
class FXSAVE_FORMAT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ControlWord = v_uint16()
self.StatusWord = v_uint16()
self.TagWord = v_uint16()
self.ErrorOpcode = v_uint16()
self.ErrorOffset = v_uint32()
self.ErrorSelector = v_uint32()
self.DataOffset = v_uint32()
self.DataSelector = v_uint32()
self.MXCsr = v_uint32()
self.MXCsrMask = v_uint32()
self.RegisterArea = vstruct.VArray([ v_uint8() for i in xrange(128) ])
self.Reserved3 = vstruct.VArray([ v_uint8() for i in xrange(128) ])
self.Reserved4 = vstruct.VArray([ v_uint8() for i in xrange(224) ])
self.Align16Byte = vstruct.VArray([ v_uint8() for i in xrange(8) ])
class BUS_HANDLER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint32()
self.InterfaceType = v_uint32()
self.ConfigurationType = v_uint32()
self.BusNumber = v_uint32()
self.DeviceObject = v_ptr32()
self.ParentHandler = v_ptr32()
self.BusData = v_ptr32()
self.DeviceControlExtensionSize = v_uint32()
self.BusAddresses = v_ptr32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(4) ])
self.GetBusData = v_ptr32()
self.SetBusData = v_ptr32()
self.AdjustResourceList = v_ptr32()
self.AssignSlotResources = v_ptr32()
self.GetInterruptVector = v_ptr32()
self.TranslateBusAddress = v_ptr32()
self.Spare1 = v_ptr32()
self.Spare2 = v_ptr32()
self.Spare3 = v_ptr32()
self.Spare4 = v_ptr32()
self.Spare5 = v_ptr32()
self.Spare6 = v_ptr32()
self.Spare7 = v_ptr32()
self.Spare8 = v_ptr32()
class OBJECT_HEADER_NAME_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Directory = v_ptr32()
self.Name = UNICODE_STRING()
self.QueryReferences = v_uint32()
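# PEB: user-mode Process Environment Block (loader, heaps, TLS and image data).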
class PEB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InheritedAddressSpace = v_uint8()
self.ReadImageFileExecOptions = v_uint8()
self.BeingDebugged = v_uint8()
self.SpareBool = v_uint8()
self.Mutant = v_ptr32()
self.ImageBaseAddress = v_ptr32()
self.Ldr = v_ptr32()
self.ProcessParameters = v_ptr32()
self.SubSystemData = v_ptr32()
self.ProcessHeap = v_ptr32()
self.FastPebLock = v_ptr32()
self.FastPebLockRoutine = v_ptr32()
self.FastPebUnlockRoutine = v_ptr32()
self.EnvironmentUpdateCount = v_uint32()
self.KernelCallbackTable = v_ptr32()
self.SystemReserved = vstruct.VArray([ v_uint32() for i in xrange(1) ])
self.AtlThunkSListPtr32 = v_uint32()
self.FreeList = v_ptr32()
self.TlsExpansionCounter = v_uint32()
self.TlsBitmap = v_ptr32()
self.TlsBitmapBits = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.ReadOnlySharedMemoryBase = v_ptr32()
self.ReadOnlySharedMemoryHeap = v_ptr32()
self.ReadOnlyStaticServerData = v_ptr32()
self.AnsiCodePageData = v_ptr32()
self.OemCodePageData = v_ptr32()
self.UnicodeCaseTableData = v_ptr32()
self.NumberOfProcessors = v_uint32()
self.NtGlobalFlag = v_uint32()
self._pad0070 = v_bytes(size=4)
self.CriticalSectionTimeout = LARGE_INTEGER()
self.HeapSegmentReserve = v_uint32()
self.HeapSegmentCommit = v_uint32()
self.HeapDeCommitTotalFreeThreshold = v_uint32()
self.HeapDeCommitFreeBlockThreshold = v_uint32()
self.NumberOfHeaps = v_uint32()
self.MaximumNumberOfHeaps = v_uint32()
self.ProcessHeaps = v_ptr32()
self.GdiSharedHandleTable = v_ptr32()
self.ProcessStarterHelper = v_ptr32()
self.GdiDCAttributeList = v_uint32()
self.LoaderLock = v_ptr32()
self.OSMajorVersion = v_uint32()
self.OSMinorVersion = v_uint32()
self.OSBuildNumber = v_uint16()
self.OSCSDVersion = v_uint16()
self.OSPlatformId = v_uint32()
self.ImageSubsystem = v_uint32()
self.ImageSubsystemMajorVersion = v_uint32()
self.ImageSubsystemMinorVersion = v_uint32()
self.ImageProcessAffinityMask = v_uint32()
self.GdiHandleBuffer = vstruct.VArray([ v_uint32() for i in xrange(34) ])
self.PostProcessInitRoutine = v_ptr32()
self.TlsExpansionBitmap = v_ptr32()
self.TlsExpansionBitmapBits = vstruct.VArray([ v_uint32() for i in xrange(32) ])
self.SessionId = v_uint32()
self.AppCompatFlags = ULARGE_INTEGER()
self.AppCompatFlagsUser = ULARGE_INTEGER()
self.pShimData = v_ptr32()
self.AppCompatInfo = v_ptr32()
self.CSDVersion = UNICODE_STRING()
self.ActivationContextData = v_ptr32()
self.ProcessAssemblyStorageMap = v_ptr32()
self.SystemDefaultActivationContextData = v_ptr32()
self.SystemAssemblyStorageMap = v_ptr32()
self.MinimumStackCommit = v_uint32()
self._pad0210 = v_bytes(size=4)
class DBGKD_ANY_CONTROL_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.X86ControlSet = X86_DBGKD_CONTROL_SET()
self._pad001c = v_bytes(size=12)
class MMSUPPORT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LastTrimTime = LARGE_INTEGER()
self.Flags = MMSUPPORT_FLAGS()
self.PageFaultCount = v_uint32()
self.PeakWorkingSetSize = v_uint32()
self.WorkingSetSize = v_uint32()
self.MinimumWorkingSetSize = v_uint32()
self.MaximumWorkingSetSize = v_uint32()
self.VmWorkingSetList = v_ptr32()
self.WorkingSetExpansionLinks = LIST_ENTRY()
self.Claim = v_uint32()
self.NextEstimationSlot = v_uint32()
self.NextAgingSlot = v_uint32()
self.EstimatedAvailable = v_uint32()
self.GrowthSinceLastEstimate = v_uint32()
class HBASE_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.Sequence1 = v_uint32()
self.Sequence2 = v_uint32()
self.TimeStamp = LARGE_INTEGER()
self.Major = v_uint32()
self.Minor = v_uint32()
self.Type = v_uint32()
self.Format = v_uint32()
self.RootCell = v_uint32()
self.Length = v_uint32()
self.Cluster = v_uint32()
self.FileName = vstruct.VArray([ v_uint8() for i in xrange(64) ])
self.Reserved1 = vstruct.VArray([ v_uint32() for i in xrange(99) ])
self.CheckSum = v_uint32()
self.Reserved2 = vstruct.VArray([ v_uint32() for i in xrange(894) ])
self.BootType = v_uint32()
self.BootRecover = v_uint32()
class BUS_EXTENSION_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.BusExtension = v_ptr32()
class DBGKD_GET_SET_BUS_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BusDataType = v_uint32()
self.BusNumber = v_uint32()
self.SlotNumber = v_uint32()
self.Offset = v_uint32()
self.Length = v_uint32()
class KDPC(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Number = v_uint8()
self.Importance = v_uint8()
self.DpcListEntry = LIST_ENTRY()
self.DeferredRoutine = v_ptr32()
self.DeferredContext = v_ptr32()
self.SystemArgument1 = v_ptr32()
self.SystemArgument2 = v_ptr32()
self.Lock = v_ptr32()
class KEVENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
class KSEMAPHORE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.Limit = v_uint32()
class PCI_ARBITER_INSTANCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = PCI_SECONDARY_EXTENSION()
self.Interface = v_ptr32()
self.BusFdoExtension = v_ptr32()
self.InstanceName = vstruct.VArray([ v_uint16() for i in xrange(24) ])
self.CommonInstance = ARBITER_INSTANCE()
class PI_RESOURCE_ARBITER_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceArbiterList = LIST_ENTRY()
self.ResourceType = v_uint8()
self._pad000c = v_bytes(size=3)
self.ArbiterInterface = v_ptr32()
self.Level = v_uint32()
self.ResourceList = LIST_ENTRY()
self.BestResourceList = LIST_ENTRY()
self.BestConfig = LIST_ENTRY()
self.ActiveArbiterList = LIST_ENTRY()
self.State = v_uint8()
self.ResourcesChanged = v_uint8()
self._pad0038 = v_bytes(size=2)
class OBJECT_TYPE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Mutex = ERESOURCE()
self.TypeList = LIST_ENTRY()
self.Name = UNICODE_STRING()
self.DefaultObject = v_ptr32()
self.Index = v_uint32()
self.TotalNumberOfObjects = v_uint32()
self.TotalNumberOfHandles = v_uint32()
self.HighWaterNumberOfObjects = v_uint32()
self.HighWaterNumberOfHandles = v_uint32()
self.TypeInfo = OBJECT_TYPE_INITIALIZER()
self.Key = v_uint32()
self.ObjectLocks = vstruct.VArray([ ERESOURCE() for i in xrange(4) ])
class DBGKD_SET_INTERNAL_BREAKPOINT32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakpointAddress = v_uint32()
self.Flags = v_uint32()
class POP_THERMAL_ZONE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Link = LIST_ENTRY()
self.State = v_uint8()
self.Flags = v_uint8()
self.Mode = v_uint8()
self.PendingMode = v_uint8()
self.ActivePoint = v_uint8()
self.PendingActivePoint = v_uint8()
self._pad0010 = v_bytes(size=2)
self.Throttle = v_uint32()
self._pad0018 = v_bytes(size=4)
self.LastTime = v_uint64()
self.SampleRate = v_uint32()
self.LastTemp = v_uint32()
self.PassiveTimer = KTIMER()
self.PassiveDpc = KDPC()
self.OverThrottled = POP_ACTION_TRIGGER()
self.Irp = v_ptr32()
self.Info = THERMAL_INFORMATION()
self._pad00d0 = v_bytes(size=4)
class POOL_HACKER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = POOL_HEADER()
self.Contents = vstruct.VArray([ v_uint32() for i in xrange(8) ])
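# HANDLE_TABLE: executive handle table backing per-process handle lookups.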
class HANDLE_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TableCode = v_uint32()
self.QuotaProcess = v_ptr32()
self.UniqueProcessId = v_ptr32()
self.HandleTableLock = vstruct.VArray([ EX_PUSH_LOCK() for i in xrange(4) ])
self.HandleTableList = LIST_ENTRY()
self.HandleContentionEvent = EX_PUSH_LOCK()
self.DebugInfo = v_ptr32()
self.ExtraInfoPages = v_uint32()
self.FirstFree = v_uint32()
self.LastFree = v_uint32()
self.NextHandleNeedingPool = v_uint32()
self.HandleCount = v_uint32()
self.Flags = v_uint32()
class PO_HIBER_PERF(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IoTicks = v_uint64()
self.InitTicks = v_uint64()
self.CopyTicks = v_uint64()
self.StartCount = v_uint64()
self.ElapsedTime = v_uint32()
self.IoTime = v_uint32()
self.CopyTime = v_uint32()
self.InitTime = v_uint32()
self.PagesWritten = v_uint32()
self.PagesProcessed = v_uint32()
self.BytesCopied = v_uint32()
self.DumpCount = v_uint32()
self.FileRuns = v_uint32()
self._pad0048 = v_bytes(size=4)
class DEFERRED_WRITE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NodeTypeCode = v_uint16()
self.NodeByteSize = v_uint16()
self.FileObject = v_ptr32()
self.BytesToWrite = v_uint32()
self.DeferredWriteLinks = LIST_ENTRY()
self.Event = v_ptr32()
self.PostRoutine = v_ptr32()
self.Context1 = v_ptr32()
self.Context2 = v_ptr32()
self.LimitModifiedPages = v_uint8()
self._pad0028 = v_bytes(size=3)
class ARBITER_INSTANCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.MutexEvent = v_ptr32()
self.Name = v_ptr32()
self.ResourceType = v_uint32()
self.Allocation = v_ptr32()
self.PossibleAllocation = v_ptr32()
self.OrderingList = ARBITER_ORDERING_LIST()
self.ReservedList = ARBITER_ORDERING_LIST()
self.ReferenceCount = v_uint32()
self.Interface = v_ptr32()
self.AllocationStackMaxSize = v_uint32()
self.AllocationStack = v_ptr32()
self.UnpackRequirement = v_ptr32()
self.PackResource = v_ptr32()
self.UnpackResource = v_ptr32()
self.ScoreRequirement = v_ptr32()
self.TestAllocation = v_ptr32()
self.RetestAllocation = v_ptr32()
self.CommitAllocation = v_ptr32()
self.RollbackAllocation = v_ptr32()
self.BootAllocation = v_ptr32()
self.QueryArbitrate = v_ptr32()
self.QueryConflict = v_ptr32()
self.AddReserved = v_ptr32()
self.StartArbiter = v_ptr32()
self.PreprocessEntry = v_ptr32()
self.AllocateEntry = v_ptr32()
self.GetNextAllocationRange = v_ptr32()
self.FindSuitableRange = v_ptr32()
self.AddAllocation = v_ptr32()
self.BacktrackAllocation = v_ptr32()
self.OverrideConflict = v_ptr32()
self.TransactionInProgress = v_uint8()
self._pad008c = v_bytes(size=3)
self.Extension = v_ptr32()
self.BusDeviceObject = v_ptr32()
self.ConflictCallbackContext = v_ptr32()
self.ConflictCallback = v_ptr32()
class MMMOD_WRITER_LISTHEAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = LIST_ENTRY()
self.Event = KEVENT()
class NAMED_PIPE_CREATE_PARAMETERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NamedPipeType = v_uint32()
self.ReadMode = v_uint32()
self.CompletionMode = v_uint32()
self.MaximumInstances = v_uint32()
self.InboundQuota = v_uint32()
self.OutboundQuota = v_uint32()
self.DefaultTimeout = LARGE_INTEGER()
self.TimeoutSpecified = v_uint8()
self._pad0028 = v_bytes(size=7)
class POP_IDLE_HANDLER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Latency = v_uint32()
self.TimeCheck = v_uint32()
self.DemoteLimit = v_uint32()
self.PromoteLimit = v_uint32()
self.PromoteCount = v_uint32()
self.Demote = v_uint8()
self.Promote = v_uint8()
self.PromotePercent = v_uint8()
self.DemotePercent = v_uint8()
self.State = v_uint8()
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.IdleFunction = v_ptr32()
class MMSUPPORT_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SessionSpace = v_uint32()
class HEAP_LOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Lock = _unnamed_12162()
class EXCEPTION_REGISTRATION_RECORD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Handler = v_ptr32()
class FILE_BASIC_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CreationTime = LARGE_INTEGER()
self.LastAccessTime = LARGE_INTEGER()
self.LastWriteTime = LARGE_INTEGER()
self.ChangeTime = LARGE_INTEGER()
self.FileAttributes = v_uint32()
self._pad0028 = v_bytes(size=4)
class PLUGPLAY_EVENT_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.EventGuid = GUID()
self.EventCategory = v_uint32()
self.Result = v_ptr32()
self.Flags = v_uint32()
self.TotalSize = v_uint32()
self.DeviceObject = v_ptr32()
self.u = _unnamed_15795()
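# LIST_ENTRY: node of the kernel's circular doubly linked lists (Flink/Blink).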
class LIST_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flink = v_ptr32()
self.Blink = v_ptr32()
class CM_KEY_SECURITY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint16()
self.Reserved = v_uint16()
self.Flink = v_uint32()
self.Blink = v_uint32()
self.ReferenceCount = v_uint32()
self.DescriptorLength = v_uint32()
self.Descriptor = SECURITY_DESCRIPTOR_RELATIVE()
class _unnamed_14637(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = LARGE_INTEGER()
self.Length = v_uint32()
class _unnamed_14395(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Acquired = v_uint8()
self.CacheLineSize = v_uint8()
self.LatencyTimer = v_uint8()
self.EnablePERR = v_uint8()
self.EnableSERR = v_uint8()
class CLIENT_ID(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.UniqueProcess = v_ptr32()
self.UniqueThread = v_ptr32()
class POP_ACTION_TRIGGER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint32()
self.Flags = v_uint8()
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.Battery = _unnamed_13534()
class CM_CACHED_VALUE_INDEX(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CellIndex = v_uint32()
self.Data = _unnamed_13383()
class DEVICE_MAP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DosDevicesDirectory = v_ptr32()
self.GlobalDosDevicesDirectory = v_ptr32()
self.ReferenceCount = v_uint32()
self.DriveMap = v_uint32()
self.DriveType = vstruct.VArray([ v_uint8() for i in xrange(32) ])
class CONTROL_AREA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Segment = v_ptr32()
self.DereferenceList = LIST_ENTRY()
self.NumberOfSectionReferences = v_uint32()
self.NumberOfPfnReferences = v_uint32()
self.NumberOfMappedViews = v_uint32()
self.NumberOfSubsections = v_uint16()
self.FlushInProgressCount = v_uint16()
self.NumberOfUserReferences = v_uint32()
self.u = _unnamed_12520()
self.FilePointer = v_ptr32()
self.WaitingForDeletion = v_ptr32()
self.ModifiedWriteCount = v_uint16()
self.NumberOfSystemCacheViews = v_uint16()
class GUID(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Data1 = v_uint32()
self.Data2 = v_uint16()
self.Data3 = v_uint16()
self.Data4 = vstruct.VArray([ v_uint8() for i in xrange(8) ])
class KAPC_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ApcListHead = vstruct.VArray([ LIST_ENTRY() for i in xrange(2) ])
self.Process = v_ptr32()
self.KernelApcInProgress = v_uint8()
self.KernelApcPending = v_uint8()
self.UserApcPending = v_uint8()
self._pad0018 = v_bytes(size=1)
class MMVAD_SHORT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartingVpn = v_uint32()
self.EndingVpn = v_uint32()
self.Parent = v_ptr32()
self.LeftChild = v_ptr32()
self.RightChild = v_ptr32()
self.u = _unnamed_14102()
class DBGKD_GET_VERSION32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.ProtocolVersion = v_uint16()
self.Flags = v_uint16()
self.KernBase = v_uint32()
self.PsLoadedModuleList = v_uint32()
self.MachineType = v_uint16()
self.ThCallbackStack = v_uint16()
self.NextCallback = v_uint16()
self.FramePointer = v_uint16()
self.KiCallUserMode = v_uint32()
self.KeUserCallbackDispatcher = v_uint32()
self.BreakpointWithStatus = v_uint32()
self.DebuggerDataList = v_uint32()
class CM_CELL_REMAP_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OldCell = v_uint32()
self.NewCell = v_uint32()
class _unnamed_14065(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InitialPrivilegeSet = INITIAL_PRIVILEGE_SET()
class KIDTENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Offset = v_uint16()
self.Selector = v_uint16()
self.Access = v_uint16()
self.ExtendedOffset = v_uint16()
class _unnamed_16198(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IoResourceRequirementList = v_ptr32()
class _unnamed_16195(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Capabilities = v_ptr32()
class _unnamed_14640(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Level = v_uint32()
self.Vector = v_uint32()
self.Affinity = v_uint32()
class PO_MEMORY_RANGE_ARRAY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Range = _unnamed_16445()
class _unnamed_14644(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Channel = v_uint32()
self.Port = v_uint32()
self.Reserved1 = v_uint32()
class SYSTEM_POWER_POLICY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Revision = v_uint32()
self.PowerButton = POWER_ACTION_POLICY()
self.SleepButton = POWER_ACTION_POLICY()
self.LidClose = POWER_ACTION_POLICY()
self.LidOpenWake = v_uint32()
self.Reserved = v_uint32()
self.Idle = POWER_ACTION_POLICY()
self.IdleTimeout = v_uint32()
self.IdleSensitivity = v_uint8()
self.DynamicThrottle = v_uint8()
self.Spare2 = vstruct.VArray([ v_uint8() for i in xrange(2) ])
self.MinSleep = v_uint32()
self.MaxSleep = v_uint32()
self.ReducedLatencySleep = v_uint32()
self.WinLogonFlags = v_uint32()
self.Spare3 = v_uint32()
self.DozeS4Timeout = v_uint32()
self.BroadcastCapacityResolution = v_uint32()
self.DischargePolicy = vstruct.VArray([ SYSTEM_POWER_LEVEL() for i in xrange(4) ])
self.VideoTimeout = v_uint32()
self.VideoDimDisplay = v_uint8()
self._pad00c8 = v_bytes(size=3)
self.VideoReserved = vstruct.VArray([ v_uint32() for i in xrange(3) ])
self.SpindownTimeout = v_uint32()
self.OptimizeForPower = v_uint8()
self.FanThrottleTolerance = v_uint8()
self.ForcedThrottle = v_uint8()
self.MinThrottle = v_uint8()
self.OverThrottled = POWER_ACTION_POLICY()
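# IRP: I/O Request Packet handed down the device/driver stack.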
class IRP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.MdlAddress = v_ptr32()
self.Flags = v_uint32()
self.AssociatedIrp = _unnamed_12973()
self.ThreadListEntry = LIST_ENTRY()
self.IoStatus = IO_STATUS_BLOCK()
self.RequestorMode = v_uint8()
self.PendingReturned = v_uint8()
self.StackCount = v_uint8()
self.CurrentLocation = v_uint8()
self.Cancel = v_uint8()
self.CancelIrql = v_uint8()
self.ApcEnvironment = v_uint8()
self.AllocationFlags = v_uint8()
self.UserIosb = v_ptr32()
self.UserEvent = v_ptr32()
self.Overlay = _unnamed_12976()
self.CancelRoutine = v_ptr32()
self.UserBuffer = v_ptr32()
self.Tail = _unnamed_12979()
class _unnamed_14648(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Data = vstruct.VArray([ v_uint32() for i in xrange(3) ])
class _unnamed_16307(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MinimumChannel = v_uint32()
self.MaximumChannel = v_uint32()
class _unnamed_16081(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.FileInformationClass = v_uint32()
self.FileObject = v_ptr32()
self.ReplaceIfExists = v_uint8()
self.AdvanceOnly = v_uint8()
self._pad0010 = v_bytes(size=2)
class POWER_ACTION_POLICY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Action = v_uint32()
self.Flags = v_uint32()
self.EventCode = v_uint32()
class SECURITY_DESCRIPTOR_RELATIVE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Revision = v_uint8()
self.Sbz1 = v_uint8()
self.Control = v_uint16()
self.Owner = v_uint32()
self.Group = v_uint32()
self.Sacl = v_uint32()
self.Dacl = v_uint32()
class DUMP_INITIALIZATION_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.Reserved = v_uint32()
self.MemoryBlock = v_ptr32()
self.CommonBuffer = vstruct.VArray([ v_ptr32() for i in xrange(2) ])
self._pad0018 = v_bytes(size=4)
self.PhysicalAddress = vstruct.VArray([ LARGE_INTEGER() for i in xrange(2) ])
self.StallRoutine = v_ptr32()
self.OpenRoutine = v_ptr32()
self.WriteRoutine = v_ptr32()
self.FinishRoutine = v_ptr32()
self.AdapterObject = v_ptr32()
self.MappedRegisterBase = v_ptr32()
self.PortConfiguration = v_ptr32()
self.CrashDump = v_uint8()
self._pad0048 = v_bytes(size=3)
self.MaximumTransferSize = v_uint32()
self.CommonBufferSize = v_uint32()
self.TargetAddress = v_ptr32()
self.WritePendingRoutine = v_ptr32()
self.PartitionStyle = v_uint32()
self.DiskInfo = _unnamed_16505()
self._pad0070 = v_bytes(size=4)
class FILE_GET_QUOTA_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NextEntryOffset = v_uint32()
self.SidLength = v_uint32()
self.Sid = SID()
class IO_COMPLETION_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Port = v_ptr32()
self.Key = v_ptr32()
class _unnamed_16565(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PhysicalDeviceObject = v_ptr32()
self.ConflictingResource = v_ptr32()
self.ConflictCount = v_ptr32()
self.Conflicts = v_ptr32()
class DRIVER_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DriverObject = v_ptr32()
self.AddDevice = v_ptr32()
self.Count = v_uint32()
self.ServiceKeyName = UNICODE_STRING()
self.ClientDriverExtension = v_ptr32()
self.FsFilterCallbacks = v_ptr32()
class TOKEN_SOURCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SourceName = vstruct.VArray([ v_uint8() for i in xrange(8) ])
self.SourceIdentifier = LUID()
class _unnamed_16561(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AllocatedResources = v_ptr32()
class _unnamed_14549(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseMid = v_uint32()
class flags(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Removable = v_uint8()
class DBGKM_EXCEPTION64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionRecord = EXCEPTION_RECORD64()
self.FirstChance = v_uint32()
self._pad00a0 = v_bytes(size=4)
class _unnamed_14544(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseMid = v_uint8()
self.Flags1 = v_uint8()
self.Flags2 = v_uint8()
self.BaseHi = v_uint8()
class PM_SUPPORT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Rsvd2 = v_uint8()
class KPRCB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MinorVersion = v_uint16()
self.MajorVersion = v_uint16()
self.CurrentThread = v_ptr32()
self.NextThread = v_ptr32()
self.IdleThread = v_ptr32()
self.Number = v_uint8()
self.Reserved = v_uint8()
self.BuildType = v_uint16()
self.SetMember = v_uint32()
self.CpuType = v_uint8()
self.CpuID = v_uint8()
self.CpuStep = v_uint16()
self.ProcessorState = KPROCESSOR_STATE()
self.KernelReserved = vstruct.VArray([ v_uint32() for i in xrange(16) ])
self.HalReserved = vstruct.VArray([ v_uint32() for i in xrange(16) ])
self.PrcbPad0 = vstruct.VArray([ v_uint8() for i in xrange(92) ])
self.LockQueue = vstruct.VArray([ KSPIN_LOCK_QUEUE() for i in xrange(16) ])
self.PrcbPad1 = vstruct.VArray([ v_uint8() for i in xrange(8) ])
self.NpxThread = v_ptr32()
self.InterruptCount = v_uint32()
self.KernelTime = v_uint32()
self.UserTime = v_uint32()
self.DpcTime = v_uint32()
self.DebugDpcTime = v_uint32()
self.InterruptTime = v_uint32()
self.AdjustDpcThreshold = v_uint32()
self.PageColor = v_uint32()
self.SkipTick = v_uint32()
self.MultiThreadSetBusy = v_uint8()
self.Spare2 = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.ParentNode = v_ptr32()
self.MultiThreadProcessorSet = v_uint32()
self.MultiThreadSetMaster = v_ptr32()
self.ThreadStartCount = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.CcFastReadNoWait = v_uint32()
self.CcFastReadWait = v_uint32()
self.CcFastReadNotPossible = v_uint32()
self.CcCopyReadNoWait = v_uint32()
self.CcCopyReadWait = v_uint32()
self.CcCopyReadNoWaitMiss = v_uint32()
self.KeAlignmentFixupCount = v_uint32()
self.KeContextSwitches = v_uint32()
self.KeDcacheFlushCount = v_uint32()
self.KeExceptionDispatchCount = v_uint32()
self.KeFirstLevelTbFills = v_uint32()
self.KeFloatingEmulationCount = v_uint32()
self.KeIcacheFlushCount = v_uint32()
self.KeSecondLevelTbFills = v_uint32()
self.KeSystemCalls = v_uint32()
self.SpareCounter0 = vstruct.VArray([ v_uint32() for i in xrange(1) ])
self.PPLookasideList = vstruct.VArray([ PP_LOOKASIDE_LIST() for i in xrange(16) ])
self.PPNPagedLookasideList = vstruct.VArray([ PP_LOOKASIDE_LIST() for i in xrange(32) ])
self.PPPagedLookasideList = vstruct.VArray([ PP_LOOKASIDE_LIST() for i in xrange(32) ])
self.PacketBarrier = v_uint32()
self.ReverseStall = v_uint32()
self.IpiFrame = v_ptr32()
self.PrcbPad2 = vstruct.VArray([ v_uint8() for i in xrange(52) ])
self.CurrentPacket = vstruct.VArray([ v_ptr32() for i in xrange(3) ])
self.TargetSet = v_uint32()
self.WorkerRoutine = v_ptr32()
self.IpiFrozen = v_uint32()
self.PrcbPad3 = vstruct.VArray([ v_uint8() for i in xrange(40) ])
self.RequestSummary = v_uint32()
self.SignalDone = v_ptr32()
self.PrcbPad4 = vstruct.VArray([ v_uint8() for i in xrange(56) ])
self.DpcListHead = LIST_ENTRY()
self.DpcStack = v_ptr32()
self.DpcCount = v_uint32()
self.DpcQueueDepth = v_uint32()
self.DpcRoutineActive = v_uint32()
self.DpcInterruptRequested = v_uint32()
self.DpcLastCount = v_uint32()
self.DpcRequestRate = v_uint32()
self.MaximumDpcQueueDepth = v_uint32()
self.MinimumDpcRate = v_uint32()
self.QuantumEnd = v_uint32()
self.PrcbPad5 = vstruct.VArray([ v_uint8() for i in xrange(16) ])
self.DpcLock = v_uint32()
self.PrcbPad6 = vstruct.VArray([ v_uint8() for i in xrange(28) ])
self.CallDpc = KDPC()
self.ChainedInterruptList = v_ptr32()
self.LookasideIrpFloat = v_uint32()
self.SpareFields0 = vstruct.VArray([ v_uint32() for i in xrange(6) ])
self.VendorString = vstruct.VArray([ v_uint8() for i in xrange(13) ])
self.InitialApicId = v_uint8()
self.LogicalProcessorsPerPhysicalProcessor = v_uint8()
self._pad0910 = v_bytes(size=1)
self.MHz = v_uint32()
self.FeatureBits = v_uint32()
self.UpdateSignature = LARGE_INTEGER()
self.NpxSaveArea = FX_SAVE_AREA()
self.PowerState = PROCESSOR_POWER_STATE()
class HEAP_VIRTUAL_ALLOC_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Entry = LIST_ENTRY()
self.ExtraStuff = HEAP_ENTRY_EXTRA()
self.CommitSize = v_uint32()
self.ReserveSize = v_uint32()
self.BusyBlock = HEAP_ENTRY()
class VI_DEADLOCK_THREAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Thread = v_ptr32()
self.CurrentSpinNode = v_ptr32()
self.CurrentOtherNode = v_ptr32()
self.ListEntry = LIST_ENTRY()
self.NodeCount = v_uint32()
self.PagingCount = v_uint32()
class SUPPORTED_RANGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.SystemAddressSpace = v_uint32()
self.SystemBase = v_uint64()
self.Base = v_uint64()
self.Limit = v_uint64()
class ARBITER_PARAMETERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Parameters = _unnamed_15247()
class EXCEPTION_RECORD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionCode = v_uint32()
self.ExceptionFlags = v_uint32()
self.ExceptionRecord = v_ptr32()
self.ExceptionAddress = v_ptr32()
self.NumberParameters = v_uint32()
self.ExceptionInformation = vstruct.VArray([ v_uint32() for i in xrange(15) ])
class MMPTE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u = _unnamed_11597()
class VI_DEADLOCK_NODE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Parent = v_ptr32()
self.ChildrenList = LIST_ENTRY()
self.SiblingsList = LIST_ENTRY()
self.ResourceList = LIST_ENTRY()
self.Root = v_ptr32()
self.ThreadEntry = v_ptr32()
self.Active = v_uint32()
self.StackTrace = vstruct.VArray([ v_ptr32() for i in xrange(8) ])
self.ParentStackTrace = vstruct.VArray([ v_ptr32() for i in xrange(8) ])
class KPCR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NtTib = NT_TIB()
self.SelfPcr = v_ptr32()
self.Prcb = v_ptr32()
self.Irql = v_uint8()
self._pad0028 = v_bytes(size=3)
self.IRR = v_uint32()
self.IrrActive = v_uint32()
self.IDR = v_uint32()
self.KdVersionBlock = v_ptr32()
self.IDT = v_ptr32()
self.GDT = v_ptr32()
self.TSS = v_ptr32()
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.SetMember = v_uint32()
self.StallScaleFactor = v_uint32()
self.DebugActive = v_uint8()
self.Number = v_uint8()
self.Spare0 = v_uint8()
self.SecondLevelCacheAssociativity = v_uint8()
self.VdmAlert = v_uint32()
self.KernelReserved = vstruct.VArray([ v_uint32() for i in xrange(14) ])
self.SecondLevelCacheSize = v_uint32()
self.HalReserved = vstruct.VArray([ v_uint32() for i in xrange(16) ])
self.InterruptMode = v_uint32()
self.Spare1 = v_uint8()
self._pad00dc = v_bytes(size=3)
self.KernelReserved2 = vstruct.VArray([ v_uint32() for i in xrange(17) ])
self.PrcbData = KPRCB()
class IMAGE_FILE_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Machine = v_uint16()
self.NumberOfSections = v_uint16()
self.TimeDateStamp = v_uint32()
self.PointerToSymbolTable = v_uint32()
self.NumberOfSymbols = v_uint32()
self.SizeOfOptionalHeader = v_uint16()
self.Characteristics = v_uint16()
class CM_KEY_INDEX(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint16()
self.Count = v_uint16()
self.List = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class IMAGE_DEBUG_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Characteristics = v_uint32()
self.TimeDateStamp = v_uint32()
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.Type = v_uint32()
self.SizeOfData = v_uint32()
self.AddressOfRawData = v_uint32()
self.PointerToRawData = v_uint32()
class AMD64_DBGKD_CONTROL_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TraceFlag = v_uint32()
self.Dr7 = v_uint64()
self.CurrentSymbolStart = v_uint64()
self.CurrentSymbolEnd = v_uint64()
class SYSPTES_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = LIST_ENTRY()
self.Count = v_uint32()
class DBGKD_READ_WRITE_IO_EXTENDED32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSize = v_uint32()
self.InterfaceType = v_uint32()
self.BusNumber = v_uint32()
self.AddressSpace = v_uint32()
self.IoAddress = v_uint32()
self.DataValue = v_uint32()
class PEB_LDR_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.Initialized = v_uint8()
self._pad0008 = v_bytes(size=3)
self.SsHandle = v_ptr32()
self.InLoadOrderModuleList = LIST_ENTRY()
self.InMemoryOrderModuleList = LIST_ENTRY()
self.InInitializationOrderModuleList = LIST_ENTRY()
self.EntryInProgress = v_ptr32()
class DBGKD_WRITE_BREAKPOINT64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakPointAddress = v_uint64()
self.BreakPointHandle = v_uint32()
self._pad0010 = v_bytes(size=4)
class IMAGE_NT_HEADERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.FileHeader = IMAGE_FILE_HEADER()
self.OptionalHeader = IMAGE_OPTIONAL_HEADER()
class HEAP_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.PreviousSize = v_uint16()
self.SmallTagIndex = v_uint8()
self.Flags = v_uint8()
self.UnusedBytes = v_uint8()
self.SegmentIndex = v_uint8()
class _unnamed_16304(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MinimumVector = v_uint32()
self.MaximumVector = v_uint32()
class SECURITY_SUBJECT_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ClientToken = v_ptr32()
self.ImpersonationLevel = v_uint32()
self.PrimaryToken = v_ptr32()
self.ProcessAuditId = v_ptr32()
class X86_DBGKD_CONTROL_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TraceFlag = v_uint32()
self.Dr7 = v_uint32()
self.CurrentSymbolStart = v_uint32()
self.CurrentSymbolEnd = v_uint32()
class SEP_AUDIT_POLICY_OVERLAY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PolicyBits = v_uint64()
class MI_VERIFIER_DRIVER_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Links = LIST_ENTRY()
self.Loads = v_uint32()
self.Unloads = v_uint32()
self.BaseName = UNICODE_STRING()
self.StartAddress = v_ptr32()
self.EndAddress = v_ptr32()
self.Flags = v_uint32()
self.Signature = v_uint32()
self.Reserved = v_uint32()
self.VerifierPoolLock = v_uint32()
self.PoolHash = v_ptr32()
self.PoolHashSize = v_uint32()
self.PoolHashFree = v_uint32()
self.PoolHashReserved = v_uint32()
self.CurrentPagedPoolAllocations = v_uint32()
self.CurrentNonPagedPoolAllocations = v_uint32()
self.PeakPagedPoolAllocations = v_uint32()
self.PeakNonPagedPoolAllocations = v_uint32()
self.PagedBytes = v_uint32()
self.NonPagedBytes = v_uint32()
self.PeakPagedBytes = v_uint32()
self.PeakNonPagedBytes = v_uint32()
class GDI_TEB_BATCH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Offset = v_uint32()
self.HDC = v_uint32()
self.Buffer = vstruct.VArray([ v_uint32() for i in xrange(310) ])
class WMI_CLIENT_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ProcessorNumber = v_uint8()
self.Alignment = v_uint8()
self.LoggerId = v_uint16()
class MMSUBSECTION_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReadOnly = v_uint32()
class INTERFACE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.Version = v_uint16()
self.Context = v_ptr32()
self.InterfaceReference = v_ptr32()
self.InterfaceDereference = v_ptr32()
class OBJECT_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HashBuckets = vstruct.VArray([ v_ptr32() for i in xrange(37) ])
self.Lock = EX_PUSH_LOCK()
self.DeviceMap = v_ptr32()
self.SessionId = v_uint32()
self.Reserved = v_uint16()
self.SymbolicLinkUsageCount = v_uint16()
class WMI_LOGGER_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BufferSpinLock = v_uint32()
self._pad0008 = v_bytes(size=4)
self.StartTime = LARGE_INTEGER()
self.LogFileHandle = v_ptr32()
self.LoggerSemaphore = KSEMAPHORE()
self.LoggerThread = v_ptr32()
self.LoggerEvent = KEVENT()
self.FlushEvent = KEVENT()
self.LoggerStatus = v_uint32()
self.LoggerId = v_uint32()
self.BuffersAvailable = v_uint32()
self.UsePerfClock = v_uint32()
self.WriteFailureLimit = v_uint32()
self.BuffersDirty = v_uint32()
self.BuffersInUse = v_uint32()
self.SwitchingInProgress = v_uint32()
self._pad0070 = v_bytes(size=4)
self.FreeList = SLIST_HEADER()
self.FlushList = SLIST_HEADER()
self.GlobalList = SLIST_HEADER()
self.ProcessorBuffers = v_ptr32()
self.LoggerName = UNICODE_STRING()
self.LogFileName = UNICODE_STRING()
self.LogFilePattern = UNICODE_STRING()
self.NewLogFileName = UNICODE_STRING()
self.EndPageMarker = v_ptr32()
self.CollectionOn = v_uint32()
self.KernelTraceOn = v_uint32()
self.PerfLogInTransition = v_uint32()
self.RequestFlag = v_uint32()
self.EnableFlags = v_uint32()
self.MaximumFileSize = v_uint32()
self.LoggerMode = v_uint32()
self.LastFlushedBuffer = v_uint32()
self.RefCount = v_uint32()
self.FlushTimer = v_uint32()
self.FirstBufferOffset = LARGE_INTEGER()
self.ByteOffset = LARGE_INTEGER()
self.BufferAgeLimit = LARGE_INTEGER()
self.MaximumBuffers = v_uint32()
self.MinimumBuffers = v_uint32()
self.EventsLost = v_uint32()
self.BuffersWritten = v_uint32()
self.LogBuffersLost = v_uint32()
self.RealTimeBuffersLost = v_uint32()
self.BufferSize = v_uint32()
self.NumberOfBuffers = v_uint32()
self.SequencePtr = v_ptr32()
self.InstanceGuid = GUID()
self.LoggerHeader = v_ptr32()
self.GetCpuClock = v_ptr32()
self.ClientSecurityContext = SECURITY_CLIENT_CONTEXT()
self.LoggerExtension = v_ptr32()
self.ReleaseQueue = v_uint32()
self.EnableFlagExtension = TRACE_ENABLE_FLAG_EXTENSION()
self.LocalSequence = v_uint32()
self.MaximumIrql = v_uint32()
self.EnableFlagArray = v_ptr32()
self.LoggerMutex = KMUTANT()
self.MutexCount = v_uint32()
self.FileCounter = v_uint32()
self.BufferCallback = v_ptr32()
self.CallbackContext = v_ptr32()
self.PoolType = v_uint32()
self._pad01b8 = v_bytes(size=4)
self.ReferenceSystemTime = LARGE_INTEGER()
self.ReferenceTimeStamp = LARGE_INTEGER()
class IO_STACK_LOCATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MajorFunction = v_uint8()
self.MinorFunction = v_uint8()
self.Flags = v_uint8()
self.Control = v_uint8()
self.Parameters = _unnamed_14762()
self.DeviceObject = v_ptr32()
self.FileObject = v_ptr32()
self.CompletionRoutine = v_ptr32()
self.Context = v_ptr32()
class DBGKD_READ_WRITE_MSR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Msr = v_uint32()
self.DataValueLow = v_uint32()
self.DataValueHigh = v_uint32()
class _unnamed_14745(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.UserApcRoutine = v_ptr32()
self.UserApcContext = v_ptr32()
class PCI_PDO_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.ExtensionType = v_uint32()
self.IrpDispatchTable = v_ptr32()
self.DeviceState = v_uint8()
self.TentativeNextState = v_uint8()
self._pad0010 = v_bytes(size=2)
self.SecondaryExtLock = KEVENT()
self.Slot = PCI_SLOT_NUMBER()
self.PhysicalDeviceObject = v_ptr32()
self.ParentFdoExtension = v_ptr32()
self.SecondaryExtension = SINGLE_LIST_ENTRY()
self.BusInterfaceReferenceCount = v_uint32()
self.AgpInterfaceReferenceCount = v_uint32()
self.VendorId = v_uint16()
self.DeviceId = v_uint16()
self.SubsystemVendorId = v_uint16()
self.SubsystemId = v_uint16()
self.RevisionId = v_uint8()
self.ProgIf = v_uint8()
self.SubClass = v_uint8()
self.BaseClass = v_uint8()
self.AdditionalResourceCount = v_uint8()
self.AdjustedInterruptLine = v_uint8()
self.InterruptPin = v_uint8()
self.RawInterruptLine = v_uint8()
self.CapabilitiesPtr = v_uint8()
self.SavedLatencyTimer = v_uint8()
self.SavedCacheLineSize = v_uint8()
self.HeaderType = v_uint8()
self.NotPresent = v_uint8()
self.ReportedMissing = v_uint8()
self.ExpectedWritebackFailure = v_uint8()
self.NoTouchPmeEnable = v_uint8()
self.LegacyDriver = v_uint8()
self.UpdateHardware = v_uint8()
self.MovedDevice = v_uint8()
self.DisablePowerDown = v_uint8()
self.NeedsHotPlugConfiguration = v_uint8()
self.SwitchedIDEToNativeMode = v_uint8()
self.BIOSAllowsIDESwitchToNativeMode = v_uint8()
self.IoSpaceUnderNativeIdeControl = v_uint8()
self.OnDebugPath = v_uint8()
self._pad005c = v_bytes(size=3)
self.PowerState = PCI_POWER_STATE()
self.Dependent = PCI_HEADER_TYPE_DEPENDENT()
self.HackFlags = v_uint64()
self.Resources = v_ptr32()
self.BridgeFdoExtension = v_ptr32()
self.NextBridge = v_ptr32()
self.NextHashEntry = v_ptr32()
self.Lock = PCI_LOCK()
self.PowerCapabilities = PCI_PMC()
self.TargetAgpCapabilityId = v_uint8()
self._pad00c4 = v_bytes(size=1)
self.CommandEnables = v_uint16()
self.InitialCommand = v_uint16()
class IMAGE_DATA_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VirtualAddress = v_uint32()
self.Size = v_uint32()
class FILE_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.DeviceObject = v_ptr32()
self.Vpb = v_ptr32()
self.FsContext = v_ptr32()
self.FsContext2 = v_ptr32()
self.SectionObjectPointer = v_ptr32()
self.PrivateCacheMap = v_ptr32()
self.FinalStatus = v_uint32()
self.RelatedFileObject = v_ptr32()
self.LockOperation = v_uint8()
self.DeletePending = v_uint8()
self.ReadAccess = v_uint8()
self.WriteAccess = v_uint8()
self.DeleteAccess = v_uint8()
self.SharedRead = v_uint8()
self.SharedWrite = v_uint8()
self.SharedDelete = v_uint8()
self.Flags = v_uint32()
self.FileName = UNICODE_STRING()
self.CurrentByteOffset = LARGE_INTEGER()
self.Waiters = v_uint32()
self.Busy = v_uint32()
self.LastLock = v_ptr32()
self.Lock = KEVENT()
self.Event = KEVENT()
self.CompletionContext = v_ptr32()
class MMWSLE_HASH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Key = v_ptr32()
self.Index = v_uint32()
class _unnamed_16004(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityContext = v_ptr32()
self.Options = v_uint32()
self.Reserved = v_uint16()
self.ShareAccess = v_uint16()
self.Parameters = v_ptr32()
class SECTION_IMAGE_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TransferAddress = v_ptr32()
self.ZeroBits = v_uint32()
self.MaximumStackSize = v_uint32()
self.CommittedStackSize = v_uint32()
self.SubSystemType = v_uint32()
self.SubSystemMinorVersion = v_uint16()
self.SubSystemMajorVersion = v_uint16()
self.GpValue = v_uint32()
self.ImageCharacteristics = v_uint16()
self.DllCharacteristics = v_uint16()
self.Machine = v_uint16()
self.ImageContainsCode = v_uint8()
self.Spare1 = v_uint8()
self.LoaderFlags = v_uint32()
self.ImageFileSize = v_uint32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class HEAP_SUBSEGMENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Bucket = v_ptr32()
self.UserBlocks = v_ptr32()
self.AggregateExchg = INTERLOCK_SEQ()
self.BlockSize = v_uint16()
self.FreeThreshold = v_uint16()
self.BlockCount = v_uint16()
self.SizeIndex = v_uint8()
self.AffinityIndex = v_uint8()
self.SFreeListEntry = SINGLE_LIST_ENTRY()
self.Lock = v_uint32()
class ERESOURCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SystemResourcesList = LIST_ENTRY()
self.OwnerTable = v_ptr32()
self.ActiveCount = v_uint16()
self.Flag = v_uint16()
self.SharedWaiters = v_ptr32()
self.ExclusiveWaiters = v_ptr32()
self.OwnerThreads = vstruct.VArray([ OWNER_ENTRY() for i in xrange(2) ])
self.ContentionCount = v_uint32()
self.NumberOfSharedWaiters = v_uint16()
self.NumberOfExclusiveWaiters = v_uint16()
self.Address = v_ptr32()
self.SpinLock = v_uint32()
class MBCB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NodeTypeCode = v_uint16()
self.NodeIsInZone = v_uint16()
self.PagesToWrite = v_uint32()
self.DirtyPages = v_uint32()
self.Reserved = v_uint32()
self.BitmapRanges = LIST_ENTRY()
self.ResumeWritePage = v_uint64()
self.BitmapRange1 = BITMAP_RANGE()
self.BitmapRange2 = BITMAP_RANGE()
self.BitmapRange3 = BITMAP_RANGE()
class RTL_ATOM_TABLE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HashLink = v_ptr32()
self.HandleIndex = v_uint16()
self.Atom = v_uint16()
self.ReferenceCount = v_uint16()
self.Flags = v_uint8()
self.NameLength = v_uint8()
self.Name = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0010 = v_bytes(size=2)
class _unnamed_12979(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Overlay = _unnamed_14765()
self._pad0030 = v_bytes(size=8)
class CHILD_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.List = v_uint32()
class _unnamed_16094(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
class RTL_RANGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint64()
self.End = v_uint64()
self.UserData = v_ptr32()
self.Owner = v_ptr32()
self.Attributes = v_uint8()
self.Flags = v_uint8()
self._pad0020 = v_bytes(size=6)
class PCI_MJ_DISPATCH_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PnpIrpMaximumMinorFunction = v_uint32()
self.PnpIrpDispatchTable = v_ptr32()
self.PowerIrpMaximumMinorFunction = v_uint32()
self.PowerIrpDispatchTable = v_ptr32()
self.SystemControlIrpDispatchStyle = v_uint32()
self.SystemControlIrpDispatchFunction = v_ptr32()
self.OtherIrpDispatchStyle = v_uint32()
self.OtherIrpDispatchFunction = v_ptr32()
class EX_PUSH_LOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Waiting = v_uint32()
class ARBITER_INTERFACE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.Version = v_uint16()
self.Context = v_ptr32()
self.InterfaceReference = v_ptr32()
self.InterfaceDereference = v_ptr32()
self.ArbiterHandler = v_ptr32()
self.Flags = v_uint32()
class SLIST_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Alignment = v_uint64()
class _unnamed_16135(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Srb = v_ptr32()
class _unnamed_16642(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BlockedDriverGuid = GUID()
class _unnamed_16131(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Vpb = v_ptr32()
self.DeviceObject = v_ptr32()
class HEAP_SEGMENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Entry = HEAP_ENTRY()
self.Signature = v_uint32()
self.Flags = v_uint32()
self.Heap = v_ptr32()
self.LargestUnCommittedRange = v_uint32()
self.BaseAddress = v_ptr32()
self.NumberOfPages = v_uint32()
self.FirstEntry = v_ptr32()
self.LastValidEntry = v_ptr32()
self.NumberOfUnCommittedPages = v_uint32()
self.NumberOfUnCommittedRanges = v_uint32()
self.UnCommittedRanges = v_ptr32()
self.AllocatorBackTraceIndex = v_uint16()
self.Reserved = v_uint16()
self.LastEntryInSegment = v_ptr32()
class POP_DEVICE_POWER_IRP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Free = SINGLE_LIST_ENTRY()
self.Irp = v_ptr32()
self.Notify = v_ptr32()
self.Pending = LIST_ENTRY()
self.Complete = LIST_ENTRY()
self.Abort = LIST_ENTRY()
self.Failed = LIST_ENTRY()
class HEAP_FREE_ENTRY_EXTRA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TagIndex = v_uint16()
self.FreeBackTraceIndex = v_uint16()
class PRIVATE_CACHE_MAP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NodeTypeCode = v_uint16()
self._pad0004 = v_bytes(size=2)
self.ReadAheadMask = v_uint32()
self.FileObject = v_ptr32()
self._pad0010 = v_bytes(size=4)
self.FileOffset1 = LARGE_INTEGER()
self.BeyondLastByte1 = LARGE_INTEGER()
self.FileOffset2 = LARGE_INTEGER()
self.BeyondLastByte2 = LARGE_INTEGER()
self.ReadAheadOffset = vstruct.VArray([ LARGE_INTEGER() for i in xrange(2) ])
self.ReadAheadLength = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.ReadAheadSpinLock = v_uint32()
self.PrivateLinks = LIST_ENTRY()
self._pad0058 = v_bytes(size=4)
class SEP_AUDIT_POLICY_CATEGORIES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.System = v_uint32()
self.AccountLogon = v_uint32()
class IMAGE_SECTION_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Name = vstruct.VArray([ v_uint8() for i in xrange(8) ])
self.Misc = _unnamed_14793()
self.VirtualAddress = v_uint32()
self.SizeOfRawData = v_uint32()
self.PointerToRawData = v_uint32()
self.PointerToRelocations = v_uint32()
self.PointerToLinenumbers = v_uint32()
self.NumberOfRelocations = v_uint16()
self.NumberOfLinenumbers = v_uint16()
self.Characteristics = v_uint32()
class ACL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AclRevision = v_uint8()
self.Sbz1 = v_uint8()
self.AclSize = v_uint16()
self.AceCount = v_uint16()
self.Sbz2 = v_uint16()
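# Illustrative sketch (not part of the generated definitions above): these
# classes are plain vstruct.VStruct subclasses, so a structure can be built
# in memory and serialized back to bytes with vsEmit(), the inverse of
# vsParse().  The field values below are made up for demonstration.
#
#   acl = ACL()
#   acl.AclRevision = 2
#   acl.AclSize = 8
#   raw = acl.vsEmit()            # packed little-endian bytes
#   assert len(raw) == len(acl)   # 8 bytes for this layout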
class _unnamed_10498(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
class _unnamed_10880(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FnArea = FNSAVE_FORMAT()
self._pad0208 = v_bytes(size=412)
class VACB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseAddress = v_ptr32()
self.SharedCacheMap = v_ptr32()
self.Overlay = _unnamed_11926()
self.LruList = LIST_ENTRY()
class WAIT_CONTEXT_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WaitQueueEntry = KDEVICE_QUEUE_ENTRY()
self.DeviceRoutine = v_ptr32()
self.DeviceContext = v_ptr32()
self.NumberOfMapRegisters = v_uint32()
self.DeviceObject = v_ptr32()
self.CurrentIrp = v_ptr32()
self.BufferChainingDpc = v_ptr32()
class CM_KEY_NODE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint16()
self.Flags = v_uint16()
self.LastWriteTime = LARGE_INTEGER()
self.Spare = v_uint32()
self.Parent = v_uint32()
self.SubKeyCounts = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.SubKeyLists = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.ValueList = CHILD_LIST()
self.Security = v_uint32()
self.Class = v_uint32()
self.MaxNameLen = v_uint32()
self.MaxClassLen = v_uint32()
self.MaxValueNameLen = v_uint32()
self.MaxValueDataLen = v_uint32()
self.WorkVar = v_uint32()
self.NameLength = v_uint16()
self.ClassLength = v_uint16()
self.Name = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0050 = v_bytes(size=2)
class SE_AUDIT_PROCESS_CREATION_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ImageFileName = v_ptr32()
class ACTIVATION_CONTEXT_STACK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint32()
self.NextCookieSequenceNumber = v_uint32()
self.ActiveFrame = v_ptr32()
self.FrameListCache = LIST_ENTRY()
class SECURITY_TOKEN_PROXY_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.ProxyClass = v_uint32()
self.PathInfo = UNICODE_STRING()
self.ContainerMask = v_uint32()
self.ObjectMask = v_uint32()
class _unnamed_16639(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VetoType = v_uint32()
self.DeviceIdVetoNameBuffer = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0008 = v_bytes(size=2)
class _unnamed_16636(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NotificationCode = v_uint32()
self.NotificationData = v_uint32()
class _unnamed_16634(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Notification = v_ptr32()
class EX_RUNDOWN_REF(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
class _unnamed_16631(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NotificationStructure = v_ptr32()
self.DeviceIds = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0008 = v_bytes(size=2)
class CM_NOTIFY_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HiveList = LIST_ENTRY()
self.PostList = LIST_ENTRY()
self.KeyControlBlock = v_ptr32()
self.KeyBody = v_ptr32()
self.Filter = v_uint32()
self.SubjectContext = SECURITY_SUBJECT_CONTEXT()
class MMPTE_PROTOTYPE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class PCI_HEADER_TYPE_DEPENDENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.type0 = _unnamed_14410()
class CM_BIG_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint16()
self.Count = v_uint16()
self.List = v_uint32()
class IMAGE_DOS_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.e_magic = v_uint16()
self.e_cblp = v_uint16()
self.e_cp = v_uint16()
self.e_crlc = v_uint16()
self.e_cparhdr = v_uint16()
self.e_minalloc = v_uint16()
self.e_maxalloc = v_uint16()
self.e_ss = v_uint16()
self.e_sp = v_uint16()
self.e_csum = v_uint16()
self.e_ip = v_uint16()
self.e_cs = v_uint16()
self.e_lfarlc = v_uint16()
self.e_ovno = v_uint16()
self.e_res = vstruct.VArray([ v_uint16() for i in xrange(4) ])
self.e_oemid = v_uint16()
self.e_oeminfo = v_uint16()
self.e_res2 = vstruct.VArray([ v_uint16() for i in xrange(10) ])
self.e_lfanew = v_uint32()
class _unnamed_15795(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceClass = _unnamed_16624()
class DBGKD_FILL_MEMORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Address = v_uint64()
self.Length = v_uint32()
self.Flags = v_uint16()
self.PatternLength = v_uint16()
class CM_KEY_SECURITY_CACHE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Cell = v_uint32()
self.CachedSecurity = v_ptr32()
class _unnamed_16663(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.CheckSum = v_uint32()
class _unnamed_16255(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Argument1 = v_ptr32()
self.Argument2 = v_ptr32()
self.Argument3 = v_ptr32()
self.Argument4 = v_ptr32()
class _unnamed_12606(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ImageInformation = v_ptr32()
class _unnamed_12605(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ImageCommitment = v_uint32()
class _unnamed_16226(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InPath = v_uint8()
self.Reserved = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.Type = v_uint32()
class ARBITER_ORDERING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint64()
self.End = v_uint64()
class MMVIEW(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Entry = v_uint32()
self.ControlArea = v_ptr32()
class EXCEPTION_RECORD32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionCode = v_uint32()
self.ExceptionFlags = v_uint32()
self.ExceptionRecord = v_uint32()
self.ExceptionAddress = v_uint32()
self.NumberParameters = v_uint32()
self.ExceptionInformation = vstruct.VArray([ v_uint32() for i in xrange(15) ])
class DBGKD_READ_MEMORY32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TargetBaseAddress = v_uint32()
self.TransferCount = v_uint32()
self.ActualBytesRead = v_uint32()
class QUAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DoNotUseThisField = v_uint64()
class _unnamed_11926(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FileOffset = LARGE_INTEGER()
class LPCP_PORT_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ConnectionPort = v_ptr32()
self.ConnectedPort = v_ptr32()
self.MsgQueue = LPCP_PORT_QUEUE()
self.Creator = CLIENT_ID()
self.ClientSectionBase = v_ptr32()
self.ServerSectionBase = v_ptr32()
self.PortContext = v_ptr32()
self.ClientThread = v_ptr32()
self.SecurityQos = SECURITY_QUALITY_OF_SERVICE()
self.StaticSecurity = SECURITY_CLIENT_CONTEXT()
self.LpcReplyChainHead = LIST_ENTRY()
self.LpcDataInfoChainHead = LIST_ENTRY()
self.ServerProcess = v_ptr32()
self.MaxMessageLength = v_uint16()
self.MaxConnectionInfoLength = v_uint16()
self.Flags = v_uint32()
self.WaitEvent = KEVENT()
class CALL_PERFORMANCE_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SpinLock = v_uint32()
self.HashTable = vstruct.VArray([ LIST_ENTRY() for i in xrange(64) ])
class EXCEPTION_POINTERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionRecord = v_ptr32()
self.ContextRecord = v_ptr32()
class CM_KEY_SECURITY_CACHE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Cell = v_uint32()
self.ConvKey = v_uint32()
self.List = LIST_ENTRY()
self.DescriptorLength = v_uint32()
self.Descriptor = SECURITY_DESCRIPTOR_RELATIVE()
class POP_TRIGGER_WAIT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Event = KEVENT()
self.Status = v_uint32()
self.Link = LIST_ENTRY()
self.Trigger = v_ptr32()
class DEVICE_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.ReferenceCount = v_uint32()
self.DriverObject = v_ptr32()
self.NextDevice = v_ptr32()
self.AttachedDevice = v_ptr32()
self.CurrentIrp = v_ptr32()
self.Timer = v_ptr32()
self.Flags = v_uint32()
self.Characteristics = v_uint32()
self.Vpb = v_ptr32()
self.DeviceExtension = v_ptr32()
self.DeviceType = v_uint32()
self.StackSize = v_uint8()
self._pad0034 = v_bytes(size=3)
self.Queue = _unnamed_11075()
self.AlignmentRequirement = v_uint32()
self.DeviceQueue = KDEVICE_QUEUE()
self.Dpc = KDPC()
self.ActiveThreadCount = v_uint32()
self.SecurityDescriptor = v_ptr32()
self.DeviceLock = KEVENT()
self.SectorSize = v_uint16()
self.Spare1 = v_uint16()
self.DeviceObjectExtension = v_ptr32()
self.Reserved = v_ptr32()
class MMVAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartingVpn = v_uint32()
self.EndingVpn = v_uint32()
self.Parent = v_ptr32()
self.LeftChild = v_ptr32()
self.RightChild = v_ptr32()
self.u = _unnamed_14102()
self.ControlArea = v_ptr32()
self.FirstPrototypePte = v_ptr32()
self.LastContiguousPte = v_ptr32()
self.u2 = _unnamed_14103()
class _unnamed_13227(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LongFlags = v_uint32()
class CM_NAME_HASH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ConvKey = v_uint32()
self.NextHash = v_ptr32()
self.NameLength = v_uint16()
self.Name = vstruct.VArray([ v_uint16() for i in xrange(1) ])
class EX_PUSH_LOCK_WAIT_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WakeEvent = KEVENT()
self.Next = v_ptr32()
self.ShareCount = v_uint32()
self.Exclusive = v_uint8()
self._pad001c = v_bytes(size=3)
class _unnamed_13174(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ShortFlags = v_uint16()
self.ReferenceCount = v_uint16()
class _unnamed_16299(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.Alignment = v_uint32()
self.MinimumAddress = LARGE_INTEGER()
self.MaximumAddress = LARGE_INTEGER()
class LPCP_MESSAGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Entry = LIST_ENTRY()
self.SenderPort = v_ptr32()
self.RepliedToThread = v_ptr32()
self.PortContext = v_ptr32()
self._pad0018 = v_bytes(size=4)
self.Request = PORT_MESSAGE()
class EX_QUEUE_WORKER_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.QueueDisabled = v_uint32()
class PCI_FDO_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.List = SINGLE_LIST_ENTRY()
self.ExtensionType = v_uint32()
self.IrpDispatchTable = v_ptr32()
self.DeviceState = v_uint8()
self.TentativeNextState = v_uint8()
self._pad0010 = v_bytes(size=2)
self.SecondaryExtLock = KEVENT()
self.PhysicalDeviceObject = v_ptr32()
self.FunctionalDeviceObject = v_ptr32()
self.AttachedDeviceObject = v_ptr32()
self.ChildListLock = KEVENT()
self.ChildPdoList = v_ptr32()
self.BusRootFdoExtension = v_ptr32()
self.ParentFdoExtension = v_ptr32()
self.ChildBridgePdoList = v_ptr32()
self.PciBusInterface = v_ptr32()
self.MaxSubordinateBus = v_uint8()
self._pad0054 = v_bytes(size=3)
self.BusHandler = v_ptr32()
self.BaseBus = v_uint8()
self.Fake = v_uint8()
self.ChildDelete = v_uint8()
self.Scanned = v_uint8()
self.ArbitersInitialized = v_uint8()
self.BrokenVideoHackApplied = v_uint8()
self.Hibernated = v_uint8()
self._pad0060 = v_bytes(size=1)
self.PowerState = PCI_POWER_STATE()
self.SecondaryExtension = SINGLE_LIST_ENTRY()
self.ChildWaitWakeCount = v_uint32()
self.PreservedConfig = v_ptr32()
self.Lock = PCI_LOCK()
self.HotPlugParameters = _unnamed_14395()
self._pad00bc = v_bytes(size=3)
self.BusHackFlags = v_uint32()
class _unnamed_16573(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReserveDevice = v_ptr32()
class PS_IMPERSONATION_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Token = v_ptr32()
self.CopyOnOpen = v_uint8()
self.EffectiveOnly = v_uint8()
self._pad0008 = v_bytes(size=2)
self.ImpersonationLevel = v_uint32()
class DBGKD_WRITE_BREAKPOINT32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakPointAddress = v_uint32()
self.BreakPointHandle = v_uint32()
class MMPFNLIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Total = v_uint32()
self.ListName = v_uint32()
self.Flink = v_uint32()
self.Blink = v_uint32()
class SINGLE_LIST_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
class _unnamed_14410(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(4) ])
class _unnamed_14411(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PrimaryBus = v_uint8()
self.SecondaryBus = v_uint8()
self.SubordinateBus = v_uint8()
self.SubtractiveDecode = v_uint8()
class KNODE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ProcessorMask = v_uint32()
self.Color = v_uint32()
self.MmShiftedColor = v_uint32()
self.FreeCount = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self._pad0018 = v_bytes(size=4)
self.DeadStackList = SLIST_HEADER()
self.PfnDereferenceSListHead = SLIST_HEADER()
self.PfnDeferredList = v_ptr32()
self.Seed = v_uint8()
self.Flags = flags()
self._pad0030 = v_bytes(size=2)
class _unnamed_14793(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PhysicalAddress = v_uint32()
class _unnamed_16078(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.FileInformationClass = v_uint32()
class SYSTEM_POWER_CAPABILITIES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PowerButtonPresent = v_uint8()
self.SleepButtonPresent = v_uint8()
self.LidPresent = v_uint8()
self.SystemS1 = v_uint8()
self.SystemS2 = v_uint8()
self.SystemS3 = v_uint8()
self.SystemS4 = v_uint8()
self.SystemS5 = v_uint8()
self.HiberFilePresent = v_uint8()
self.FullWake = v_uint8()
self.VideoDimPresent = v_uint8()
self.ApmPresent = v_uint8()
self.UpsPresent = v_uint8()
self.ThermalControl = v_uint8()
self.ProcessorThrottle = v_uint8()
self.ProcessorMinThrottle = v_uint8()
self.ProcessorMaxThrottle = v_uint8()
self.spare2 = vstruct.VArray([ v_uint8() for i in xrange(4) ])
self.DiskSpinDown = v_uint8()
self.spare3 = vstruct.VArray([ v_uint8() for i in xrange(8) ])
self.SystemBatteriesPresent = v_uint8()
self.BatteriesAreShortTerm = v_uint8()
self.BatteryScale = vstruct.VArray([ BATTERY_REPORTING_SCALE() for i in xrange(3) ])
self.AcOnLineWake = v_uint32()
self.SoftLidWake = v_uint32()
self.RtcWake = v_uint32()
self.MinDeviceWakeState = v_uint32()
self.DefaultLowLatencyWake = v_uint32()
class DBGKD_SET_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ContextFlags = v_uint32()
class MMEXTEND_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CommittedSize = v_uint64()
self.ReferenceCount = v_uint32()
self._pad0010 = v_bytes(size=4)
class _unnamed_16075(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.CompletionFilter = v_uint32()
class RTL_USER_PROCESS_PARAMETERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MaximumLength = v_uint32()
self.Length = v_uint32()
self.Flags = v_uint32()
self.DebugFlags = v_uint32()
self.ConsoleHandle = v_ptr32()
self.ConsoleFlags = v_uint32()
self.StandardInput = v_ptr32()
self.StandardOutput = v_ptr32()
self.StandardError = v_ptr32()
self.CurrentDirectory = CURDIR()
self.DllPath = UNICODE_STRING()
self.ImagePathName = UNICODE_STRING()
self.CommandLine = UNICODE_STRING()
self.Environment = v_ptr32()
self.StartingX = v_uint32()
self.StartingY = v_uint32()
self.CountX = v_uint32()
self.CountY = v_uint32()
self.CountCharsX = v_uint32()
self.CountCharsY = v_uint32()
self.FillAttribute = v_uint32()
self.WindowFlags = v_uint32()
self.ShowWindowFlags = v_uint32()
self.WindowTitle = UNICODE_STRING()
self.DesktopInfo = UNICODE_STRING()
self.ShellInfo = UNICODE_STRING()
self.RuntimeData = UNICODE_STRING()
self.CurrentDirectores = vstruct.VArray([ RTL_DRIVE_LETTER_CURDIR() for i in xrange(32) ])
class u(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.KeyNode = CM_KEY_NODE()
class IO_RESOURCE_REQUIREMENTS_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListSize = v_uint32()
self.InterfaceType = v_uint32()
self.BusNumber = v_uint32()
self.SlotNumber = v_uint32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(3) ])
self.AlternativeLists = v_uint32()
self.List = vstruct.VArray([ IO_RESOURCE_LIST() for i in xrange(1) ])
class POWER_CHANNEL_SUMMARY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.TotalCount = v_uint32()
self.D0Count = v_uint32()
self.NotifyList = LIST_ENTRY()
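# Illustrative usage sketch (assumed vstruct API, not part of the generated
# definitions above): raw memory or file bytes can be decoded by
# instantiating one of these structures and calling vsParse() on a byte
# string.  The raw_bytes buffer here is hypothetical; in practice it would
# come from a file read or a debugger memory read.
#
#   dos = IMAGE_DOS_HEADER()
#   dos.vsParse(raw_bytes)        # raw_bytes: first 64 bytes of a PE file
#   print(hex(dos.e_magic))       # 0x5a4d ("MZ") for a valid DOS header
#   print(dos.e_lfanew)           # file offset of the IMAGE_NT_HEADERS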
| apache-2.0 |
lukeiwanski/tensorflow-opencl | tensorflow/contrib/session_bundle/example/export_half_plus_two.py | 44 | 6027 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a toy linear regression inference graph.
Exports a TensorFlow graph to /tmp/half_plus_two/ based on the Exporter
format.
This graph calculates,
y = a*x + b
where a and b are variables with a=0.5 and b=2.
Output from this program is typically used to exercise Session
loading and execution code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
FLAGS = None
def Export(export_dir, use_checkpoint_v2):
with tf.Session() as sess:
# Make model parameters a&b variables instead of constants to
# exercise the variable reloading mechanisms.
a = tf.Variable(0.5, name="a")
b = tf.Variable(2.0, name="b")
# Create a placeholder for serialized tensorflow.Example messages to be fed.
serialized_tf_example = tf.placeholder(tf.string, name="tf_example")
# Parse the tensorflow.Example looking for a feature named "x" with a single
# floating point value.
feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32),}
tf_example = tf.parse_example(serialized_tf_example, feature_configs)
# Use tf.identity() to assign name
x = tf.identity(tf_example["x"], name="x")
# Calculate, y = a*x + b
y = tf.add(tf.multiply(a, x), b, name="y")
# Setup a standard Saver for our variables.
save = tf.train.Saver(
{
"a": a,
"b": b
},
sharded=True,
write_version=tf.train.SaverDef.V2 if use_checkpoint_v2 else
tf.train.SaverDef.V1)
# asset_path contains the base directory of assets used in training (e.g.
# vocabulary files).
original_asset_path = tf.constant("/tmp/original/export/assets")
# Ops reading asset files should reference the asset_path tensor
# which stores the original asset path at training time and the
# overridden assets directory at restore time.
asset_path = tf.Variable(original_asset_path,
name="asset_path",
trainable=False,
collections=[])
assign_asset_path = asset_path.assign(original_asset_path)
# Use a fixed global step number.
global_step_tensor = tf.Variable(123, name="global_step")
# Create a RegressionSignature for our input and output.
regression_signature = exporter.regression_signature(
input_tensor=serialized_tf_example,
# Use tf.identity here because we export two signatures here.
# Otherwise only the graph for one of the signatures will be loaded
# (whichever is created first) during serving.
output_tensor=tf.identity(y))
named_graph_signature = {
"inputs": exporter.generic_signature({"x": x}),
"outputs": exporter.generic_signature({"y": y})
}
# Create two filename assets and corresponding tensors.
# TODO(b/26254158) Consider adding validation of file existence as well as
# hashes (e.g. sha1) for consistency.
original_filename1 = tf.constant("hello1.txt")
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename1)
filename1 = tf.Variable(original_filename1,
name="filename1",
trainable=False,
collections=[])
assign_filename1 = filename1.assign(original_filename1)
original_filename2 = tf.constant("hello2.txt")
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename2)
filename2 = tf.Variable(original_filename2,
name="filename2",
trainable=False,
collections=[])
assign_filename2 = filename2.assign(original_filename2)
# Init op contains a group of all variables that we assign.
init_op = tf.group(assign_asset_path, assign_filename1, assign_filename2)
# CopyAssets is used as a callback during export to copy files to the
# given export directory.
def CopyAssets(filepaths, export_path):
print("copying asset files to: %s" % export_path)
for filepath in filepaths:
print("copying asset file: %s" % filepath)
# Run an export.
tf.global_variables_initializer().run()
export = exporter.Exporter(save)
export.init(
sess.graph.as_graph_def(),
init_op=init_op,
default_graph_signature=regression_signature,
named_graph_signatures=named_graph_signature,
assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),
assets_callback=CopyAssets)
export.export(export_dir, global_step_tensor, sess)
def main(_):
Export(FLAGS.export_dir, FLAGS.use_checkpoint_v2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--export_dir",
type=str,
default="/tmp/half_plus_two",
help="Directory where to export inference model."
)
parser.add_argument(
"--use_checkpoint_v2",
type="bool",
nargs="?",
const=True,
default=False,
help="If true, write v2 checkpoint files."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
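# Illustrative follow-up (a sketch, not part of the original example): the
# directory written by Export() can be read back with the session_bundle
# loader from the same contrib package.  The loader name, the zero-padded
# export subdirectory (global step 123), and the feed/fetch tensor names
# are assumptions and may differ across TensorFlow versions.
#
#   from tensorflow.contrib.session_bundle import session_bundle
#   sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
#       "/tmp/half_plus_two/00000123")
#   y = sess.graph.get_tensor_by_name("y:0")
#   print(sess.run(y, {"x:0": [[10.0]]}))   # expected: [[7.0]]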
| apache-2.0 |
sephii/django-cms | cms/cms_plugins.py | 46 | 4350 | # -*- coding: utf-8 -*-
from cms.models import CMSPlugin, Placeholder
from cms.models.aliaspluginmodel import AliasPluginModel
from cms.models.placeholderpluginmodel import PlaceholderReference
from cms.plugin_base import CMSPluginBase, PluginMenuItem
from cms.plugin_pool import plugin_pool
from cms.plugin_rendering import render_placeholder
from cms.utils.urlutils import admin_reverse
from django.conf.urls import url
from django.http import HttpResponseForbidden, HttpResponseBadRequest, HttpResponse
from django.middleware.csrf import get_token
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, get_language
class PlaceholderPlugin(CMSPluginBase):
name = _("Placeholder")
parent_classes = [0]  # so you will not be able to add it to anything
#require_parent = True
render_plugin = False
admin_preview = False
system = True
model = PlaceholderReference
plugin_pool.register_plugin(PlaceholderPlugin)
class AliasPlugin(CMSPluginBase):
name = _("Alias")
allow_children = False
model = AliasPluginModel
render_template = "cms/plugins/alias.html"
system = True
def render(self, context, instance, placeholder):
from cms.utils.plugins import downcast_plugins, build_plugin_tree
context['instance'] = instance
context['placeholder'] = placeholder
if instance.plugin_id:
plugins = instance.plugin.get_descendants().order_by('placeholder', 'path')
plugins = [instance.plugin] + list(plugins)
plugins = downcast_plugins(plugins)
plugins[0].parent_id = None
plugins = build_plugin_tree(plugins)
context['plugins'] = plugins
if instance.alias_placeholder_id:
content = render_placeholder(instance.alias_placeholder, context)
context['content'] = mark_safe(content)
return context
def get_extra_global_plugin_menu_items(self, request, plugin):
return [
PluginMenuItem(
_("Create Alias"),
admin_reverse("cms_create_alias"),
data={'plugin_id': plugin.pk, 'csrfmiddlewaretoken': get_token(request)},
)
]
def get_extra_placeholder_menu_items(self, request, placeholder):
return [
PluginMenuItem(
_("Create Alias"),
admin_reverse("cms_create_alias"),
data={'placeholder_id': placeholder.pk, 'csrfmiddlewaretoken': get_token(request)},
)
]
def get_plugin_urls(self):
return [
url(r'^create_alias/$', self.create_alias, name='cms_create_alias'),
]
def create_alias(self, request):
if not request.user.is_staff:
return HttpResponseForbidden("not enough privileges")
if 'plugin_id' not in request.POST and 'placeholder_id' not in request.POST:
return HttpResponseBadRequest("plugin_id or placeholder_id POST parameter missing.")
plugin = None
placeholder = None
if 'plugin_id' in request.POST:
pk = request.POST['plugin_id']
try:
plugin = CMSPlugin.objects.get(pk=pk)
except CMSPlugin.DoesNotExist:
return HttpResponseBadRequest("plugin with id %s not found." % pk)
if 'placeholder_id' in request.POST:
pk = request.POST['placeholder_id']
try:
placeholder = Placeholder.objects.get(pk=pk)
except Placeholder.DoesNotExist:
return HttpResponseBadRequest("placeholder with id %s not found." % pk)
if not placeholder.has_change_permission(request):
return HttpResponseBadRequest("You do not have enough permission to alias this placeholder.")
clipboard = request.toolbar.clipboard
clipboard.cmsplugin_set.all().delete()
language = get_language()
if plugin:
language = plugin.language
alias = AliasPluginModel(language=language, placeholder=clipboard, plugin_type="AliasPlugin")
if plugin:
alias.plugin = plugin
if placeholder:
alias.alias_placeholder = placeholder
alias.save()
return HttpResponse("ok")
plugin_pool.register_plugin(AliasPlugin)
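# Illustrative request sketch (not part of the original module): a staff
# user can exercise the create_alias view registered above by POSTing a
# plugin id to the named URL.  The credentials and the plugin id are
# hypothetical; admin_reverse and the "cms_create_alias" name come from
# this module.
#
#   from django.test import Client
#   from cms.utils.urlutils import admin_reverse
#   c = Client()
#   c.login(username="staff-user", password="secret")
#   c.post(admin_reverse("cms_create_alias"), {"plugin_id": 42})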
| bsd-3-clause |
ayumilong/rethinkdb | external/v8_3.30.33.16/build/gyp/test/intermediate_dir/gyptest-intermediate-dir.py | 243 | 1398 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that targets have independent INTERMEDIATE_DIRs.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('test.gyp', chdir='src')
test.build('test.gyp', 'target1', chdir='src')
# Check stuff exists.
intermediate_file1 = test.read('src/outfile.txt')
test.must_contain(intermediate_file1, 'target1')
shared_intermediate_file1 = test.read('src/shared_outfile.txt')
test.must_contain(shared_intermediate_file1, 'shared_target1')
test.run_gyp('test2.gyp', chdir='src')
# Force the shared intermediate to be rebuilt.
test.sleep()
test.touch('src/shared_infile.txt')
test.build('test2.gyp', 'target2', chdir='src')
# Check INTERMEDIATE_DIR file didn't get overwritten but SHARED_INTERMEDIATE_DIR
# file did.
intermediate_file2 = test.read('src/outfile.txt')
test.must_contain(intermediate_file1, 'target1')
test.must_contain(intermediate_file2, 'target2')
shared_intermediate_file2 = test.read('src/shared_outfile.txt')
if shared_intermediate_file1 != shared_intermediate_file2:
test.fail_test(shared_intermediate_file1 + ' != ' + shared_intermediate_file2)
test.must_contain(shared_intermediate_file1, 'shared_target2')
test.must_contain(shared_intermediate_file2, 'shared_target2')
test.pass_test()
| agpl-3.0 |
sbesson/openmicroscopy | components/tools/OmeroWeb/test/integration/test_history.py | 3 | 3141 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests display of data in History page."""
from __future__ import print_function
from omeroweb.testlib import IWebTest
from omeroweb.testlib import get, post
from datetime import datetime
from django.core.urlresolvers import reverse
class TestHistory(IWebTest):
"""Tests display of data in History page."""
def test_history(self):
"""Test /webclient/history/ page."""
request_url = reverse("load_template", args=["history"])
response = get(self.django_client, request_url)
assert "history_calendar" in response.content.decode("utf-8")
def test_calendar_default(self):
"""Test display of new Project in today's history page."""
calendar_url = reverse("load_calendar")
response = get(self.django_client, calendar_url)
# Calendar is initially empty (no 'Project' icon)
assert "folder16.png" not in response.content.decode("utf-8")
# Add Project
request_url = reverse("manage_action_containers",
args=["addnewcontainer"])
data = {
'folder_type': 'project',
'name': 'foobar'
}
response = post(self.django_client, request_url, data)
# Default calendar loads calendar for current month
response = get(self.django_client, calendar_url)
# Now contains icon for Project
assert "folder16.png" in response.content.decode("utf-8")
def test_calendar_month(self):
"""Test loading of calendar, specifying this month."""
now = datetime.now()
calendar_url = reverse("load_calendar", args=[now.year, now.month])
print('calendar_url', calendar_url)
response = get(self.django_client, calendar_url)
# Calendar is initially empty (no 'Dataset' icon)
assert "folder_image16.png" not in response.content.decode("utf-8")
# Add Dataset
request_url = reverse("manage_action_containers",
args=["addnewcontainer"])
data = {
'folder_type': 'dataset',
'name': 'foobar'
}
response = post(self.django_client, request_url, data)
# Now contains icon for Dataset
response = get(self.django_client, calendar_url)
assert "folder_image16.png" in response.content.decode("utf-8")
| gpl-2.0 |
michhar/flask-webapp-aml | env1/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py | 152 | 6305 | import base64
import io
import json
import zlib
from pip._vendor.requests.structures import CaseInsensitiveDict
from .compat import HTTPResponse, pickle, text_type
def _b64_encode_bytes(b):
return base64.b64encode(b).decode("ascii")
def _b64_encode_str(s):
return _b64_encode_bytes(s.encode("utf8"))
def _b64_encode(s):
if isinstance(s, text_type):
return _b64_encode_str(s)
return _b64_encode_bytes(s)
def _b64_decode_bytes(b):
return base64.b64decode(b.encode("ascii"))
def _b64_decode_str(s):
return _b64_decode_bytes(s).decode("utf8")
class Serializer(object):
def dumps(self, request, response, body=None):
response_headers = CaseInsensitiveDict(response.headers)
if body is None:
body = response.read(decode_content=False)
# NOTE: 99% sure this is dead code. I'm only leaving it
# here b/c I don't have a test yet to prove
# it. Basically, before using
# `cachecontrol.filewrapper.CallbackFileWrapper`,
# this made an effort to reset the file handle. The
# `CallbackFileWrapper` short circuits this code by
# setting the body as the content is consumed, the
# result being a `body` argument is *always* passed
# into cache_response, and in turn,
# `Serializer.dump`.
response._fp = io.BytesIO(body)
data = {
"response": {
"body": _b64_encode_bytes(body),
"headers": dict(
(_b64_encode(k), _b64_encode(v))
for k, v in response.headers.items()
),
"status": response.status,
"version": response.version,
"reason": _b64_encode_str(response.reason),
"strict": response.strict,
"decode_content": response.decode_content,
},
}
# Construct our vary headers
data["vary"] = {}
if "vary" in response_headers:
varied_headers = response_headers['vary'].split(',')
for header in varied_headers:
header = header.strip()
data["vary"][header] = request.headers.get(header, None)
# Encode our Vary headers to ensure they can be serialized as JSON
data["vary"] = dict(
(_b64_encode(k), _b64_encode(v) if v is not None else v)
for k, v in data["vary"].items()
)
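        # Frame the payload as b"cc=2," followed by zlib-compressed JSON;
        # loads() below keys off this version prefix.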
return b",".join([
b"cc=2",
zlib.compress(
json.dumps(
data, separators=(",", ":"), sort_keys=True,
).encode("utf8"),
),
])
def loads(self, request, data):
# Short circuit if we've been given an empty set of data
if not data:
return
# Determine what version of the serializer the data was serialized
# with
try:
ver, data = data.split(b",", 1)
except ValueError:
ver = b"cc=0"
# Make sure that our "ver" is actually a version and isn't a false
# positive from a , being in the data stream.
if ver[:3] != b"cc=":
data = ver + data
ver = b"cc=0"
# Get the version number out of the cc=N
ver = ver.split(b"=", 1)[-1].decode("ascii")
# Dispatch to the actual load method for the given version
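        # (e.g. ver == "2" selects _loads_v2 defined below)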
try:
return getattr(self, "_loads_v{0}".format(ver))(request, data)
except AttributeError:
# This is a version we don't have a loads function for, so we'll
# just treat it as a miss and return None
return
def prepare_response(self, request, cached):
"""Verify our vary headers match and construct a real urllib3
HTTPResponse object.
"""
# Special case the '*' Vary value as it means we cannot actually
# determine if the cached response is suitable for this request.
if "*" in cached.get("vary", {}):
return
# Ensure that the Vary headers for the cached response match our
# request
for header, value in cached.get("vary", {}).items():
if request.headers.get(header, None) != value:
return
body_raw = cached["response"].pop("body")
try:
body = io.BytesIO(body_raw)
except TypeError:
# This can happen if cachecontrol serialized to v1 format (pickle)
# using Python 2. A Python 2 str(byte string) will be unpickled as
# a Python 3 str (unicode string), which will cause the above to
# fail with:
#
# TypeError: 'str' does not support the buffer interface
body = io.BytesIO(body_raw.encode('utf8'))
return HTTPResponse(
body=body,
preload_content=False,
**cached["response"]
)
def _loads_v0(self, request, data):
# The original legacy cache data. This doesn't contain enough
# information to construct everything we need, so we'll treat this as
# a miss.
return
def _loads_v1(self, request, data):
try:
cached = pickle.loads(data)
except ValueError:
return
return self.prepare_response(request, cached)
def _loads_v2(self, request, data):
try:
cached = json.loads(zlib.decompress(data).decode("utf8"))
except ValueError:
return
# We need to decode the items that we've base64 encoded
cached["response"]["body"] = _b64_decode_bytes(
cached["response"]["body"]
)
cached["response"]["headers"] = dict(
(_b64_decode_str(k), _b64_decode_str(v))
for k, v in cached["response"]["headers"].items()
)
cached["response"]["reason"] = _b64_decode_str(
cached["response"]["reason"],
)
cached["vary"] = dict(
(_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
for k, v in cached["vary"].items()
)
return self.prepare_response(request, cached)
| mit |
JosmanPS/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial_fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
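            # incremental K-means update on this chunk of ~500 centered, scaled patches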
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
Lautitia/newfies-dialer | newfies/dialer_campaign/migrations/0003_auto__add_field_campaign_agent_script__add_field_campaign_lead_disposi.py | 9 | 17201 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Campaign.agent_script'
db.add_column(u'dialer_campaign', 'agent_script',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'Campaign.lead_disposition'
db.add_column(u'dialer_campaign', 'lead_disposition',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Campaign.agent_script'
db.delete_column(u'dialer_campaign', 'agent_script')
# Deleting field 'Campaign.lead_disposition'
db.delete_column(u'dialer_campaign', 'lead_disposition')
models = {
u'audiofield.audiofile': {
'Meta': {'object_name': 'AudioFile', 'db_table': "u'audio_file'"},
'audio_file': ('audiofield.fields.AudioField', [], {'ext_whitelist': "['.mp3', '.wav', '.ogg']", 'max_length': '100', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dialer_campaign.campaign': {
'Meta': {'object_name': 'Campaign', 'db_table': "u'dialer_campaign'"},
'agent_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'aleg_gateway': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'A-Leg Gateway'", 'to': u"orm['dialer_gateway.Gateway']"}),
'amd_behavior': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'caller_name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'callerid': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'callmaxduration': ('django.db.models.fields.IntegerField', [], {'default': "'1800'", 'null': 'True', 'blank': 'True'}),
'calltimeout': ('django.db.models.fields.IntegerField', [], {'default': "'45'", 'null': 'True', 'blank': 'True'}),
'campaign_code': ('django.db.models.fields.CharField', [], {'default': "'PDIWC'", 'unique': 'True', 'max_length': '20', 'blank': 'True'}),
'completed': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'completion_intervalretry': ('django.db.models.fields.IntegerField', [], {'default': "'900'", 'null': 'True', 'blank': 'True'}),
'completion_maxretry': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'daily_start_time': ('django.db.models.fields.TimeField', [], {'default': "'00:00:00'"}),
'daily_stop_time': ('django.db.models.fields.TimeField', [], {'default': "'23:59:59'"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dnc': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'DNC'", 'null': 'True', 'to': u"orm['dnc.DNC']"}),
'expirationdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 24, 0, 0)'}),
'extra_data': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'frequency': ('django.db.models.fields.IntegerField', [], {'default': "'10'", 'null': 'True', 'blank': 'True'}),
'friday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_been_duplicated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_been_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported_phonebook': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}),
'intervalretry': ('django.db.models.fields.IntegerField', [], {'default': "'300'", 'null': 'True', 'blank': 'True'}),
'lead_disposition': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'maxretry': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'null': 'True', 'blank': 'True'}),
'monday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'phonebook': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['dialer_contact.Phonebook']", 'null': 'True', 'blank': 'True'}),
'saturday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'startingdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 23, 0, 0)'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2', 'null': 'True', 'blank': 'True'}),
'sunday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'thursday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'totalcontact': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'tuesday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Campaign owner'", 'to': u"orm['auth.User']"}),
'voicemail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'voicemail_audiofile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['audiofield.AudioFile']", 'null': 'True', 'blank': 'True'}),
'wednesday': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'dialer_campaign.subscriber': {
'Meta': {'unique_together': "(['contact', 'campaign'],)", 'object_name': 'Subscriber', 'db_table': "u'dialer_subscriber'"},
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dialer_campaign.Campaign']", 'null': 'True', 'blank': 'True'}),
'completion_count_attempt': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dialer_contact.Contact']", 'null': 'True', 'blank': 'True'}),
'count_attempt': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'duplicate_contact': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_attempt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'})
},
u'dialer_contact.contact': {
'Meta': {'object_name': 'Contact', 'db_table': "u'dialer_contact'"},
'additional_vars': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'phonebook': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dialer_contact.Phonebook']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'unit_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dialer_contact.phonebook': {
'Meta': {'object_name': 'Phonebook', 'db_table': "u'dialer_phonebook'"},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Phonebook owner'", 'to': u"orm['auth.User']"})
},
u'dialer_gateway.gateway': {
'Meta': {'object_name': 'Gateway', 'db_table': "u'dialer_gateway'"},
'addparameter': ('django.db.models.fields.CharField', [], {'max_length': '360', 'blank': 'True'}),
'addprefix': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'count_call': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'count_in_use': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'failover': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'Failover Gateway'", 'null': 'True', 'to': u"orm['dialer_gateway.Gateway']"}),
'gateway_codecs': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'gateway_retries': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'gateway_timeouts': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'gateways': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maximum_call': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'originate_dial_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'removeprefix': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'secondused': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dnc.dnc': {
'Meta': {'object_name': 'DNC', 'db_table': "'dnc_list'"},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'DNC owner'", 'to': u"orm['auth.User']"})
}
}
complete_apps = ['dialer_campaign']
| mpl-2.0 |
sergiomb2/gdesklets | display/TargetBonoboControl.py | 2 | 1349 | from DisplayTarget import DisplayTarget
from utils.datatypes import *
import gtk
#
# Class for a target that embeds Bonobo controls.
#
class TargetBonoboControl(DisplayTarget):
def __init__(self, name, parent):
# the control; you can load a control only once
self.__control = None
self.__widget = gtk.HBox()
self.__widget.show()
DisplayTarget.__init__(self, name, parent)
self._register_property("oafiid", TYPE_STRING,
self._setp_oafiid, self._getp)
def get_widget(self): return self.__widget
def _setp_oafiid(self, key, value):
import bonobo.ui
try:
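            # instantiate the Bonobo control identified by the given OAFIID
            # inside a freshly created container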
container = bonobo.ui.Container()
control = bonobo.ui.Widget(str(value),
container.corba_objref())
pbag = control.get_control_frame().get_control_property_bag()
slots = pbag.getKeys("")
control.show()
if self.__control: # we have to remove the previous control
self.remove( self.__control )
self.__widget.add(control)
self.__control = control
self._setp(key, value)
except StandardError, exc:
log("Warning: An error occurred while setting the oafiid:\n%s" \
% (exc,))
| gpl-2.0 |
alrusdi/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/gdal/tests/test_envelope.py | 332 | 3742 | from django.contrib.gis.gdal import Envelope, OGRException
from django.utils import unittest
class TestPoint(object):
def __init__(self, x, y):
self.x = x
self.y = y
class EnvelopeTest(unittest.TestCase):
def setUp(self):
self.e = Envelope(0, 0, 5, 5)
def test01_init(self):
"Testing Envelope initilization."
e1 = Envelope((0, 0, 5, 5))
e2 = Envelope(0, 0, 5, 5)
e3 = Envelope(0, '0', '5', 5) # Thanks to ww for this
e4 = Envelope(e1._envelope)
self.assertRaises(OGRException, Envelope, (5, 5, 0, 0))
self.assertRaises(OGRException, Envelope, 5, 5, 0, 0)
self.assertRaises(OGRException, Envelope, (0, 0, 5, 5, 3))
self.assertRaises(OGRException, Envelope, ())
self.assertRaises(ValueError, Envelope, 0, 'a', 5, 5)
self.assertRaises(TypeError, Envelope, u'foo')
self.assertRaises(OGRException, Envelope, (1, 1, 0, 0))
try:
Envelope(0, 0, 0, 0)
except OGRException:
self.fail("shouldn't raise an exception for min_x == max_x or min_y == max_y")
def test02_properties(self):
"Testing Envelope properties."
e = Envelope(0, 0, 2, 3)
self.assertEqual(0, e.min_x)
self.assertEqual(0, e.min_y)
self.assertEqual(2, e.max_x)
self.assertEqual(3, e.max_y)
self.assertEqual((0, 0), e.ll)
self.assertEqual((2, 3), e.ur)
self.assertEqual((0, 0, 2, 3), e.tuple)
self.assertEqual('POLYGON((0.0 0.0,0.0 3.0,2.0 3.0,2.0 0.0,0.0 0.0))', e.wkt)
self.assertEqual('(0.0, 0.0, 2.0, 3.0)', str(e))
def test03_equivalence(self):
"Testing Envelope equivalence."
e1 = Envelope(0.523, 0.217, 253.23, 523.69)
e2 = Envelope((0.523, 0.217, 253.23, 523.69))
self.assertEqual(e1, e2)
self.assertEqual((0.523, 0.217, 253.23, 523.69), e1)
def test04_expand_to_include_pt_2_params(self):
"Testing Envelope expand_to_include -- point as two parameters."
self.e.expand_to_include(2, 6)
self.assertEqual((0, 0, 5, 6), self.e)
self.e.expand_to_include(-1, -1)
self.assertEqual((-1, -1, 5, 6), self.e)
def test05_expand_to_include_pt_2_tuple(self):
"Testing Envelope expand_to_include -- point as a single 2-tuple parameter."
self.e.expand_to_include((10, 10))
self.assertEqual((0, 0, 10, 10), self.e)
self.e.expand_to_include((-10, -10))
self.assertEqual((-10, -10, 10, 10), self.e)
def test06_expand_to_include_extent_4_params(self):
"Testing Envelope expand_to_include -- extent as 4 parameters."
self.e.expand_to_include(-1, 1, 3, 7)
self.assertEqual((-1, 0, 5, 7), self.e)
def test06_expand_to_include_extent_4_tuple(self):
"Testing Envelope expand_to_include -- extent as a single 4-tuple parameter."
self.e.expand_to_include((-1, 1, 3, 7))
self.assertEqual((-1, 0, 5, 7), self.e)
def test07_expand_to_include_envelope(self):
"Testing Envelope expand_to_include with Envelope as parameter."
self.e.expand_to_include(Envelope(-1, 1, 3, 7))
self.assertEqual((-1, 0, 5, 7), self.e)
def test08_expand_to_include_point(self):
"Testing Envelope expand_to_include with Point as parameter."
self.e.expand_to_include(TestPoint(-1, 1))
self.assertEqual((-1, 0, 5, 5), self.e)
self.e.expand_to_include(TestPoint(10, 10))
self.assertEqual((-1, 0, 10, 10), self.e)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(EnvelopeTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| gpl-3.0 |
GrandmasterK/XScheduler | venv/lib/python2.7/site-packages/rsa/pkcs1.py | 75 | 13170 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions for PKCS#1 version 1.5 encryption and signing
This module implements certain functionality from PKCS#1 version 1.5. For a
very clear example, read http://www.di-mgt.com.au/rsa_alg.html#pkcs1schemes
At least 8 bytes of random padding are used when encrypting a message. This makes
these methods much more secure than the ones in the ``rsa`` module.
WARNING: this module leaks information when decryption or verification fails.
The exceptions that are raised contain the Python traceback information, which
can be used to deduce where in the process the failure occurred. DO NOT PASS
SUCH INFORMATION to your users.
'''
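# Illustrative usage (comment added for clarity; it mirrors the doctests of
# encrypt(), decrypt(), sign() and verify() below):
#
#   (pub_key, priv_key) = rsa.newkeys(512)   # 512 bits so the SHA-256 signature fits
#   crypto = encrypt('hello', pub_key)
#   decrypt(crypto, priv_key)                 # -> 'hello'
#   signature = sign('hello', priv_key, 'SHA-256')
#   verify('hello', signature, pub_key)       # -> True, or raises VerificationError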
import hashlib
import os
from rsa._compat import b
from rsa import common, transform, core, varblock
# ASN.1 codes that describe the hash algorithm used.
HASH_ASN1 = {
'MD5': b('\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10'),
'SHA-1': b('\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'),
'SHA-256': b('\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'),
'SHA-384': b('\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30'),
'SHA-512': b('\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40'),
}
HASH_METHODS = {
'MD5': hashlib.md5,
'SHA-1': hashlib.sha1,
'SHA-256': hashlib.sha256,
'SHA-384': hashlib.sha384,
'SHA-512': hashlib.sha512,
}
class CryptoError(Exception):
'''Base class for all exceptions in this module.'''
class DecryptionError(CryptoError):
'''Raised when decryption fails.'''
class VerificationError(CryptoError):
'''Raised when verification fails.'''
def _pad_for_encryption(message, target_length):
r'''Pads the message for encryption, returning the padded message.
:return: 00 02 RANDOM_DATA 00 MESSAGE
>>> block = _pad_for_encryption('hello', 16)
>>> len(block)
16
>>> block[0:2]
'\x00\x02'
>>> block[-6:]
'\x00hello'
'''
max_msglength = target_length - 11
msglength = len(message)
if msglength > max_msglength:
raise OverflowError('%i bytes needed for message, but there is only'
' space for %i' % (msglength, max_msglength))
# Get random padding
padding = b('')
padding_length = target_length - msglength - 3
# We remove 0-bytes, so we'll end up with less padding than we've asked for,
# so keep adding data until we're at the correct length.
while len(padding) < padding_length:
needed_bytes = padding_length - len(padding)
# Always read at least 8 bytes more than we need, and trim off the rest
# after removing the 0-bytes. This increases the chance of getting
# enough bytes, especially when needed_bytes is small
new_padding = os.urandom(needed_bytes + 5)
new_padding = new_padding.replace(b('\x00'), b(''))
padding = padding + new_padding[:needed_bytes]
assert len(padding) == padding_length
return b('').join([b('\x00\x02'),
padding,
b('\x00'),
message])
def _pad_for_signing(message, target_length):
r'''Pads the message for signing, returning the padded message.
The padding is always a repetition of FF bytes.
:return: 00 01 PADDING 00 MESSAGE
>>> block = _pad_for_signing('hello', 16)
>>> len(block)
16
>>> block[0:2]
'\x00\x01'
>>> block[-6:]
'\x00hello'
>>> block[2:-6]
'\xff\xff\xff\xff\xff\xff\xff\xff'
'''
max_msglength = target_length - 11
msglength = len(message)
if msglength > max_msglength:
raise OverflowError('%i bytes needed for message, but there is only'
' space for %i' % (msglength, max_msglength))
padding_length = target_length - msglength - 3
return b('').join([b('\x00\x01'),
padding_length * b('\xff'),
b('\x00'),
message])
def encrypt(message, pub_key):
'''Encrypts the given message using PKCS#1 v1.5
:param message: the message to encrypt. Must be a byte string no longer than
``k-11`` bytes, where ``k`` is the number of bytes needed to encode
the ``n`` component of the public key.
:param pub_key: the :py:class:`rsa.PublicKey` to encrypt with.
:raise OverflowError: when the message is too large to fit in the padded
block.
>>> from rsa import key, common
>>> (pub_key, priv_key) = key.newkeys(256)
>>> message = 'hello'
>>> crypto = encrypt(message, pub_key)
The crypto text should be just as long as the public key 'n' component:
>>> len(crypto) == common.byte_size(pub_key.n)
True
'''
keylength = common.byte_size(pub_key.n)
padded = _pad_for_encryption(message, keylength)
payload = transform.bytes2int(padded)
encrypted = core.encrypt_int(payload, pub_key.e, pub_key.n)
block = transform.int2bytes(encrypted, keylength)
return block
def decrypt(crypto, priv_key):
r'''Decrypts the given message using PKCS#1 v1.5
The decryption is considered 'failed' when the resulting cleartext doesn't
start with the bytes 00 02, or when the 00 byte between the padding and
the message cannot be found.
:param crypto: the crypto text as returned by :py:func:`rsa.encrypt`
:param priv_key: the :py:class:`rsa.PrivateKey` to decrypt with.
:raise DecryptionError: when the decryption fails. No details are given as
to why the code thinks the decryption fails, as this would leak
information about the private key.
>>> import rsa
>>> (pub_key, priv_key) = rsa.newkeys(256)
It works with strings:
>>> crypto = encrypt('hello', pub_key)
>>> decrypt(crypto, priv_key)
'hello'
And with binary data:
>>> crypto = encrypt('\x00\x00\x00\x00\x01', pub_key)
>>> decrypt(crypto, priv_key)
'\x00\x00\x00\x00\x01'
Altering the encrypted information will *likely* cause a
:py:class:`rsa.pkcs1.DecryptionError`. If you want to be *sure*, use
:py:func:`rsa.sign`.
.. warning::
Never display the stack trace of a
:py:class:`rsa.pkcs1.DecryptionError` exception. It shows where in the
code the exception occurred, and thus leaks information about the key.
It's only a tiny bit of information, but every bit makes cracking the
keys easier.
>>> crypto = encrypt('hello', pub_key)
>>> crypto = crypto[0:5] + 'X' + crypto[6:] # change a byte
>>> decrypt(crypto, priv_key)
Traceback (most recent call last):
...
DecryptionError: Decryption failed
'''
blocksize = common.byte_size(priv_key.n)
encrypted = transform.bytes2int(crypto)
decrypted = core.decrypt_int(encrypted, priv_key.d, priv_key.n)
cleartext = transform.int2bytes(decrypted, blocksize)
# If we can't find the cleartext marker, decryption failed.
if cleartext[0:2] != b('\x00\x02'):
raise DecryptionError('Decryption failed')
# Find the 00 separator between the padding and the message
try:
sep_idx = cleartext.index(b('\x00'), 2)
except ValueError:
raise DecryptionError('Decryption failed')
return cleartext[sep_idx+1:]
def sign(message, priv_key, hash):
'''Signs the message with the private key.
Hashes the message, then signs the hash with the given key. This is known
as a "detached signature", because the message itself isn't altered.
:param message: the message to sign. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param priv_key: the :py:class:`rsa.PrivateKey` to sign with
:param hash: the hash method used on the message. Use 'MD5', 'SHA-1',
'SHA-256', 'SHA-384' or 'SHA-512'.
:return: a message signature block.
:raise OverflowError: if the private key is too small to contain the
requested hash.
'''
# Get the ASN1 code for this hash method
if hash not in HASH_ASN1:
raise ValueError('Invalid hash method: %s' % hash)
asn1code = HASH_ASN1[hash]
# Calculate the hash
hash = _hash(message, hash)
# Encrypt the hash with the private key
cleartext = asn1code + hash
keylength = common.byte_size(priv_key.n)
padded = _pad_for_signing(cleartext, keylength)
payload = transform.bytes2int(padded)
encrypted = core.encrypt_int(payload, priv_key.d, priv_key.n)
block = transform.int2bytes(encrypted, keylength)
return block
def verify(message, signature, pub_key):
'''Verifies that the signature matches the message.
The hash method is detected automatically from the signature.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param signature: the signature block, as created with :py:func:`rsa.sign`.
:param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
:raise VerificationError: when the signature doesn't match the message.
.. warning::
Never display the stack trace of a
:py:class:`rsa.pkcs1.VerificationError` exception. It shows where in
the code the exception occurred, and thus leaks information about the
key. It's only a tiny bit of information, but every bit makes cracking
the keys easier.
'''
blocksize = common.byte_size(pub_key.n)
encrypted = transform.bytes2int(signature)
decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
clearsig = transform.int2bytes(decrypted, blocksize)
# If we can't find the signature marker, verification failed.
if clearsig[0:2] != b('\x00\x01'):
raise VerificationError('Verification failed')
# Find the 00 separator between the padding and the payload
try:
sep_idx = clearsig.index(b('\x00'), 2)
except ValueError:
raise VerificationError('Verification failed')
# Get the hash and the hash method
(method_name, signature_hash) = _find_method_hash(clearsig[sep_idx+1:])
message_hash = _hash(message, method_name)
# Compare the real hash to the hash in the signature
if message_hash != signature_hash:
raise VerificationError('Verification failed')
return True
def _hash(message, method_name):
'''Returns the message digest.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param method_name: the hash method, must be a key of
:py:const:`HASH_METHODS`.
'''
if method_name not in HASH_METHODS:
raise ValueError('Invalid hash method: %s' % method_name)
method = HASH_METHODS[method_name]
hasher = method()
if hasattr(message, 'read') and hasattr(message.read, '__call__'):
# read as 1K blocks
for block in varblock.yield_fixedblocks(message, 1024):
hasher.update(block)
else:
# hash the message object itself.
hasher.update(message)
return hasher.digest()
def _find_method_hash(method_hash):
'''Finds the hash method and the hash itself.
:param method_hash: ASN1 code for the hash method concatenated with the
hash itself.
:return: tuple (method, hash) where ``method`` is the used hash method, and
``hash`` is the hash itself.
    :raise VerificationError: when the hash method cannot be found
'''
for (hashname, asn1code) in HASH_ASN1.items():
if not method_hash.startswith(asn1code):
continue
return (hashname, method_hash[len(asn1code):])
raise VerificationError('Verification failed')
__all__ = ['encrypt', 'decrypt', 'sign', 'verify',
'DecryptionError', 'VerificationError', 'CryptoError']
if __name__ == '__main__':
print('Running doctests 1000x or until failure')
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count and count % 100 == 0:
print('%i times' % count)
print('Doctests done')
| mit |
higgintop/hca_code_project | node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/win_tool.py | 379 | 11640 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
"""Allows to use a unique instance of mspdbsrv.exe per linker instead of a
shared one."""
if len(args) < 1:
raise Exception("Not enough arguments")
if args[0] != 'link.exe':
return
# Use the output filename passed to the linker to generate an endpoint name
# for mspdbsrv.exe.
endpoint_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
endpoint_name = '%s_%d' % (m.group('out'), os.getpid())
break
if endpoint_name is None:
return
# Adds the appropriate environment variable. This will be read by link.exe
# to know which instance of mspdbsrv.exe it should connect to (if it's
# not set then the default endpoint is used).
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
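    # For example (illustrative values), a saved file containing
    # 'PATH=C:\\tools\x00TMP=C:\\temp\x00\x00' parses to
    # {'PATH': 'C:\\tools', 'TMP': 'C:\\temp'}.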
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
shutil.rmtree(dest)
else:
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
env = self._GetEnv(arch)
if use_separate_mspdbsrv == 'True':
self._UseSeparateMspdbsrv(env, args)
link = subprocess.Popen(args,
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = link.communicate()
for line in out.splitlines():
if not line.startswith(' Creating library '):
print line
return link.returncode
def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
mt, rc, intermediate_manifest, *manifests):
"""A wrapper for handling creating a manifest resource and then executing
a link command."""
# The 'normal' way to do manifests is to have link generate a manifest
# based on gathering dependencies from the object files, then merge that
# manifest with other manifests supplied as sources, convert the merged
# manifest to a resource, and then *relink*, including the compiled
# version of the manifest resource. This breaks incremental linking, and
# is generally overly complicated. Instead, we merge all the manifests
# provided (along with one that includes what would normally be in the
# linker-generated one, see msvs_emulation.py), and include that into the
# first and only link. We still tell link to generate a manifest, but we
# only use that to assert that our simpler process did not miss anything.
variables = {
'python': sys.executable,
'arch': arch,
'out': out,
'ldcmd': ldcmd,
'resname': resname,
'mt': mt,
'rc': rc,
'intermediate_manifest': intermediate_manifest,
'manifests': ' '.join(manifests),
}
add_to_ld = ''
if manifests:
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(manifests)s -out:%(out)s.manifest' % variables)
if embed_manifest == 'True':
subprocess.check_call(
'%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
' %(out)s.manifest.rc %(resname)s' % variables)
subprocess.check_call(
'%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
'%(out)s.manifest.rc' % variables)
add_to_ld = ' %(out)s.manifest.res' % variables
subprocess.check_call(ldcmd + add_to_ld)
# Run mt.exe on the theoretically complete manifest we generated, merging
# it with the one the linker generated to confirm that the linker
# generated one does not add anything. This is strictly unnecessary for
# correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
# used in a #pragma comment.
if manifests:
# Merge the intermediate one with ours to .assert.manifest, then check
# that .assert.manifest is identical to ours.
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(out)s.manifest %(intermediate_manifest)s '
'-out:%(out)s.assert.manifest' % variables)
assert_manifest = '%(out)s.assert.manifest' % variables
our_manifest = '%(out)s.manifest' % variables
# Load and normalize the manifests. mt.exe sometimes removes whitespace,
# and sometimes doesn't unfortunately.
with open(our_manifest, 'rb') as our_f:
with open(assert_manifest, 'rb') as assert_f:
our_data = our_f.read().translate(None, string.whitespace)
assert_data = assert_f.read().translate(None, string.whitespace)
if our_data != assert_data:
os.unlink(out)
def dump(filename):
sys.stderr.write('%s\n-----\n' % filename)
with open(filename, 'rb') as f:
sys.stderr.write(f.read() + '\n-----\n')
dump(intermediate_manifest)
dump(our_manifest)
dump(assert_manifest)
sys.stderr.write(
'Linker generated manifest "%s" added to final manifest "%s" '
'(result in "%s"). '
'Were /MANIFEST switches used in #pragma statements? ' % (
intermediate_manifest, our_manifest, assert_manifest))
return 1
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print line
return popen.returncode
def ExecManifestToRc(self, arch, *args):
"""Creates a resource file pointing a SxS assembly manifest.
|args| is tuple containing path to resource file, path to manifest file
and resource name which can be "1" (for executables) or "2" (for DLLs)."""
manifest_path, resource_path, resource_name = args
with open(resource_path, 'wb') as output:
output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
resource_name,
os.path.abspath(manifest_path).replace('\\', '/')))
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefix = 'Processing '
processing = set(os.path.basename(x) for x in lines if x.startswith(prefix))
for line in lines:
if not line.startswith(prefix) and line not in processing:
print line
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
# MSVS doesn't assemble x64 asm files.
if arch == 'environment.x64':
return 0
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print line
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print line
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after gyp-time. http://crbug.com/333738.
for k, v in os.environ.iteritems():
if k not in env:
env[k] = v
args = open(rspfile).read()
dir = dir[0] if dir else None
return subprocess.call(args, shell=True, env=env, cwd=dir)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit |
joariasl/odoo | addons/account_check_writing/report/check_print.py | 320 | 2943 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class report_print_check(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(report_print_check, self).__init__(cr, uid, name, context)
self.number_lines = 0
self.number_add = 0
self.localcontext.update({
'time': time,
'get_lines': self.get_lines,
'fill_stars' : self.fill_stars,
})
def fill_stars(self, amount):
if len(amount) < 100:
stars = 100 - len(amount)
return ' '.join([amount,'*'*stars])
else: return amount
def get_lines(self, voucher_lines):
result = []
self.number_lines = len(voucher_lines)
for i in range(0, min(10,self.number_lines)):
if i < self.number_lines:
res = {
'date_due' : voucher_lines[i].date_due,
'name' : voucher_lines[i].name,
'amount_original' : voucher_lines[i].amount_original and voucher_lines[i].amount_original or False,
'amount_unreconciled' : voucher_lines[i].amount_unreconciled and voucher_lines[i].amount_unreconciled or False,
'amount' : voucher_lines[i].amount and voucher_lines[i].amount or False,
}
else :
res = {
'date_due' : False,
'name' : False,
'amount_original' : False,
'amount_due' : False,
'amount' : False,
}
result.append(res)
return result
class report_check(osv.AbstractModel):
_name = 'report.account_check_writing.report_check'
_inherit = 'report.abstract_report'
_template = 'account_check_writing.report_check'
_wrapped_report_class = report_print_check
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
anantb/confer | server/settings.py | 1 | 5344 | # Django settings for confer project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'confer', # Or path to database file if using sqlite3.
'USER': 'root', # Not used with sqlite3.
'PASSWORD': 'koob', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '5432', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'gb04mossx%*@tqvjhl3&00=4fv!bsj*4ze9+x7xx5v6m*5l5_*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'server.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'server.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
#'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'server',
'south',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# server_settings
try:
from server_settings import *
USE_X_FORWARDED_HOST = True
except ImportError:
pass
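# The wildcard import above lets a deployment drop a local ``server_settings.py``
# next to this file to override anything defined here. A minimal hypothetical
# sketch of such a file (names and values are illustrative only):
#
#     DEBUG = False
#     ALLOWED_HOSTS = ['confer.example.org']
#     SECRET_KEY = 'generate-a-fresh-secret-for-production'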
| mit |
nmercier/linux-cross-gcc | linux/lib/python2.7/encodings/cp855.py | 593 | 34106 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp855',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE
0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE
0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x0084: 0x0451, # CYRILLIC SMALL LETTER IO
0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE
0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x008c: 0x0457, # CYRILLIC SMALL LETTER YI
0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x008e: 0x0458, # CYRILLIC SMALL LETTER JE
0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE
0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE
0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE
0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE
0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x009c: 0x044e, # CYRILLIC SMALL LETTER YU
0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF
0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00b7: 0x0438, # CYRILLIC SMALL LETTER I
0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA
0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL
0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM
0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN
0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00d6: 0x043e, # CYRILLIC SMALL LETTER O
0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00de: 0x044f, # CYRILLIC SMALL LETTER YA
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00e7: 0x0443, # CYRILLIC SMALL LETTER U
0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE
0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00ef: 0x2116, # NUMERO SIGN
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00f7: 0x044d, # CYRILLIC SMALL LETTER E
0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00fd: 0x00a7, # SECTION SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE
u'\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE
u'\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE
u'\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE
u'\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO
u'\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO
u'\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE
u'\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE
u'\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0x008c -> CYRILLIC SMALL LETTER YI
u'\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI
u'\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE
u'\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE
u'\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE
u'\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE
u'\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE
u'\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER NJE
u'\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE
u'\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE
u'\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE
u'\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE
u'\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U
u'\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE
u'\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE
u'\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU
u'\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU
u'\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
u'\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A
u'\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE
u'\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE
u'\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE
u'\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE
u'\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE
u'\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE
u'\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE
u'\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE
u'\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF
u'\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF
u'\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE
u'\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA
u'\u0425' # 0x00b6 -> CYRILLIC CAPITAL LETTER HA
u'\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I
u'\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I
u'\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA
u'\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL
u'\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL
u'\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM
u'\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM
u'\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN
u'\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN
u'\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O
u'\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O
u'\u043f' # 0x00d8 -> CYRILLIC SMALL LETTER PE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u041f' # 0x00dd -> CYRILLIC CAPITAL LETTER PE
u'\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA
u'\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER
u'\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER
u'\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES
u'\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES
u'\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE
u'\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE
u'\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U
u'\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U
u'\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE
u'\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE
u'\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE
u'\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE
u'\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u2116' # 0x00ef -> NUMERO SIGN
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU
u'\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU
u'\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE
u'\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE
u'\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA
u'\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA
u'\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E
u'\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E
u'\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA
u'\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE
u'\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE
u'\xa7' # 0x00fd -> SECTION SIGN
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00fd, # SECTION SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ad: 0x00f0, # SOFT HYPHEN
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO
0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE
0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE
0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0405: 0x0089, # CYRILLIC CAPITAL LETTER DZE
0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI
0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE
0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE
0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE
0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE
0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE
0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U
0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE
0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A
0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I
0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O
0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U
0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE
0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE
0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE
0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00b7, # CYRILLIC SMALL LETTER I
0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA
0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL
0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM
0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN
0x043e: 0x00d6, # CYRILLIC SMALL LETTER O
0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e7, # CYRILLIC SMALL LETTER U
0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF
0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00f7, # CYRILLIC SMALL LETTER E
0x044e: 0x009c, # CYRILLIC SMALL LETTER YU
0x044f: 0x00de, # CYRILLIC SMALL LETTER YA
0x0451: 0x0084, # CYRILLIC SMALL LETTER IO
0x0452: 0x0080, # CYRILLIC SMALL LETTER DJE
0x0453: 0x0082, # CYRILLIC SMALL LETTER GJE
0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE
0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x0457: 0x008c, # CYRILLIC SMALL LETTER YI
0x0458: 0x008e, # CYRILLIC SMALL LETTER JE
0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE
0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE
0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE
0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE
0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U
0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE
0x2116: 0x00ef, # NUMERO SIGN
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
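# Quick round-trip sanity check of the tables above -- a sketch only, not part
# of the gencodec.py output. The byte/character pair is taken from the mapping
# itself (0xEA <-> U+0416, CYRILLIC CAPITAL LETTER ZHE).
if __name__ == '__main__':
    assert codecs.charmap_decode('\xea', 'strict', decoding_table)[0] == u'\u0416'
    assert codecs.charmap_encode(u'\u0416', 'strict', encoding_map)[0] == '\xea'
    print 'cp855 round-trip OK'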
| bsd-3-clause |
J861449197/edx-platform | common/test/acceptance/pages/studio/pagination.py | 165 | 2099 | """
Mixin to include for Paginated container pages
"""
from selenium.webdriver.common.keys import Keys
class PaginatedMixin(object):
"""
Mixin class used for paginated page tests.
"""
def nav_disabled(self, position, arrows=('next', 'previous')):
"""
Verifies that pagination nav is disabled. Position can be 'top' or 'bottom'.
`top` is the header, `bottom` is the footer.
        To check only one arrow, pass an iterable with a single element: 'next' or 'previous'.
"""
return all([
self.q(css='nav.%s * .%s-page-link.is-disabled' % (position, arrow))
for arrow in arrows
])
def move_back(self, position):
"""
        Clicks one of the back (previous) nav buttons. Position can be 'top' or 'bottom'.
"""
self.q(css='nav.%s * .previous-page-link' % position)[0].click()
self.wait_until_ready()
def move_forward(self, position):
"""
Clicks one of the forward nav buttons. Position can be 'top' or 'bottom'.
"""
self.q(css='nav.%s * .next-page-link' % position)[0].click()
self.wait_until_ready()
def go_to_page(self, number):
"""
Enter a number into the page number input field, and then try to navigate to it.
"""
page_input = self.q(css="#page-number-input")[0]
page_input.click()
page_input.send_keys(str(number))
page_input.send_keys(Keys.RETURN)
self.wait_until_ready()
def get_page_number(self):
"""
Returns the page number as the page represents it, in string form.
"""
return self.q(css="span.current-page")[0].get_attribute('innerHTML')
def check_page_unchanged(self, first_block_name):
"""
Used to make sure that a page has not transitioned after a bogus number is given.
"""
if not self.xblocks[0].name == first_block_name:
return False
if not self.q(css='#page-number-input')[0].get_attribute('value') == '':
return False
return True
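# Usage sketch (hypothetical page object -- the concrete base class, CSS
# selectors and ``xblocks`` property come from whatever page mixes this in):
#
#     class LibraryPage(PaginatedMixin, PageObject):
#         ...
#
#     page = LibraryPage(browser).visit()
#     page.go_to_page(2)
#     assert page.get_page_number() == '2'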
| agpl-3.0 |
SergeyPirogov/selene | selene/conditions.py | 1 | 9603 | from abc import ABCMeta, abstractmethod
import operator
from future.utils import with_metaclass, lmap
from selene.abctypes.conditions import IEntityCondition
from selene.abctypes.webdriver import IWebDriver
from selene.abctypes.webelement import IWebElement
from selene.exceptions import ConditionMismatchException
class OrNotToBe(IEntityCondition):
def description(self):
return self.__class__.__name__
def fn(self, entity):
return entity
or_not_to_be = OrNotToBe()
class Not(IEntityCondition):
def __init__(self, condition):
# type: (IEntityCondition) -> None
self._condition = condition
def description(self):
return 'not {}'.format(self._condition.description())
def fn(self, entity):
try:
self._condition.fn(entity)
except Exception as reason:
return reason
raise ConditionMismatchException() # todo: add more information to message
not_ = Not
# *** WebDriver Conditions ***
class WebDriverCondition(with_metaclass(ABCMeta, IEntityCondition)):
@abstractmethod
def fn(self, webdriver):
pass
def description(self):
return self.__class__.__name__
class JsReturnedTrue(WebDriverCondition):
def __init__(self, script_to_return_bool):
self.script = script_to_return_bool
def fn(self, webdriver):
# type: (IWebDriver) -> bool
result = webdriver.execute_script(self.script)
if not result:
raise ConditionMismatchException(
expected='''script: {script}
\t\t to return: true'''.format(script=self.script),
actual='''returned: {result}'''.format(result=result))
js_returned_true = JsReturnedTrue
class Title(WebDriverCondition):
def __init__(self, exact_value):
self.expected = exact_value
def fn(self, webdriver):
# type: (IWebDriver) -> bool
actual = webdriver.title
if not self.expected == actual:
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
title = Title
class TitleContaining(WebDriverCondition):
def __init__(self, partial_value):
self.expected = partial_value
def fn(self, webdriver):
# type: (IWebDriver) -> bool
actual = webdriver.title
        if self.expected not in actual:
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
title_containing = TitleContaining
class Url(WebDriverCondition):
def __init__(self, exact_value):
self.expected = exact_value
def fn(self, webdriver):
actual = webdriver.current_url
if not self.expected == actual:
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
url = Url
class UrlContaining(WebDriverCondition):
def __init__(self, partial_value):
self.expected = partial_value
def fn(self, webdriver):
actual = webdriver.current_url
        if self.expected not in actual:
raise ConditionMismatchException(
message="Page url doesn't contain {}".format(self.expected),
expected=self.expected,
actual=actual)
url_containing = UrlContaining
# *** Element Conditions ***
class ElementCondition(with_metaclass(ABCMeta, IEntityCondition)):
def description(self):
return self.__class__.__name__
def fn(self, element):
# type: (SeleneElement) -> IWebElement
return self.match(element.get_actual_webelement())
@abstractmethod
def match(self, webelement):
# type: (IWebElement) -> IWebElement
pass
def is_matched(condition, webelement):
# type: (ElementCondition, IWebElement) -> bool
try:
condition.match(webelement)
return True
except Exception:
return False
class Visible(ElementCondition):
def match(self, webelement):
# type: (SeleneElement) -> IWebElement
if not webelement.is_displayed():
raise ConditionMismatchException()
return webelement
visible = Visible()
appear = visible
class Hidden(ElementCondition):
def match(self, webelement):
# type: (SeleneElement) -> IWebElement
if webelement.is_displayed():
raise ConditionMismatchException()
return webelement
hidden = Hidden()
disappear = hidden
# todo: consider removing this condition... because it can confuse somebody...
# it's actually kind of "pseudo-clickable", the actual "clickability" depends on js events...
# todo: implement as and_(displayed, enabled)
class Clickable(ElementCondition):
def match(self, webelement):
# type: (IWebElement) -> IWebElement
actual_displayed = webelement.is_displayed()
actual_enabled = webelement.is_enabled()
if not (actual_displayed and actual_enabled):
raise ConditionMismatchException(
expected='displayed and enabled',
actual='displayed: {displayed}, enabled: {enabled}'.format(
displayed=actual_displayed, enabled=actual_enabled))
return webelement
clickable = Clickable()
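# Example of evaluating a condition directly against a raw WebElement through
# the ``is_matched`` helper above (the ``webelement`` is assumed to come from
# an existing WebDriver session):
#
#     if is_matched(clickable, webelement):
#         webelement.click()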
class Enabled(ElementCondition):
def match(self, webelement):
# type: (SeleneElement) -> IWebElement
if not webelement.is_enabled():
raise ConditionMismatchException()
return webelement
enabled = Enabled()
class InDom(ElementCondition):
"""
    checks if the element exists in the DOM
"""
def match(self, webelement):
return webelement
in_dom = InDom()
exist = in_dom
class Text(ElementCondition):
def __init__(self, expected_text):
self.expected_text = expected_text
def match(self, webelement):
actual_text = webelement.text
if self.expected_text not in actual_text:
raise ConditionMismatchException(expected=self.expected_text, actual=actual_text)
return webelement
text = Text
class ExactText(ElementCondition):
def __init__(self, expected_text):
self.expected_text = expected_text
def match(self, webelement):
actual_text = webelement.text
if not self.expected_text == actual_text:
raise ConditionMismatchException(expected=self.expected_text, actual=actual_text)
return webelement
exact_text = ExactText
class CssClass(ElementCondition):
def __init__(self, expected):
self.expected = expected
def match(self, webelement):
actual = webelement.get_attribute("class")
if self.expected not in actual.split():
raise ConditionMismatchException(expected=self.expected, actual='class attribute: {}'.format(actual))
return webelement
css_class = CssClass
class Attribute(ElementCondition):
def __init__(self, name, value):
self.name = name
self.value = value
def match(self, webelement):
actual = webelement.get_attribute(self.name)
if not self.value == actual:
raise ConditionMismatchException(
expected='{name}="{value}"'.format(name=self.name, value=self.value),
actual='{name}="{value}"'.format(name=self.name, value=actual))
return webelement
attribute = Attribute
def value(val):
return Attribute('value', val)
blank = value('')
# *** Collection Conditions ***
class CollectionCondition(with_metaclass(ABCMeta, IEntityCondition)):
def description(self):
return self.__class__.__name__
def fn(self, elements):
# type: (SeleneCollection) -> List[IWebElement]
return self.match(elements.get_actual_webelements())
@abstractmethod
def match(self, webelements):
# type: (List[IWebElement]) -> List[IWebElement]
pass
class Texts(CollectionCondition):
def __init__(self, *expected):
self.expected = expected
def match(self, webelements):
actual = [it.text for it in webelements]
if not (len(actual) == len(self.expected) and all(lmap(operator.contains, actual, self.expected))):
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
return webelements
texts = Texts
class ExactTexts(CollectionCondition):
def __init__(self, *expected):
self.expected = expected
def match(self, webelements):
actual = [it.text for it in webelements]
if not (len(actual) == len(self.expected) and all(lmap(operator.eq, actual, self.expected))):
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
return webelements
exact_texts = ExactTexts
class Size(CollectionCondition):
def __init__(self, expected):
self.expected = expected
def match(self, webelements):
actual = len(webelements)
if not actual == self.expected:
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
return webelements
size = Size
empty = size(0)
class SizeAtLeast(CollectionCondition):
def __init__(self, expected):
self.expected = expected
def match(self, webelements):
actual = len(webelements)
if not actual >= self.expected:
raise ConditionMismatchException(
expected='>= {}'.format(self.expected),
actual=actual)
return webelements
size_at_least = SizeAtLeast
| mit |
unindented/streamcode | client/static/jsrepl/extern/python/unclosured/lib/python2.7/mutex.py | 243 | 1877 | """Mutual exclusion -- for use with module sched
A mutex has two pieces of state -- a 'locked' bit and a queue.
When the mutex is not locked, the queue is empty.
Otherwise, the queue contains 0 or more (function, argument) pairs
representing functions (or methods) waiting to acquire the lock.
When the mutex is unlocked while the queue is not empty,
the first queue entry is removed and its function(argument) pair called,
implying it now has the lock.
Of course, no multi-threading is implied -- hence the funny interface
for lock, where a function is called once the lock is acquired.
"""
from warnings import warnpy3k
warnpy3k("the mutex module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
from collections import deque
class mutex:
def __init__(self):
"""Create a new mutex -- initially unlocked."""
self.locked = False
self.queue = deque()
def test(self):
"""Test the locked bit of the mutex."""
return self.locked
def testandset(self):
"""Atomic test-and-set -- grab the lock if it is not set,
return True if it succeeded."""
if not self.locked:
self.locked = True
return True
else:
return False
def lock(self, function, argument):
"""Lock a mutex, call the function with supplied argument
when it is acquired. If the mutex is already locked, place
function and argument in the queue."""
if self.testandset():
function(argument)
else:
self.queue.append((function, argument))
def unlock(self):
"""Unlock a mutex. If the queue is not empty, call the next
function with its argument."""
if self.queue:
function, argument = self.queue.popleft()
function(argument)
else:
self.locked = False
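if __name__ == '__main__':
    # Tiny usage sketch (not part of the original module): the callback runs
    # immediately when the lock is free; otherwise it is queued and invoked by
    # a later unlock().
    m = mutex()
    m.lock(lambda arg: None, 'first')    # lock is free: callback runs now
    m.lock(lambda arg: None, 'second')   # lock is held: callback is queued
    m.unlock()                           # pops and runs the queued callback
    m.unlock()                           # queue empty: lock is released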
| mit |
anaruse/chainer | tests/chainer_tests/functions_tests/math_tests/test_sparse_matmul.py | 2 | 10645 | import unittest
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer import utils
from chainer.utils import type_check
_scipy_available = True
try:
from scipy import sparse # NOQA
except ImportError:
_scipy_available = False
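# Helper used throughout the tests below: draws a uniform random tensor in
# [_min, _max) and, when ``threshold`` is given, zeroes every entry below it so
# the result is suitably sparse.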
def _setup_tensor(_min, _max, shape, dtype, threshold=None):
y = numpy.random.uniform(_min, _max, shape).astype(dtype)
if threshold is not None:
y[y < threshold] = 0
return y
@testing.parameterize(*testing.product_dict(
[
{'m': 2, 'n': 3, 'k': 4},
{'m': 3, 'n': 4, 'k': 2},
],
[
{'transa': False}, {'transa': True},
],
[
{'transb': False}, {'transb': True},
],
[
{'nbatch': 0}, {'nbatch': 1}, {'nbatch': 4},
],
[
{'a_dtype': numpy.float16},
{'a_dtype': numpy.float32},
{'a_dtype': numpy.float64},
],
[
{'b_dtype': numpy.float16},
{'b_dtype': numpy.float32},
{'b_dtype': numpy.float64},
]
))
class TestCooMatMul(unittest.TestCase):
def setUp(self):
a_shape = self._set_shape([self.m, self.k], self.transa)
b_shape = self._set_shape([self.k, self.n], self.transb)
c_shape = self._set_shape([self.m, self.n], False)
self.c_dtype = numpy.result_type(self.a_dtype, self.b_dtype)
self.a = _setup_tensor(.5, 1, a_shape, self.a_dtype, .75)
self.b = _setup_tensor(.5, 1, b_shape, self.b_dtype, .75)
self.gc = _setup_tensor(-1, 1, c_shape, self.c_dtype)
self.gga = _setup_tensor(.5, 1, a_shape, self.a_dtype)
self.gga[numpy.where(self.a < .75)] = 0
self.ggb = _setup_tensor(.5, 1, b_shape, self.b_dtype)
self.ggb[numpy.where(self.b < .75)] = 0
self.forward_answer = self._matmul(self.a, self.b)
def _set_shape(self, shape, trans):
if trans:
shape = [shape[1], shape[0]]
if self.nbatch > 0:
shape = [self.nbatch, shape[0], shape[1]]
return shape
def _matmul(self, a, b):
if self.transa:
a = a.swapaxes(-1, -2)
if self.transb:
b = b.swapaxes(-1, -2)
if hasattr(numpy, 'matmul'):
return numpy.matmul(a, b)
elif a.ndim == 2:
return numpy.dot(a, b)
else:
return numpy.einsum('...ij,...jk->...ik', a, b)
#
# SPDN: sparse A * dense B
#
def check_SPDN_forward(self, a_data, b_data, atol=1e-4, rtol=1e-5):
sp_a = utils.to_coo(a_data, requires_grad=True)
b = chainer.Variable(b_data)
c = F.sparse_matmul(sp_a, b, transa=self.transa, transb=self.transb)
testing.assert_allclose(self.forward_answer, c.data, atol, rtol)
def test_SPDN_sparse_matmul_forward_cpu(self):
if not _scipy_available:
return
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_SPDN_forward(self.a, self.b, atol=1e-3, rtol=1e-3)
else:
self.check_SPDN_forward(self.a, self.b)
@attr.gpu
def test_SPDN_sparse_matmul_forward_gpu(self):
a = cuda.to_gpu(self.a)
b = cuda.to_gpu(self.b)
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_SPDN_forward(a, b, atol=1e-3, rtol=1e-3)
else:
self.check_SPDN_forward(a, b)
def check_SPDN_backward(self, a_data, b_data, c_grad, atol, rtol):
sp_a = utils.to_coo(a_data)
func = F.math.sparse_matmul.CooMatMul(
sp_a.row, sp_a.col, sp_a.shape,
transa=self.transa, transb=self.transb, transc=False)
def op(a, b):
return func.apply((a, b))[0]
gradient_check.check_backward(
op, (sp_a.data.data, b_data), c_grad, atol=atol, rtol=rtol,
dtype=numpy.float32)
def test_SPDN_sparse_matmul_backward_cpu(self):
if not _scipy_available:
return
self.check_SPDN_backward(
self.a, self.b, self.gc, atol=1e-2, rtol=1e-2)
@attr.gpu
def test_SPDN_sparse_matmul_backward_gpu(self):
self.check_SPDN_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), atol=1e-2, rtol=1e-2)
def check_SPDN_double_backward(
self, a_data, b_data, c_grad, a_grad_grad, b_grad_grad,
atol, rtol):
sp_a = utils.to_coo(a_data)
sp_gga = utils.to_coo(a_grad_grad)
func = F.math.sparse_matmul.CooMatMul(
sp_a.row, sp_a.col, sp_a.shape,
transa=self.transa, transb=self.transb, transc=False)
def op(a, b):
return func.apply((a, b))[0]
gradient_check.check_double_backward(
op, (sp_a.data.data, b_data),
c_grad, (sp_gga.data.data, b_grad_grad),
atol=atol, rtol=rtol, dtype=numpy.float32)
def test_SPDN_sparse_matmul_double_backward_cpu(self):
if not _scipy_available:
return
self.check_SPDN_double_backward(
self.a, self.b, self.gc, self.gga, self.ggb,
atol=1e-2, rtol=1e-2)
@attr.gpu
def test_SPDN_sparse_matmul_double_backward_gpu(self):
self.check_SPDN_double_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), cuda.to_gpu(self.gga),
cuda.to_gpu(self.ggb), atol=1e-2, rtol=1e-2)
#
# DNSP: dense A * sparse B
#
def check_DNSP_forward(self, a_data, b_data, atol=1e-4, rtol=1e-5):
a = chainer.Variable(a_data)
sp_b = utils.to_coo(b_data, requires_grad=True)
c = F.sparse_matmul(a, sp_b, transa=self.transa, transb=self.transb)
testing.assert_allclose(self.forward_answer, c.data, atol, rtol)
def test_DNSP_sparse_matmul_forward_cpu(self):
if not _scipy_available:
return
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_DNSP_forward(self.a, self.b, atol=1e-3, rtol=1e-3)
else:
self.check_DNSP_forward(self.a, self.b)
@attr.gpu
def test_DNSP_sparse_matmul_forward_gpu(self):
a = cuda.to_gpu(self.a)
b = cuda.to_gpu(self.b)
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_DNSP_forward(a, b, atol=1e-3, rtol=1e-3)
else:
self.check_DNSP_forward(a, b)
def check_DNSP_backward(self, a_data, b_data, c_grad, atol, rtol):
sp_b = utils.to_coo(b_data)
func = F.math.sparse_matmul.CooMatMul(
sp_b.row, sp_b.col, sp_b.shape,
transa=not self.transb, transb=not self.transa, transc=True)
def op(b, a):
return func.apply((b, a))[0]
gradient_check.check_backward(
op, (sp_b.data.data, a_data), c_grad, atol=atol, rtol=rtol,
dtype=numpy.float32)
def test_DNSP_tensordot_backward_cpu(self):
if not _scipy_available:
return
self.check_DNSP_backward(
self.a, self.b, self.gc, atol=1e-2, rtol=1e-2)
@attr.gpu
def test_DNSP_tensordot_backward_gpu(self):
self.check_DNSP_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), atol=1e-2, rtol=1e-2)
def check_DNSP_double_backward(
self, a_data, b_data, c_grad, a_grad_grad, b_grad_grad,
atol, rtol):
sp_b = utils.to_coo(b_data)
sp_ggb = utils.to_coo(b_grad_grad)
func = F.math.sparse_matmul.CooMatMul(
sp_b.row, sp_b.col, sp_b.shape,
transa=not self.transb, transb=not self.transa, transc=True)
def op(b, a):
return func.apply((b, a))[0]
gradient_check.check_double_backward(
op, (sp_b.data.data, a_data),
c_grad, (sp_ggb.data.data, a_grad_grad),
atol=atol, rtol=rtol, dtype=numpy.float32)
def test_DNSP_sparse_matmul_double_backward_cpu(self):
if not _scipy_available:
return
self.check_DNSP_double_backward(
self.a, self.b, self.gc, self.gga, self.ggb,
atol=1e-2, rtol=1e-2)
@attr.gpu
def test_DNSP_sparse_matmul_double_backward_gpu(self):
self.check_DNSP_double_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), cuda.to_gpu(self.gga),
cuda.to_gpu(self.ggb), atol=1e-2, rtol=1e-2)
@testing.parameterize(*testing.product_dict(
[
{'transa': False}, {'transa': True},
],
[
{'transb': False}, {'transb': True},
],
))
class TestCooMatMulInvalid(unittest.TestCase):
def test_invalid_ndim(self):
a = _setup_tensor(.5, 1, (2, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_nbatch(self):
a = _setup_tensor(.5, 1, (2, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (3, 3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_shape(self):
a = _setup_tensor(.5, 1, (1, 2, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (1, 4, 5), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_inputs(self):
a = _setup_tensor(.5, 1, (1, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (1, 3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(ValueError):
F.sparse_matmul(sp_a, sp_b, self.transa, self.transb)
with self.assertRaises(ValueError):
F.sparse_matmul(a, b, self.transa, self.transb)
testing.run_module(__name__, __file__)
| mit |
apark263/tensorflow | tensorflow/contrib/optimizer_v2/checkpointable_utils_test.py | 2 | 33567 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(josh11b): Forked from contrib/eager/python to test OptimizerV2 the same way
# OptimizerV1 is tested. This file should be removed once the fork is resolved.
import functools
import os
import six
from tensorflow.contrib.optimizer_v2 import adam
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as core_saver
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
class NonLayerCheckpointable(tracking.AutoCheckpointable):
def __init__(self):
super(NonLayerCheckpointable, self).__init__()
self.a_variable = util.add_variable(
self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Checkpointables which aren't Layers.
self._non_layer = NonLayerCheckpointable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class _MirroringSaveable(
core_saver.BaseSaverBuilder.ResourceVariableSaveable):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
super(_MirroringSaveable, self).__init__(
self._primary_variable, "", name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class CheckpointingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should not
# go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = util.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value),
global_step=optimizer_step)
optimizer.minimize(
lambda: other_model(input_value),
global_step=optimizer_step)
else:
train_op = optimizer.minimize(
model(input_value), global_step=optimizer_step)
optimizer.minimize(
other_model(input_value),
global_step=optimizer_step)
self.evaluate(util.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
named_variables, serialized_graph, _ = (
util._serialize_object_graph(
root_checkpointable, saveables_cache=None))
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"optimizer_step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
# The optimizer creates two non-slot variables
"optimizer/beta1_power",
"optimizer/beta2_power",
# Slot variables
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names]
# The optimizer and Dense layers also save get_config() JSON
expected_checkpoint_names.extend([
"model/_second/.ATTRIBUTES/OBJECT_CONFIG_JSON",
"model/_named_dense/.ATTRIBUTES/OBJECT_CONFIG_JSON"
])
named_variables = {v.name: v for v in named_variables}
six.assertCountEqual(self, expected_checkpoint_names,
named_variables.keys())
# Check that we've mapped to the right variable objects (not exhaustive)
self.assertEqual(
"global_step",
named_variables["optimizer_step" + suffix].full_name)
self.assertEqual(
"my_model/dense_1/kernel",
named_variables["model/_second/kernel" + suffix].full_name)
self.assertEqual(
"my_model/dense/kernel",
named_variables["model/_named_dense/kernel" + suffix].full_name)
self.assertEqual(
"beta1_power",
named_variables["optimizer/beta1_power" + suffix].full_name)
self.assertEqual(
"beta2_power",
named_variables["optimizer/beta2_power" + suffix].full_name)
# Spot check the generated protocol buffers.
self.assertEqual("optimizer",
serialized_graph.nodes[0].children[1].local_name)
optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
1].node_id]
self.assertEqual("beta1_power", optimizer_node.children[0].local_name)
self.assertEqual(
"beta1_power", serialized_graph.nodes[optimizer_node.children[0]
.node_id].attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.original_variable_node_id]
.attributes[0].full_name)
# We strip off the :0 suffix, as variable.name-based saving does.
self.assertEqual(
"my_model/dense/kernel/Adam",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.slot_variable_node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel/Adam:0",
optimizer.get_slot(
var=model._named_dense.kernel,
name="m").name)
self.assertEqual(
"model/_named_dense/kernel" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.original_variable_node_id].attributes[0].checkpoint_key)
self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
self.assertEqual(
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.slot_variable_node_id].attributes[0].checkpoint_key)
@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root_checkpointable = util.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value))
else:
train_op = optimizer.minimize(model(input_value))
# TODO(allenl): Make initialization more pleasant when graph building.
root_checkpointable.save_counter # pylint: disable=pointless-statement
self.evaluate(util.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
save_path = root_checkpointable.save(file_prefix=prefix)
self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3))
optimizer_variables = self.evaluate(optimizer.variables())
self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
# Immediate restoration
status = root_checkpointable.restore(save_path=save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not context.executing_eagerly():
return # Restore-on-create is only supported when executing eagerly
on_create_model = MyModel()
on_create_optimizer = adam.AdamOptimizer(
0.001,
        # Preserve beta_1_power and beta_2_power when applying gradients
# so we can test that they've been restored correctly.
beta1=1.0,
beta2=1.0)
on_create_root = util.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
on_create_model(constant_op.constant([[3.]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual([42.],
self.evaluate(
on_create_model._named_dense.variables[1]))
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m")
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
self.assertAllEqual(optimizer_variables[2:],
self.evaluate(on_create_optimizer.variables()))
dummy_var = resource_variable_ops.ResourceVariable([1.])
on_create_optimizer.minimize(loss=dummy_var.read_value)
status.assert_consumed()
beta_1_power, beta_2_power = on_create_optimizer._get_beta_accumulators()
self.assertAllEqual(optimizer_variables[0], self.evaluate(beta_1_power))
self.assertAllEqual(optimizer_variables[1], self.evaluate(beta_2_power))
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = util.Checkpoint(
optimizer=optimizer, model=model,
optimizer_step=training_util.get_or_create_global_step())
root.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = constant_op.constant([[3.]])
optimizer.minimize(
lambda: model(input_value), # pylint: disable=cell-var-from-loop
global_step=root.optimizer_step)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy())
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = util.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
input_value = constant_op.constant([[3.]])
train_op = optimizer.minimize(
model(input_value),
global_step=root.global_step)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
with self.session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
else:
status.assert_consumed()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.global_step))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default(), self.test_session(
graph=ops.get_default_graph()), test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = util.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: disable=cell-var-from-loop
@test_util.run_in_graph_and_eager_modes
def testWithDefun(self):
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default(), self.test_session(
graph=ops.get_default_graph()), test_util.device(use_gpu=True):
model = MyModel()
# Don't actually train so we can test variable values
optimizer = adam.AdamOptimizer(0.)
root = util.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
def train_fn():
@function.defun
def _call_model(x):
return model(x)
with backprop.GradientTape() as tape:
loss = _call_model(constant_op.constant([[3.]]))
gradients = tape.gradient(loss, model.variables)
return optimizer.apply_gradients(zip(gradients, model.variables),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(
self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
if training_continuation > 0:
status.assert_consumed()
self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
else:
self.evaluate(model.variables[0].assign([[42.]]))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: enable=cell-var-from-loop
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super(Model, self).__init__()
self.w = resource_variable_ops.ResourceVariable(0.0)
self.b = resource_variable_ops.ResourceVariable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
with context.eager_mode():
model = Model()
optimizer = adam.AdamOptimizer(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = util.Checkpoint(
model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with backprop.GradientTape() as tape:
loss = (constant_op.constant(1.)
- model(constant_op.constant(1.))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
root = tracking.AutoCheckpointable()
root.var = util.add_variable(
root, name="var", initializer=0.)
optimizer = adam.AdamOptimizer(0.1)
if context.executing_eagerly():
optimizer.minimize(root.var.read_value)
else:
train_op = optimizer.minimize(root.var)
# Note that `optimizer` has not been added as a dependency of
# `root`. Create a one-off grouping so that slot variables for `root.var`
# get initialized too.
self.evaluate(util.gather_initializers(
util.Checkpoint(root=root, optimizer=optimizer)))
self.evaluate(train_op)
self.evaluate(state_ops.assign(root.var, 12.))
no_slots_path = util.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "no_slots"))
root.optimizer = optimizer
self.evaluate(state_ops.assign(root.var, 13.))
self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
14.))
slots_path = util.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "with_slots"))
new_root = tracking.AutoCheckpointable()
# Load the slot-containing checkpoint (deferred), then immediately overwrite
# the non-slot variable (also deferred).
slot_status = util.CheckpointableSaver(
new_root).restore(slots_path)
no_slot_status = util.CheckpointableSaver(
new_root).restore(no_slots_path)
with self.assertRaises(AssertionError):
no_slot_status.assert_consumed()
new_root.var = util.add_variable(
new_root, name="var", shape=[])
no_slot_status.assert_consumed()
no_slot_status.run_restore_ops()
self.assertEqual(12., self.evaluate(new_root.var))
new_root.optimizer = adam.AdamOptimizer(0.1)
with self.assertRaisesRegexp(AssertionError, "beta1_power"):
slot_status.assert_consumed()
self.assertEqual(12., self.evaluate(new_root.var))
if context.executing_eagerly():
# Slot variables are only created with restoring initializers when
# executing eagerly.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
else:
self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
None)
if context.executing_eagerly():
new_root.optimizer.minimize(new_root.var.read_value)
else:
train_op = new_root.optimizer.minimize(new_root.var)
# The slot variable now exists; restore() didn't create it, but we should
# now have a restore op for it.
slot_status.run_restore_ops()
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
self.evaluate(train_op)
slot_status.assert_consumed()
def testManySavesGraph(self):
"""Saves after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.AutoCheckpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(util.gather_initializers(obj))
saver = util.CheckpointableSaver(obj)
saver.save(checkpoint_prefix)
before_ops = graph.get_operations()
saver.save(checkpoint_prefix)
self.assertEqual(before_ops, graph.get_operations())
def testManyRestoresGraph(self):
"""Restores after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.AutoCheckpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(util.gather_initializers(obj))
saver = util.CheckpointableSaver(obj)
save_path = saver.save(checkpoint_prefix)
saver.restore(save_path)
before_ops = graph.get_operations()
saver.restore(save_path)
self.assertEqual(before_ops, graph.get_operations())
def testMultipleGraphsNonSlotVariables(self):
with context.graph_mode():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer = adam.AdamOptimizer(0.001)
# Construct a model in one graph
first_graph = ops.Graph()
first_session = session_lib.Session(graph=first_graph)
with first_graph.as_default(), first_session.as_default():
first_variable = resource_variable_ops.ResourceVariable([1.])
first_root_checkpointable = util.Checkpoint(
optimizer=optimizer, variable=first_variable)
train_op = optimizer.minimize(first_variable.read_value)
self.evaluate(util.gather_initializers(
first_root_checkpointable))
self.evaluate(train_op)
self.evaluate(first_variable.assign([1.]))
self.evaluate(optimizer.get_slot(
var=first_variable, name="m").assign([2.]))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta_1_power.assign(3.))
# Save and load in a second graph
second_graph = ops.Graph()
with second_graph.as_default(), session_lib.Session(graph=second_graph):
second_variable = resource_variable_ops.ResourceVariable([1.])
second_root_checkpointable = util.Checkpoint(
optimizer=optimizer, variable=second_variable)
train_op = optimizer.minimize(second_variable.read_value)
second_root_checkpointable.restore(None).initialize_or_restore()
self.evaluate(train_op)
self.evaluate(second_variable.assign([4.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([5.]))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta_1_power.assign(6.))
save_path = second_root_checkpointable.save(checkpoint_prefix)
self.evaluate(second_variable.assign([7.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([8.]))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta_1_power))
status = second_root_checkpointable.restore(save_path)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([4.], self.evaluate(second_variable))
self.assertAllEqual([5.], self.evaluate(optimizer.get_slot(
var=second_variable, name="m")))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta_1_power))
# Check that the first graph is unmolested
with first_graph.as_default(), first_session.as_default():
self.assertAllEqual([1.], self.evaluate(first_variable))
self.assertAllEqual([2.], self.evaluate(optimizer.get_slot(
var=first_variable, name="m")))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta_1_power))
class TemplateTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_checkpointable_save_restore(self):
def _templated():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
v2 = variable_scope.get_variable(
"v2", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
return v, v + 1., v2
save_template = template.make_template("s1", _templated)
v1_save, _, v2_save = save_template()
optimizer = adam.AdamOptimizer(0.0)
save_root = util.Checkpoint(
my_template=save_template, optimizer=optimizer)
optimizer.minimize(v1_save.read_value)
self.evaluate([v.initializer for v in optimizer.variables()])
self.evaluate(v1_save.assign([12.]))
self.evaluate(v2_save.assign([14.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _templated)
load_optimizer = adam.AdamOptimizer(0.0)
load_root = util.Checkpoint(
my_template=load_template, optimizer=load_optimizer)
status = load_root.restore(save_path)
var, var_plus_one, var2 = load_template()
load_optimizer.minimize(var.read_value)
self.assertEqual(2, len(load_template._checkpoint_dependencies))
self.assertEqual("v", load_template._checkpoint_dependencies[0].name)
self.assertEqual("v2", load_template._checkpoint_dependencies[1].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([12.], self.evaluate(var))
self.assertAllEqual([13.], self.evaluate(var_plus_one))
self.assertAllEqual([14.], self.evaluate(var2))
class CheckpointCompatibilityTests(test.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = util.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
train_op = optimizer.minimize(
functools.partial(model, input_value),
global_step=optimizer_step)
self.evaluate(util.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, name="m").assign([2.]))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta_1_power.assign(3.))
return root_checkpointable
def _set_sentinels(self, root_checkpointable):
self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")
.assign([102.]))
beta_1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.evaluate(beta_1_power.assign(103.))
def _check_sentinels(self, root_checkpointable):
self.assertAllEqual(
[1.], self.evaluate(root_checkpointable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")))
beta_1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta_1_power))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = core_saver.Saver()
return name_saver.save(
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer_step)
@test_util.run_in_graph_and_eager_modes
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = util.CheckpointableSaver(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
if context.executing_eagerly():
self._check_sentinels(root)
if context.executing_eagerly():
with self.assertRaisesRegexp(AssertionError, "OBJECT_CONFIG_JSON"):
status.assert_consumed()
else:
# When graph building, we haven't read any keys, so we don't know
# whether the restore will be complete.
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_consumed()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
status.initialize_or_restore()
self._check_sentinels(root)
# TODO(allenl): Test for the core name-based saver loading object-based
# checkpoints once object-based checkpointing is in core.
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
save_path = root.save(
session=session, file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
if __name__ == "__main__":
test.main()
| apache-2.0 |
reingart/pyafipws | wslpg.py | 1 | 216526 | #!/usr/bin/python
# -*- coding: utf8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"""Módulo para obtener código de operación electrónico (COE) para
Liquidación Primaria Electrónica de Granos del web service WSLPG de AFIP
"""
__author__ = "Mariano Reingart <[email protected]>"
__copyright__ = "Copyright (C) 2013-2018 Mariano Reingart"
__license__ = "GPL 3.0"
__version__ = "1.32a"
LICENCIA = """
wslpg.py: Interfaz para generar Código de Operación Electrónica para
Liquidación Primaria de Granos (LpgService)
Copyright (C) 2013-2018 Mariano Reingart [email protected]
http://www.sistemasagiles.com.ar/trac/wiki/LiquidacionPrimariaGranos
Este programa es software libre, se entrega ABSOLUTAMENTE SIN GARANTIA
y es bienvenido a redistribuirlo respetando la licencia GPLv3.
Para información adicional sobre garantía, soporte técnico comercial
e incorporación/distribución en programas propietarios ver PyAfipWs:
http://www.sistemasagiles.com.ar/trac/wiki/PyAfipWs
"""
AYUDA="""
Opciones:
--ayuda: este mensaje
--debug: modo depuración (detalla y confirma las operaciones)
--formato: muestra el formato de los archivos de entrada/salida
--prueba: genera y autoriza una liquidación de prueba (no usar en producción!)
--xml: almacena los requerimientos y respuestas XML (depuración)
--dbf: utilizar tablas DBF (xBase) para los archivos de intercambio
--json: utilizar formato json para el archivo de intercambio
--dummy: consulta estado de servidores
--autorizar: Autorizar Liquidación Primaria de Granos (liquidacionAutorizar)
--ajustar: Ajustar Liquidación Primaria de Granos (liquidacionAjustar)
--anular: Anular una Liquidación Primaria de Granos (liquidacionAnular)
--autorizar-anticipo: Autoriza un Anticipo (lpgAutorizarAnticipo)
--consultar: Consulta una liquidación (parámetros: nro de orden, COE, pdf)
--cancelar-anticipo: anteponer para anticipos (lpgCancelarAnticipo)
--ult: Consulta el último número de orden registrado en AFIP
(liquidacionUltimoNroOrdenConsultar)
--pdf: genera el formulario C 1116 B en formato PDF
--mostrar: muestra el documento PDF generado (usar con --pdf)
--imprimir: imprime el documento PDF generado (usar con --mostrar y --pdf)
--autorizar-lsg: Autoriza una Liquidación Secundaria de Granos (lsgAutorizar)
--lsg --anular: Anula una LSG (lsgAnular)
  --lsg --consultar: Consulta una LSG por pto_emision, nro_orden o COE
--lsg --ult: Consulta el último Nº LSG emitida (lsgConsultarUltimoNroOrden)
--lsg --asociar: Asocia una liq. sec. a un contrato (lsgAsociarAContrato)
--ajustar-lsg: Ajusta una liquidación secundaria (lsgAjustar por COE/Contrato)
--autorizar-cg: Autorizar Certificación de Granos (cgAutorizar)
--cg --anular: Solicita anulación de un CG (cgSolicitarAnulacion)
--cg --consultar: Consulta una CG por pto_emision, nro_orden o COE
  --cg --ult: Consulta el último Nº CG emitida (cgConsultarUltimoNroOrden)
--informar-calidad: Informa la calidad de una CG (cgInformarCalidad)
--buscar-ctg: devuelve los datos de la CTG a certificar
espera tipo_certificado, cuit_depositante, nro_planta, cod_grano, campania
--buscar-cert-con-saldo-disp: CG disponible para liquidar/retirar/transferir
espera cuit_depositante, cod_grano, campania, coe fecha_emision_des/has
--provincias: obtiene el listado de provincias
--localidades: obtiene el listado de localidades por provincia
--tipograno: obtiene el listado de los tipos de granos disponibles
--campanias: obtiene el listado de las campañas
--gradoref: obtiene el listado de los grados de referencias
--gradoent: obtiene el listado de los grados y valores entregados
--certdeposito: obtiene el listado de los tipos de certificados de depósito
--deducciones: obtiene el listado de los tipos de deducciones
--retenciones: obtiene el listado de los tipos de retenciones
--puertos: obtiene el listado de los puertos habilitados
--actividades: obtiene el listado de las actividades habilitados
--actividadesrep: devuelve las actividades en las que emisor/representado
se encuentra inscripto en RUOCA
--operaciones: obtiene el listado de las operaciones para el representado
Ver wslpg.ini para parámetros de configuración (URL, certificados, etc.)"
"""
import os, sys, shelve
import decimal, datetime
import traceback
import pprint
import warnings
from pysimplesoap.client import SoapFault
from fpdf import Template
import utils
# importo funciones compartidas:
from utils import leer, escribir, leer_dbf, guardar_dbf, N, A, I, json, BaseWS, inicializar_y_capturar_excepciones, get_install_dir
WSDL = "https://fwshomo.afip.gov.ar/wslpg/LpgService?wsdl"
#WSDL = "https://serviciosjava.afip.gob.ar/wslpg/LpgService?wsdl"
#WSDL = "file:wslpg.wsdl"
DEBUG = False
XML = False
CONFIG_FILE = "wslpg.ini"
TIMEOUT = 30
HOMO = False
# definición del formato del archivo de intercambio:
ENCABEZADO = [
('tipo_reg', 1, A), # 0: encabezado liquidación
('nro_orden', 18, N),
('cuit_comprador', 11, N),
('nro_act_comprador', 5, N),
('nro_ing_bruto_comprador', 15, N),
('cod_tipo_operacion', 2, N),
('es_liquidacion_propia', 1, A), # S o N
('es_canje', 1, A), # S o N
('cod_puerto', 4, N),
('des_puerto_localidad', 240, A),
('cod_grano', 3, N),
('cuit_vendedor', 11, N),
('nro_ing_bruto_vendedor', 15, N),
('actua_corredor', 1, A), # S o N
('liquida_corredor', 1, A), # S o N
('cuit_corredor', 11, N),
('nro_ing_bruto_corredor', 15, N),
('comision_corredor', 5, I, 2), # 3.2
('fecha_precio_operacion', 10, A), # 26/02/2013
('precio_ref_tn', 8, I, 3), # 4.3
('cod_grado_ref', 2, A),
('cod_grado_ent', 2, A),
('factor_ent', 6, I, 3), # 3.3
('precio_flete_tn', 7, I, 2), # 5.2
('cont_proteico', 6, I, 3), # 3.3
('alic_iva_operacion', 5, I, 2), # 3.2
('campania_ppal', 4, N),
('cod_localidad_procedencia', 6, N),
('reservado1', 200, A), # datos_adicionales (compatibilidad hacia atras)
('coe', 12, N),
('coe_ajustado', 12, N),
('estado', 2, A),
('total_deduccion', 17, I, 2), # 17.2
('total_retencion', 17, I, 2), # 17.2
('total_retencion_afip', 17, I, 2), # 17.2
('total_otras_retenciones', 17, I, 2), # 17.2
('total_neto_a_pagar', 17, I, 2), # 17.2
('total_iva_rg_4310_18', 17, I, 2), # 17.2 WSLPGv1.20
('total_pago_segun_condicion', 17, I, 2), # 17.2
('fecha_liquidacion', 10, A),
('nro_op_comercial', 10, N),
('precio_operacion', 17, I, 3), # 17.3
('subtotal', 17, I, 2), # 17.2
('importe_iva', 17, I, 2), # 17.2
('operacion_con_iva', 17, I, 2), # 17.2
('total_peso_neto', 8, N), # 17.2
# Campos WSLPGv1.1:
('pto_emision', 4, N),
('cod_prov_procedencia', 2, N),
('peso_neto_sin_certificado', 8, N),
('cod_tipo_ajuste', 2, N),
('val_grado_ent', 4, I, 3), # 1.3
# Campos WSLPGv1.3:
('cod_prov_procedencia_sin_certificado', 2, N),
('cod_localidad_procedencia_sin_certificado', 6, N),
# Campos WSLPGv1.4 (ajustes):
('nro_contrato', 15, N),
('tipo_formulario', 2, N),
('nro_formulario', 12, N),
# datos devuetos:
('total_iva_10_5', 17, I, 2), # 17.2
('total_iva_21', 17, I, 2), # 17.2
('total_retenciones_ganancias', 17, I, 2), # 17.2
('total_retenciones_iva', 17, I, 2), # 17.2
('datos_adicionales', 400, A), # max 400 desde WSLPGv1.2
# Campos agregados WSLPGv1.5 (ajustes):
('iva_deducciones', 17, I, 2), # 17.2
('subtotal_deb_cred', 17, I, 2), # 17.2
('total_base_deducciones', 17, I, 2), # 17.2
# Campos agregados WSLPGv1.6 (liquidación secundaria base):
('cantidad_tn', 11, I, 3), # 8.3
('nro_act_vendedor', 5, N),
# Campos agregados WSLPGv1.9 (liquidación secundaria base):
('total_deducciones', 19, I , 2),
('total_percepciones', 19, I , 2),
]
CERTIFICADO = [
('tipo_reg', 1, A), # 1: Certificado
('reservado1', 2, N), # en WSLPGv1.7 se amplio el campo
('nro_certificado_deposito', 12, N),
('peso_neto', 8, N), # usado peso ajustado WSLPGv1.17
('cod_localidad_procedencia', 6, N),
('cod_prov_procedencia', 2, N),
('reservado', 2, N),
('campania', 4, N),
('fecha_cierre', 10, A),
('peso_neto_total_certificado', 8, N), # para ajuste unificado (WSLPGv1.4)
('coe_certificado_deposito', 12, N), # para certificacion (WSLPGv1.6)
('tipo_certificado_deposito', 3, N), # wSLPGv1.7 agrega valor 332
]
RETENCION = [
('tipo_reg', 1, A), # 2: Retencion
('codigo_concepto', 2, A),
('detalle_aclaratorio', 30, A),
('base_calculo', 10, I, 2), # 8.2
('alicuota', 6, I, 2), # 3.2
('nro_certificado_retencion', 14, N),
('fecha_certificado_retencion', 10, A),
('importe_certificado_retencion', 17, I, 2), # 17.2
('importe_retencion', 17, I, 2), # 17.2
]
DEDUCCION = [
('tipo_reg', 1, A), # 3: Deducción
('codigo_concepto', 2, A),
('detalle_aclaratorio', 30, A), # max 50 por WSLPGv1.2
('dias_almacenaje', 4, N),
('reservado1', 6, I, 3),
('comision_gastos_adm', 5, I, 2), # 3.2
('base_calculo', 10, I, 2), # 8.2
('alicuota', 6, I, 2), # 3.2
('importe_iva', 17, I, 2), # 17.2
('importe_deduccion', 17, I, 2), # 17.2
('precio_pkg_diario', 11, I, 8), # 3.8, ajustado WSLPGv1.2
]
PERCEPCION = [
('tipo_reg', 1, A), # P: Percepcion
('detalle_aclaratoria', 50, A), # max 50 por WSLPGv1.8
('base_calculo', 10, I, 2), # 8.2
('alicuota', 6, I, 2), # 3.2
('importe_final', 19, I, 2), # 17.2 (LPG WSLPGv1.16)
]
OPCIONAL = [
('tipo_reg', 1, A), # O: Opcional
('codigo', 50, A),
('descripcion', 250, A),
]
AJUSTE = [
('tipo_reg', 1, A), # 4: ajuste débito / 5: crédito (WSLPGv1.4)
('concepto_importe_iva_0', 20, A),
('importe_ajustar_iva_0', 15, I, 2), # 11.2
('concepto_importe_iva_105', 20, A),
('importe_ajustar_iva_105', 15, I, 2), # 11.2
('concepto_importe_iva_21', 20, A),
('importe_ajustar_iva_21', 15, I, 2), # 11.2
('diferencia_peso_neto', 8, N),
('diferencia_precio_operacion', 17, I, 3), # 17.3
('cod_grado', 2, A),
('val_grado', 4, I, 3), # 1.3
('factor', 6, I, 3), # 3.3
('diferencia_precio_flete_tn', 7, I, 2), # 5.2
('datos_adicionales', 400, A),
# datos devueltos:
('fecha_liquidacion', 10, A),
('nro_op_comercial', 10, N),
('precio_operacion', 17, I, 3), # 17.3
('subtotal', 17, I, 2), # 17.2
('importe_iva', 17, I, 2), # 17.2
('operacion_con_iva', 17, I, 2), # 17.2
('total_peso_neto', 8, N), # 17.2
('total_deduccion', 17, I, 2), # 17.2
('total_retencion', 17, I, 2), # 17.2
('total_retencion_afip', 17, I, 2), # 17.2
('total_otras_retenciones', 17, I, 2), # 17.2
('total_neto_a_pagar', 17, I, 2), # 17.2
('total_iva_rg_4310_18', 17, I, 2), # 17.2
('total_pago_segun_condicion', 17, I, 2), # 17.2
('iva_calculado_iva_0', 15, I, 2), # 15.2
('iva_calculado_iva_105', 15, I, 2), # 15.2
('iva_calculado_iva_21', 15, I, 2), # 15.2
]
CERTIFICACION = [
('tipo_reg', 1, A), # 7: encabezado certificación
# campos de la cabecera para todas las certificaciones (WSLPGv1.6)
('pto_emision', 4, N),
('nro_orden', 8, N),
('tipo_certificado', 1, A), # P:Primaria,R:Retiro,T:Transferencia,E:Preexistente
('nro_planta', 6, N),
('nro_ing_bruto_depositario', 15, N),
('titular_grano', 1, A), # "P" (Propio) "T" (Tercero)
('cuit_depositante', 11, N), # obligatorio si titular_grano es T
('nro_ing_bruto_depositante', 15, N),
('cuit_corredor', 11, N),
('cod_grano', 3, N),
('campania', 4, N),
('datos_adicionales', 400, A),
('reservado1', 14, A), # reservado para futuros campos (no usar)
# campos para CgAutorizarPrimariaType ex-cgAutorizarDeposito (WSLPGv1.6-1.8)
('nro_act_depositario', 5, N), # nuevo WSLPGv1.8 tambien R/T
('descripcion_tipo_grano', 20, A),
('monto_almacenaje', 10, I, 2),
('monto_acarreo', 10, I, 2),
('monto_gastos_generales', 10, I, 2),
('monto_zarandeo', 10, I, 2),
('porcentaje_secado_de', 5, I, 2),
('porcentaje_secado_a', 5, I, 2),
('monto_secado', 10, I, 2),
('monto_por_cada_punto_exceso', 10, I, 2),
('monto_otros', 10, I, 2),
('reservado_calidad', 35, A), # ver subestructura WSLPGv1.10
('peso_neto_merma_volatil', 10, I , 2),
('porcentaje_merma_secado', 5, I, 2),
('peso_neto_merma_secado', 10, I, 2),
('porcentaje_merma_zarandeo', 5, I, 2),
('peso_neto_merma_zarandeo', 10, I, 2),
('peso_neto_certificado', 10, I, 2), # WSLPGv1.9 2 decimales!
('servicios_secado', 8, I, 3),
('servicios_zarandeo', 8, I, 3),
('servicios_otros', 7, I, 3),
('servicios_forma_de_pago', 20, A),
# campos para cgAutorizarRetiroTransferencia (WSLPGv1.6):
('cuit_receptor', 11, N),
('fecha', 10, A), # no usado WSLPGv1.8
('nro_carta_porte_a_utilizar', 9, N), # obligatorio para retiro
('cee_carta_porte_a_utilizar', 14, N), # no usado WSLPGv1.8
# para cgAutorizarPreexistente (WSLPGv1.6):
('tipo_certificado_deposito_preexistente', 1, N), # "R": Retiro "T": Tra.
('nro_certificado_deposito_preexistente', 12, N),
('cac_certificado_deposito_preexistente', 14, N), # cambio WSLPGv1.8
('fecha_emision_certificado_deposito_preexistente', 10, A),
('peso_neto', 8, N),
# nro_planta definido previamente - agregado WSLPGv1.8
# datos devueltos por el webservice:
('reservado2', 183, N), # padding para futuros campos (no usar)
('coe', 12, N),
('fecha_certificacion', 10, A),
('estado', 2, A),
('reservado3', 101, A), # padding para futuros campos (no usar)
# otros campos devueltos (opcionales)
# 'pesosResumen'
('peso_bruto_certificado', 10, I , 2),
('peso_merma_secado', 10, I , 2),
('peso_merma_zarandeo', 10, I , 2),
# peso_neto_certificado definido arriba
# serviciosResumen
('importe_iva', 10, I , 2),
('servicio_gastos_generales', 10, I , 2),
('servicio_otros', 10, I , 2),
('servicio_total', 10, I , 2),
('servicio_zarandeo', 10, I , 2),
# planta
('cuit_titular_planta', 11, N),
('razon_social_titular_planta', 11, A),
# campos no documentados por AFIP (agregados luego de WSLPGv1.15 a fines Sept)
('servicios_conceptos_no_gravados', 10, I, 2),
('servicios_percepciones_iva', 10, I, 2),
('servicios_otras_percepciones', 10, I, 2),
]
CTG = [ # para cgAutorizarDeposito (WSLPGv1.6)
('tipo_reg', 1, A), # C: CTG
('nro_ctg', 8, N),
('nro_carta_porte', 9, N),
('porcentaje_secado_humedad', 5, I, 2),
('importe_secado', 10, I, 2),
('peso_neto_merma_secado', 10, I, 2),
('tarifa_secado', 10, I, 2),
('importe_zarandeo', 10, I, 2),
('peso_neto_merma_zarandeo', 10, I, 2),
('tarifa_zarandeo', 10, I, 2),
('peso_neto_confirmado_definitivo', 10, I, 2),
]
DET_MUESTRA_ANALISIS = [ # para cgAutorizarDeposito (WSLPGv1.6)
('tipo_reg', 1, A), # D: detalle muestra analisis
('descripcion_rubro', 400, A),
('tipo_rubro', 1, A), # "B" (Bonificación) y "R" (Rebaja)
('porcentaje', 5, I, 2),
('valor', 5, I, 2),
]
CALIDAD = [ # para cgAutorizar y cgInformarCalidad (WSLPGv1.10)
('tipo_reg', 1, A), # Q: caldiad
('analisis_muestra', 10, N),
('nro_boletin', 10, N),
('cod_grado', 2, A), # nuevo WSLPGv1.10: G1 G2 ....
('valor_grado', 4, I, 3), # solo para cod_grado F1 F2 ...
('valor_contenido_proteico', 5, I, 3),
('valor_factor', 6, I, 3),
]
FACTURA_PAPEL = [ # para lsgAjustar (WSLPGv1.15)
('tipo_reg', 1, A), # F: factura papel
('nro_cai', 14, N),
('nro_factura_papel', 12, N),
('fecha_factura', 10, A),
('tipo_comprobante', 3, N),
]
FUSION = [ # para liquidacionAjustarUnificado (WSLPGv1.19)
('tipo_reg', 1, A), # f: fusion
('nro_ing_brutos', 15, N),
('nro_actividad', 5, N),
]
EVENTO = [
('tipo_reg', 1, A), # E: Evento
('codigo', 4, A),
('descripcion', 250, A),
]
ERROR = [
('tipo_reg', 1, A), # R: Error
('codigo', 4, A),
('descripcion', 250, A),
]
DATO = [
('tipo_reg', 1, A), # 9: Dato adicional
('campo', 25, A),
('valor', 250, A),
]
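# Illustrative sketch (editorial addition, not part of the original module):
# each tuple in the format lists above is (campo, longitud, tipo[, decimales])
# and describes one fixed-width column of an interchange record, identified by
# its leading tipo_reg character. Assuming the leer/escribir helpers imported
# from utils parse and build such fixed-width records ("entrada.txt" is a
# hypothetical input file), a round trip would look roughly like this:
#
#   linea = open("entrada.txt").readline()
#   if linea[0] == '0':                         # tipo_reg 0: encabezado
#       registro = leer(linea, ENCABEZADO)      # dict campo -> valor
#       registro['nro_orden'] = 1
#       linea_nueva = escribir(registro, ENCABEZADO)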
class WSLPG(BaseWS):
"Interfaz para el WebService de Liquidación Primaria de Granos"
_public_methods_ = ['Conectar', 'Dummy', 'SetTicketAcceso', 'DebugLog',
'AutorizarLiquidacion',
'AutorizarLiquidacionSecundaria',
'AnularLiquidacionSecundaria','AnularLiquidacion',
'AutorizarAnticipo', 'CancelarAnticipo',
'CrearLiquidacion', 'CrearLiqSecundariaBase',
'AgregarCertificado', 'AgregarRetencion',
'AgregarDeduccion', 'AgregarPercepcion',
'AgregarOpcional', 'AgregarCalidad',
'AgregarFacturaPapel', 'AgregarFusion',
'ConsultarLiquidacion', 'ConsultarUltNroOrden',
'ConsultarLiquidacionSecundaria',
'ConsultarLiquidacionSecundariaUltNroOrden',
'CrearAjusteBase',
'CrearAjusteDebito', 'CrearAjusteCredito',
'AjustarLiquidacionUnificado',
'AjustarLiquidacionUnificadoPapel',
'AjustarLiquidacionContrato',
'AjustarLiquidacionSecundaria',
'AnalizarAjusteDebito', 'AnalizarAjusteCredito',
'AsociarLiquidacionAContrato', 'ConsultarAjuste',
'ConsultarLiquidacionesPorContrato',
'ConsultarLiquidacionesSecundariasPorContrato',
'AsociarLiquidacionSecundariaAContrato',
'CrearCertificacionCabecera',
'AgregarCertificacionPrimaria',
'AgregarCertificacionRetiroTransferencia',
'AgregarCertificacionPreexistente',
'AgregarDetalleMuestraAnalisis', 'AgregarCTG',
'AutorizarCertificacion',
'InformarCalidadCertificacion', 'BuscarCTG',
'AnularCertificacion',
'ConsultarCertificacion',
'ConsultarCertificacionUltNroOrden',
'BuscarCertConSaldoDisponible',
'LeerDatosLiquidacion',
'ConsultarCampanias',
'ConsultarTipoGrano',
'ConsultarGradoEntregadoXTipoGrano',
'ConsultarCodigoGradoReferencia',
'ConsultarTipoCertificadoDeposito',
'ConsultarTipoDeduccion',
'ConsultarTipoRetencion',
'ConsultarPuerto',
'ConsultarTipoActividad',
'ConsultarTipoActividadRepresentado',
'ConsultarProvincias',
'ConsultarLocalidadesPorProvincia',
'ConsultarTiposOperacion',
'BuscarLocalidades',
'AnalizarXml', 'ObtenerTagXml', 'LoadTestXML',
'SetParametros', 'SetParametro', 'GetParametro',
'CargarFormatoPDF', 'AgregarCampoPDF', 'AgregarDatoPDF',
'CrearPlantillaPDF', 'ProcesarPlantillaPDF',
'GenerarPDF', 'MostrarPDF',
]
_public_attrs_ = ['Token', 'Sign', 'Cuit',
'AppServerStatus', 'DbServerStatus', 'AuthServerStatus',
'Excepcion', 'ErrCode', 'ErrMsg', 'LanzarExcepciones', 'Errores',
'XmlRequest', 'XmlResponse', 'Version', 'Traceback', 'InstallDir',
'COE', 'COEAjustado', 'Estado', 'Resultado', 'NroOrden',
'TotalDeduccion', 'TotalRetencion', 'TotalRetencionAfip',
'TotalOtrasRetenciones', 'TotalNetoAPagar', 'TotalPagoSegunCondicion',
'TotalIvaRg4310_18', 'Subtotal', 'TotalIva105', 'TotalIva21',
'TotalRetencionesGanancias', 'TotalRetencionesIVA', 'NroContrato',
'FechaCertificacion',
]
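    # Illustrative call sequence (editorial sketch, not from the original
    # source): after obtaining a WSAA ticket (Token/Sign), the public methods
    # listed above are typically driven roughly as follows; `ta` and `cuit`
    # are assumed to be supplied by the caller.
    #
    #   wslpg = WSLPG()
    #   wslpg.SetTicketAcceso(ta)
    #   wslpg.Cuit = cuit
    #   wslpg.Conectar(url=WSDL)
    #   wslpg.CrearLiquidacion(pto_emision=1, nro_orden=1, ...)
    #   wslpg.AgregarCertificado(...)       # optional, zero or more
    #   wslpg.AgregarRetencion(...)         # optional, zero or more
    #   wslpg.AutorizarLiquidacion()
    #   print wslpg.COE, wslpg.Estado       # results exposed as attributes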
_reg_progid_ = "WSLPG"
_reg_clsid_ = "{9D21C513-21A6-413C-8592-047357692608}"
# Variables globales para BaseWS:
HOMO = HOMO
WSDL = WSDL
LanzarExcepciones = False
Version = "%s %s" % (__version__, HOMO and 'Homologación' or '')
def inicializar(self):
BaseWS.inicializar(self)
self.AppServerStatus = self.DbServerStatus = self.AuthServerStatus = None
self.errores = []
self.COE = self.COEAjustado = ""
self.Estado = self.Resultado = self.NroOrden = self.NroContrato = ''
self.TotalDeduccion = ""
self.TotalRetencion = ""
self.TotalRetencionAfip = ""
self.TotalOtrasRetenciones = ""
self.TotalNetoAPagar = ""
self.TotalIvaRg4310_18 = ""
self.TotalPagoSegunCondicion = ""
self.Subtotal = self.TotalIva105 = self.TotalIva21 = ""
self.TotalRetencionesGanancias = self.TotalRetencionesIVA = ""
self.TotalPercepcion = ""
self.FechaCertificacion = ""
self.datos = {}
@inicializar_y_capturar_excepciones
def Conectar(self, cache=None, url="", proxy="", wrapper="", cacert=None, timeout=30):
"Establecer la conexión a los servidores de la AFIP"
# llamo al constructor heredado:
ok = BaseWS.Conectar(self, cache, url, proxy, wrapper, cacert, timeout)
if ok:
# corrijo ubicación del servidor (puerto htttp 80 en el WSDL)
location = self.client.services['LpgService']['ports']['LpgEndPoint']['location']
if location.startswith("http://"):
print "Corrigiendo WSDL ...", location,
location = location.replace("http://", "https://").replace(":80", ":443")
self.client.services['LpgService']['ports']['LpgEndPoint']['location'] = location
print location
try:
# intento abrir el diccionario persistente de localidades
import wslpg_datos
localidades_db = os.path.join(self.cache, "localidades.dat")
# verificar que puede escribir en el dir, sino abrir solo lectura
flag = os.access(self.cache, os.W_OK) and 'c' or 'r'
wslpg_datos.LOCALIDADES = shelve.open(localidades_db, flag=flag)
if DEBUG: print "Localidades en BD:", len(wslpg_datos.LOCALIDADES)
self.Traceback = "Localidades en BD: %s" % len(wslpg_datos.LOCALIDADES)
except Exception, e:
print "ADVERTENCIA: No se pudo abrir la bbdd de localidades:", e
self.Excepcion = str(e)
return ok
def __analizar_errores(self, ret):
"Comprueba y extrae errores si existen en la respuesta XML"
errores = []
if 'errores' in ret:
errores.extend(ret['errores'])
if 'erroresFormato' in ret:
errores.extend(ret['erroresFormato'])
if errores:
self.Errores = ["%(codigo)s: %(descripcion)s" % err['error']
for err in errores]
self.errores = [
{'codigo': err['error']['codigo'],
'descripcion': err['error']['descripcion'].replace("\n", "")
.replace("\r", "")}
for err in errores]
self.ErrCode = ' '.join(self.Errores)
self.ErrMsg = '\n'.join(self.Errores)
@inicializar_y_capturar_excepciones
def Dummy(self):
"Obtener el estado de los servidores de la AFIP"
results = self.client.dummy()['return']
self.AppServerStatus = str(results['appserver'])
self.DbServerStatus = str(results['dbserver'])
self.AuthServerStatus = str(results['authserver'])
return True
@inicializar_y_capturar_excepciones
def CrearLiquidacion(self, nro_orden=None, cuit_comprador=None,
nro_act_comprador=None, nro_ing_bruto_comprador=None,
cod_tipo_operacion=None,
es_liquidacion_propia=None, es_canje=None,
cod_puerto=None, des_puerto_localidad=None, cod_grano=None,
cuit_vendedor=None, nro_ing_bruto_vendedor=None,
actua_corredor=None, liquida_corredor=None, cuit_corredor=None,
comision_corredor=None, nro_ing_bruto_corredor=None,
fecha_precio_operacion=None,
precio_ref_tn=None, cod_grado_ref=None, cod_grado_ent=None,
factor_ent=None, precio_flete_tn=None, cont_proteico=None,
alic_iva_operacion=None, campania_ppal=None,
cod_localidad_procedencia=None,
datos_adicionales=None, pto_emision=1, cod_prov_procedencia=None,
peso_neto_sin_certificado=None, val_grado_ent=None,
cod_localidad_procedencia_sin_certificado=None,
cod_prov_procedencia_sin_certificado=None,
nro_contrato=None,
**kwargs
):
"Inicializa internamente los datos de una liquidación para autorizar"
# limpio los campos especiales (segun validaciones de AFIP)
if alic_iva_operacion == 0:
alic_iva_operacion = None # no informar alicuota p/ monotributo
if val_grado_ent == 0:
val_grado_ent = None
# borrando datos corredor si no corresponden
if actua_corredor == "N":
cuit_corredor = None
comision_corredor = None
nro_ing_bruto_corredor = None
# si no corresponde elimino el peso neto certificado campo opcional
if not peso_neto_sin_certificado or not int(peso_neto_sin_certificado):
peso_neto_sin_certificado = None
if cod_puerto and int(cod_puerto) != 14:
des_puerto_localidad = None # validacion 1630
# limpio los campos opcionales para no enviarlos si no corresponde:
if cod_grado_ref == "":
cod_grado_ref = None
if cod_grado_ent == "":
cod_grado_ent = None
if val_grado_ent == 0:
val_grado_ent = None
# creo el diccionario con los campos generales de la liquidación:
self.liquidacion = dict(
ptoEmision=pto_emision,
nroOrden=nro_orden,
cuitComprador=cuit_comprador,
nroActComprador=nro_act_comprador,
nroIngBrutoComprador=nro_ing_bruto_comprador,
codTipoOperacion=cod_tipo_operacion,
esLiquidacionPropia=es_liquidacion_propia,
esCanje=es_canje,
codPuerto=cod_puerto,
desPuertoLocalidad=des_puerto_localidad,
codGrano=cod_grano,
cuitVendedor=cuit_vendedor,
nroIngBrutoVendedor=nro_ing_bruto_vendedor,
actuaCorredor=actua_corredor,
liquidaCorredor=liquida_corredor,
cuitCorredor=cuit_corredor,
comisionCorredor=comision_corredor,
nroIngBrutoCorredor=nro_ing_bruto_corredor,
fechaPrecioOperacion=fecha_precio_operacion,
precioRefTn=precio_ref_tn,
codGradoRef=cod_grado_ref,
codGradoEnt=cod_grado_ent,
valGradoEnt=val_grado_ent,
factorEnt=factor_ent,
precioFleteTn=precio_flete_tn,
contProteico=cont_proteico,
alicIvaOperacion=alic_iva_operacion,
campaniaPPal=campania_ppal,
codLocalidadProcedencia=cod_localidad_procedencia,
codProvProcedencia=cod_prov_procedencia,
datosAdicionales=datos_adicionales,
pesoNetoSinCertificado=peso_neto_sin_certificado,
numeroContrato=nro_contrato or None,
certificados=[],
)
# para compatibilidad hacia atras, "copiar" los campos si no hay cert:
if peso_neto_sin_certificado:
if cod_localidad_procedencia_sin_certificado is None:
cod_localidad_procedencia_sin_certificado = cod_localidad_procedencia
if cod_prov_procedencia_sin_certificado is None:
cod_prov_procedencia_sin_certificado = cod_prov_procedencia
self.liquidacion.update(dict(
codLocalidadProcedenciaSinCertificado=cod_localidad_procedencia_sin_certificado,
codProvProcedenciaSinCertificado=cod_prov_procedencia_sin_certificado,
))
        # inicializo las listas que contendrán las retenciones y deducciones:
self.retenciones = []
self.deducciones = []
self.percepciones = []
self.opcionales = [] # para anticipo
# limpio las estructuras internas no utilizables en este caso
self.certificacion = None
return True
@inicializar_y_capturar_excepciones
def CrearLiqSecundariaBase(self, pto_emision=1, nro_orden=None,
nro_contrato=None,
cuit_comprador=None, nro_ing_bruto_comprador=None,
cod_puerto=None, des_puerto_localidad=None,
cod_grano=None, cantidad_tn=None,
cuit_vendedor=None, nro_act_vendedor=None, # nuevo!!
nro_ing_bruto_vendedor=None,
actua_corredor=None, liquida_corredor=None, cuit_corredor=None,
nro_ing_bruto_corredor=None,
fecha_precio_operacion=None, precio_ref_tn=None,
precio_operacion=None, alic_iva_operacion=None, campania_ppal=None,
cod_localidad_procedencia=None, cod_prov_procedencia=None,
datos_adicionales=None,
**kwargs):
"Inicializa los datos de una liquidación secundaria de granos (base)"
# creo el diccionario con los campos generales de la liquidación:
self.liquidacion = dict(
ptoEmision=pto_emision, nroOrden=nro_orden,
numeroContrato=nro_contrato or None, cuitComprador=cuit_comprador,
nroIngBrutoComprador=nro_ing_bruto_comprador,
codPuerto=cod_puerto, desPuertoLocalidad=des_puerto_localidad,
codGrano=cod_grano, cantidadTn=cantidad_tn,
cuitVendedor=cuit_vendedor, nroActVendedor=nro_act_vendedor,
nroIngBrutoVendedor=nro_ing_bruto_vendedor,
actuaCorredor=actua_corredor, liquidaCorredor=liquida_corredor,
cuitCorredor=cuit_corredor or None,
nroIngBrutoCorredor=nro_ing_bruto_corredor or None,
fechaPrecioOperacion=fecha_precio_operacion,
precioRefTn=precio_ref_tn, precioOperacion=precio_operacion,
alicIvaOperacion=alic_iva_operacion or None,
campaniaPPal=campania_ppal,
codLocalidad=cod_localidad_procedencia,
codProvincia=cod_prov_procedencia,
datosAdicionales=datos_adicionales,
)
        # inicializo las listas que contendrán las retenciones y deducciones:
self.deducciones = []
self.percepciones = []
self.opcionales = []
self.factura_papel = None
return True
@inicializar_y_capturar_excepciones
def AgregarCertificado(self, tipo_certificado_deposito=None,
nro_certificado_deposito=None,
peso_neto=None,
cod_localidad_procedencia=None,
cod_prov_procedencia=None,
campania=None, fecha_cierre=None,
peso_neto_total_certificado=None,
coe_certificado_deposito=None, # WSLPGv1.6
**kwargs):
"Agrego el certificado a la liquidación / certificación de granos"
# limpio campos opcionales:
if not peso_neto_total_certificado:
peso_neto_total_certificado = None # 0 no es válido
# coe_certificado_deposito no es para LPG, unificar en futuras versiones
if tipo_certificado_deposito and int(tipo_certificado_deposito) == 332:
if coe_certificado_deposito and long(coe_certificado_deposito):
nro_certificado_deposito = coe_certificado_deposito
coe_certificado_deposito = None
cert = dict(
tipoCertificadoDeposito=tipo_certificado_deposito,
nroCertificadoDeposito=nro_certificado_deposito,
pesoNeto=peso_neto,
codLocalidadProcedencia=cod_localidad_procedencia,
codProvProcedencia=cod_prov_procedencia,
campania=campania,
fechaCierre=fecha_cierre,
pesoNetoTotalCertificado=peso_neto_total_certificado,
coeCertificadoDeposito=coe_certificado_deposito,
coe=coe_certificado_deposito, # WSLPGv1.17
pesoAjustado=peso_neto, # WSLPGv1.17
)
if self.liquidacion:
self.liquidacion['certificados'].append({'certificado': cert})
else:
self.certificacion['retiroTransferencia']['certificadoDeposito'] = cert
return True
@inicializar_y_capturar_excepciones
def AgregarRetencion(self, codigo_concepto, detalle_aclaratorio,
base_calculo, alicuota,
nro_certificado_retencion=None,
fecha_certificado_retencion=None,
importe_certificado_retencion=None,
**kwargs):
"Agrega la información referente a las retenciones de la liquidación"
# limpio los campos opcionales:
if fecha_certificado_retencion is not None and not fecha_certificado_retencion.strip():
fecha_certificado_retencion = None
if importe_certificado_retencion is not None and not float(importe_certificado_retencion):
importe_certificado_retencion = None
if nro_certificado_retencion is not None and not int(nro_certificado_retencion):
nro_certificado_retencion = None
self.retenciones.append(dict(
retencion=dict(
codigoConcepto=codigo_concepto,
detalleAclaratorio=detalle_aclaratorio,
baseCalculo=base_calculo,
alicuota=alicuota,
nroCertificadoRetencion=nro_certificado_retencion,
fechaCertificadoRetencion=fecha_certificado_retencion,
importeCertificadoRetencion=importe_certificado_retencion,
))
)
return True
@inicializar_y_capturar_excepciones
def AgregarDeduccion(self, codigo_concepto=None, detalle_aclaratorio=None,
dias_almacenaje=None, precio_pkg_diario=None,
comision_gastos_adm=None, base_calculo=None,
alicuota=None, **kwargs):
"Agrega la información referente a las deducciones de la liquidación."
# limpiar campo según validación (comision_gastos_adm puede ser 0.00!)
if codigo_concepto != "CO" and comision_gastos_adm is not None \
and float(comision_gastos_adm) == 0:
comision_gastos_adm = None
# no enviar campos para prevenir errores AFIP 1705, 1707, 1708
if base_calculo is not None:
if codigo_concepto == "AL":
base_calculo = None
if codigo_concepto == "CO" and float(base_calculo) == 0:
base_calculo = None # no enviar, por retrocompatibilidad
if codigo_concepto != "AL":
dias_almacenaje = None
precio_pkg_diario = None
self.deducciones.append(dict(
deduccion=dict(
codigoConcepto=codigo_concepto,
detalleAclaratorio=detalle_aclaratorio,
diasAlmacenaje=dias_almacenaje,
precioPKGdiario=precio_pkg_diario,
comisionGastosAdm=comision_gastos_adm,
baseCalculo=base_calculo,
alicuotaIva=alicuota,
))
)
return True
@inicializar_y_capturar_excepciones
def AgregarPercepcion(self, codigo_concepto=None, detalle_aclaratoria=None,
base_calculo=None, alicuota=None, importe_final=None,
**kwargs):
"Agrega la información referente a las percepciones de la liquidación"
# liquidación secundaria (sin importe final)
self.percepciones.append(dict(
percepcion=dict(
detalleAclaratoria=detalle_aclaratoria,
baseCalculo=base_calculo,
alicuota=alicuota,
importeFinal=importe_final,
))
)
return True
@inicializar_y_capturar_excepciones
def AgregarOpcional(self, codigo=None, descripcion=None, **kwargs):
"Agrega la información referente a los opcionales de la liq. seq."
self.opcionales.append(dict(
opcional=dict(
codigo=codigo,
descripcion=descripcion,
))
)
return True
@inicializar_y_capturar_excepciones
def AgregarFacturaPapel(self, nro_cai=None, nro_factura_papel=None,
fecha_factura=None, tipo_comprobante=None,
**kwargs):
self.factura_papel = dict(
nroCAI=nro_cai,
nroFacturaPapel=nro_factura_papel,
fechaFactura=fecha_factura,
tipoComprobante=tipo_comprobante,
)
return True
@inicializar_y_capturar_excepciones
def AutorizarLiquidacion(self):
"Autorizar Liquidación Primaria Electrónica de Granos"
# limpio los elementos que no correspondan por estar vacios:
if not self.liquidacion['certificados']:
del self.liquidacion['certificados']
if not self.retenciones:
self.retenciones = None
if not self.deducciones:
self.deducciones = None
if not self.percepciones:
self.percepciones = None
else:
# ajustar los nombres de campos que varian entre LPG y LSG
for it in self.percepciones:
per = it['percepcion']
per['descripcion'] = per.pop("detalleAclaratoria")
del per['baseCalculo']
del per['alicuota']
# llamo al webservice:
ret = self.client.liquidacionAutorizar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
liquidacion=self.liquidacion,
retenciones=self.retenciones,
deducciones=self.deducciones,
percepciones=self.percepciones,
)
        # analizo la respuesta
ret = ret['liqReturn']
self.__analizar_errores(ret)
self.AnalizarLiquidacion(ret.get('autorizacion'), self.liquidacion)
return True
@inicializar_y_capturar_excepciones
def AutorizarLiquidacionSecundaria(self):
"Autorizar Liquidación Secundaria Electrónica de Granos"
# extraer y adaptar los campos para liq. sec.
if self.deducciones:
self.liquidacion['deduccion'] = []
for it in self.deducciones:
ded = it['deduccion'] # no se agrupa
self.liquidacion['deduccion'].append({
'detalleAclaratoria': ded['detalleAclaratorio'],
'baseCalculo': ded['baseCalculo'],
'alicuotaIVA': ded['alicuotaIva']})
if self.percepciones:
self.liquidacion['percepcion'] = []
for it in self.percepciones:
per = it['percepcion'] # no se agrupa
self.liquidacion['percepcion'].append(per)
if self.opcionales:
self.liquidacion['opcionales'] = self.opcionales # agrupado ok
# llamo al webservice:
ret = self.client.lsgAutorizar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
liqSecundariaBase=self.liquidacion,
facturaPapel=self.factura_papel,
)
        # analizo la respuesta
ret = ret['oReturn']
self.__analizar_errores(ret)
self.AnalizarLiquidacion(ret.get('autorizacion'), self.liquidacion)
return True
@inicializar_y_capturar_excepciones
def AutorizarAnticipo(self):
"Autorizar Anticipo de una Liquidación Primaria Electrónica de Granos"
# extraer y adaptar los campos para el anticipo
anticipo = {"liquidacion": self.liquidacion}
liq = anticipo["liquidacion"]
liq["campaniaPpal"] = self.liquidacion["campaniaPPal"]
liq["codLocProcedencia"] = self.liquidacion["codLocalidadProcedencia"]
liq["descPuertoLocalidad"] = self.liquidacion["desPuertoLocalidad"]
if self.opcionales:
liq['opcionales'] = self.opcionales
if self.retenciones:
anticipo['retenciones'] = self.retenciones
if self.deducciones:
anticipo['deducciones'] = self.deducciones
# llamo al webservice:
ret = self.client.lpgAutorizarAnticipo(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
anticipo=anticipo,
)
        # analizo la respuesta
ret = ret['liqReturn']
self.__analizar_errores(ret)
self.AnalizarLiquidacion(ret.get('autorizacion'), self.liquidacion)
return True
@inicializar_y_capturar_excepciones
def CancelarAnticipo(self, pto_emision=None, nro_orden=None, coe=None,
pdf=None):
"Cancelar Anticipo de una Liquidación Primaria Electrónica de Granos"
# llamo al webservice:
ret = self.client.lpgCancelarAnticipo(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
ptoEmision=pto_emision,
nroOrden=nro_orden,
pdf="S" if pdf else "N",
)
        # analizo la respuesta
ret = ret['liqConsReturn']
self.__analizar_errores(ret)
if 'liquidacion' in ret:
aut = ret['autorizacion']
liq = ret['liquidacion']
self.AnalizarLiquidacion(aut, liq)
# guardo el PDF si se indico archivo y vino en la respuesta:
if pdf and 'pdf' in ret:
open(pdf, "wb").write(ret['pdf'])
return True
def AnalizarLiquidacion(self, aut, liq=None, ajuste=False):
"Método interno para analizar la respuesta de AFIP"
# proceso los datos básicos de la liquidación (devuelto por consultar):
if liq:
self.params_out = dict(
pto_emision=liq.get('ptoEmision'),
nro_orden=liq.get('nroOrden'),
cuit_comprador=liq.get('cuitComprador'),
nro_act_comprador=liq.get('nroActComprador'),
nro_ing_bruto_comprador=liq.get('nroIngBrutoComprador'),
cod_tipo_operacion=liq.get('codTipoOperacion'),
es_liquidacion_propia=liq.get('esLiquidacionPropia'),
es_canje=liq.get('esCanje'),
cod_puerto=liq.get('codPuerto'),
des_puerto_localidad=liq.get('desPuertoLocalidad'),
cod_grano=liq.get('codGrano'),
cuit_vendedor=liq.get('cuitVendedor'),
nro_ing_bruto_vendedor=liq.get('nroIngBrutoVendedor'),
actua_corredor=liq.get('actuaCorredor'),
liquida_corredor=liq.get('liquidaCorredor'),
cuit_corredor=liq.get('cuitCorredor'),
comision_corredor=liq.get('comisionCorredor'),
nro_ing_bruto_corredor=liq.get('nroIngBrutoCorredor'),
fecha_precio_operacion=liq.get('fechaPrecioOperacion'),
precio_ref_tn=liq.get('precioRefTn'),
cod_grado_ref=liq.get('codGradoRef'),
cod_grado_ent=liq.get('codGradoEnt'),
factor_ent=liq.get('factorEnt'),
precio_flete_tn=liq.get('precioFleteTn'),
cont_proteico=liq.get('contProteico'),
alic_iva_operacion=liq.get('alicIvaOperacion'),
campania_ppal=liq.get('campaniaPPal'),
cod_localidad_procedencia=liq.get('codLocalidadProcedencia'),
cod_prov_procedencia=liq.get('codProvProcedencia'),
datos_adicionales=liq.get('datosAdicionales'),
peso_neto_sin_certificado=liq.get('pesoNetoSinCertificado'),
cod_localidad_procedencia_sin_certificado=liq.get('codLocalidadProcedenciaSinCertificado'),
cod_prov_procedencia_sin_certificado=liq.get('codProvProcedenciaSinCertificado'),
certificados=[],
)
if ajuste:
self.params_out.update(
# ajustes:
diferencia_peso_neto=liq.get('diferenciaPesoNeto'),
diferencia_precio_operacion=liq.get('diferenciaPrecioOperacion'),
cod_grado=liq.get('codGrado'),
val_grado=liq.get('valGrado'),
factor=liq.get('factor'),
diferencia_precio_flete_tn=liq.get('diferenciaPrecioFleteTn'),
concepto_importe_iva_0=liq.get('conceptoImporteIva0'),
importe_ajustar_iva_0=liq.get('importeAjustarIva0'),
concepto_importe_iva_105=liq.get('conceptoImporteIva105'),
importe_ajustar_iva_105=liq.get('importeAjustarIva105'),
concepto_importe_iva_21=liq.get('conceptoImporteIva21'),
importe_ajustar_iva_21=liq.get('importeAjustarIva21'),
)
# analizar detalle de importes ajustados discriminados por alicuota
                # (por compatibilidad y consistencia se usan los mismos campos)
                for it in (liq.get("importes", liq.get("importe")) or []):
# en ajustes LSG no se agrupan los importes en un subtipo...
if 'importeReturn' in it:
it = it['importeReturn'][0] # TODO: revisar SOAP
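                    # se arma el sufijo según la alícuota devuelta,
                    # p.ej. 10.5 -> 'iva_105' y 21 -> 'iva_21'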
tasa = "iva_%s" % str(it['alicuota']).replace(".", "").strip()
self.params_out["concepto_importe_%s" % tasa] = it['concepto']
self.params_out["importe_ajustar_%s" % tasa] = it['importe']
self.params_out["iva_calculado_%s" % tasa] = it['ivaCalculado']
if 'certificados' in liq:
for c in liq['certificados']:
cert = c['certificado']
self.params_out['certificados'].append(dict(
tipo_certificado_deposito=cert['tipoCertificadoDeposito'],
nro_certificado_deposito=cert['nroCertificadoDeposito'],
peso_neto=cert['pesoNeto'],
cod_localidad_procedencia=cert['codLocalidadProcedencia'],
cod_prov_procedencia=cert['codProvProcedencia'],
campania=cert['campania'],
fecha_cierre=cert['fechaCierre'],
))
self.params_out['errores'] = self.errores
# proceso la respuesta de autorizar, ajustar (y consultar):
if aut:
self.TotalDeduccion = aut.get('totalDeduccion')
self.TotalRetencion = aut.get('totalRetencion')
self.TotalRetencionAfip = aut.get('totalRetencionAfip')
self.TotalOtrasRetenciones = aut.get('totalOtrasRetenciones')
self.TotalNetoAPagar = aut.get('totalNetoAPagar')
self.TotalIvaRg4310_18 = aut.get('totalIvaRg4310_18')
self.TotalPagoSegunCondicion = aut.get('totalPagoSegunCondicion')
self.COE = str(aut.get('coe', ''))
self.COEAjustado = aut.get('coeAjustado')
self.Estado = aut.get('estado', '')
self.NroContrato = aut.get('numeroContrato', '')
# actualizo parámetros de salida:
self.params_out['coe'] = self.COE
self.params_out['coe_ajustado'] = self.COEAjustado
self.params_out['estado'] = self.Estado
self.params_out['total_deduccion'] = self.TotalDeduccion
self.params_out['total_retencion'] = self.TotalRetencion
self.params_out['total_retencion_afip'] = self.TotalRetencionAfip
self.params_out['total_otras_retenciones'] = self.TotalOtrasRetenciones
self.params_out['total_neto_a_pagar'] = self.TotalNetoAPagar
self.params_out['total_iva_rg_4310_18'] = self.TotalIvaRg4310_18
self.params_out['total_pago_segun_condicion'] = self.TotalPagoSegunCondicion
# datos adicionales:
self.NroOrden = self.params_out['nro_orden'] = aut.get('nroOrden')
self.params_out['cod_tipo_ajuste'] = aut.get('codTipoAjuste')
fecha = aut.get('fechaLiquidacion')
if fecha:
fecha = str(fecha)
self.params_out['fecha_liquidacion'] = fecha
self.params_out['importe_iva'] = aut.get('importeIva')
self.params_out['nro_op_comercial'] = aut.get('nroOpComercial')
self.params_out['operacion_con_iva'] = aut.get('operacionConIva')
self.params_out['precio_operacion'] = aut.get('precioOperacion')
self.params_out['total_peso_neto'] = aut.get('totalPesoNeto')
self.params_out['subtotal'] = aut.get('subTotal')
# LSG (especificos):
self.params_out['total_deducciones'] = aut.get('totalDeducciones')
if 'todalPercepciones' in aut:
# error de tipeo en el WSDL de AFIP...
self.params_out['total_percepciones'] = aut.get('todalPercepciones')
else:
self.params_out['total_percepciones'] = aut.get('totalPercepciones')
# sub estructuras:
self.params_out['retenciones'] = []
self.params_out['deducciones'] = []
self.params_out['percepciones'] = []
for retret in aut.get("retenciones", []):
retret = retret['retencionReturn']
self.params_out['retenciones'].append({
'importe_retencion': retret['importeRetencion'],
'alicuota': retret['retencion'].get('alicuota'),
'base_calculo': retret['retencion'].get('baseCalculo'),
'codigo_concepto': retret['retencion'].get('codigoConcepto'),
'detalle_aclaratorio': (retret['retencion'].get('detalleAclaratorio') or "").replace("\n", ""),
'importe_certificado_retencion': retret['retencion'].get('importeCertificadoRetencion'),
'nro_certificado_retencion': retret['retencion'].get('nroCertificadoRetencion'),
'fecha_certificado_retencion': retret['retencion'].get('fechaCertificadoRetencion'),
})
for dedret in aut.get("deducciones", []):
dedret = dedret['deduccionReturn']
self.params_out['deducciones'].append({
'importe_deduccion': dedret['importeDeduccion'],
'importe_iva': dedret.get('importeIva'),
'alicuota': dedret['deduccion'].get('alicuotaIva'),
'base_calculo': dedret['deduccion'].get('baseCalculo'),
'codigo_concepto': dedret['deduccion'].get('codigoConcepto'),
'detalle_aclaratorio': dedret['deduccion'].get('detalleAclaratorio', "").replace("\n", ""),
'dias_almacenaje': dedret['deduccion'].get('diasAlmacenaje'),
'precio_pkg_diario': dedret['deduccion'].get('precioPKGdiario'),
'comision_gastos_adm': dedret['deduccion'].get('comisionGastosAdm'),
})
for perret in aut.get("percepciones", []):
perret = perret.get('percepcionReturn', perret)
self.params_out['percepciones'].append({
'importe_final': perret['percepcion']['importeFinal'],
'alicuota': perret['percepcion'].get('alicuota'),
'base_calculo': perret['percepcion'].get('baseCalculo'),
'descripcion': perret['percepcion'].get('descripcion', "").replace("\n", ""),
})
@inicializar_y_capturar_excepciones
def CrearAjusteBase(self,
pto_emision=1, nro_orden=None, # unificado, contrato, papel
coe_ajustado=None, # unificado
nro_contrato=None, # contrato
tipo_formulario=None, # papel
nro_formulario=None, # papel
actividad=None, # contrato / papel
cod_grano=None, # contrato / papel
cuit_vendedor=None, # contrato / papel
cuit_comprador=None, # contrato / papel
cuit_corredor=None, # contrato / papel
nro_ing_bruto_vendedor=None, # papel
nro_ing_bruto_comprador=None, # papel
nro_ing_bruto_corredor=None, # papel
tipo_operacion=None, # papel
precio_ref_tn=None, # contrato
cod_grado_ent=None, # contrato
val_grado_ent=None, # contrato
precio_flete_tn=None, # contrato
cod_puerto=None, # contrato
des_puerto_localidad=None, # contrato
cod_provincia=None, # unificado, contrato, papel
cod_localidad=None, # unificado, contrato, papel
comision_corredor=None, # papel
**kwargs
):
"Inicializa internamente los datos de una liquidación para ajustar"
# ajusto nombre de campos para compatibilidad hacia atrás (encabezado):
if 'cod_localidad_procedencia' in kwargs:
cod_localidad = kwargs['cod_localidad_procedencia']
if 'cod_provincia_procedencia' in kwargs:
cod_provincia = kwargs['cod_provincia_procedencia']
if 'nro_act_comprador' in kwargs:
actividad = kwargs['nro_act_comprador']
if 'cod_tipo_operacion' in kwargs:
tipo_operacion = kwargs['cod_tipo_operacion']
# limpio los campos especiales (segun validaciones de AFIP)
if val_grado_ent == 0:
val_grado_ent = None
# borrando datos si no corresponden
if cuit_corredor and int(cuit_corredor) == 0:
cuit_corredor = None
comision_corredor = None
nro_ing_bruto_corredor = None
if cod_puerto and int(cod_puerto) != 14:
des_puerto_localidad = None # validacion 1630
# limpio los campos opcionales para no enviarlos si no corresponde:
if cod_grado_ent == "":
cod_grado_ent = None
if val_grado_ent == 0:
val_grado_ent = None
# creo el diccionario con los campos generales del ajuste base:
self.ajuste = { 'ajusteBase': {
'ptoEmision': pto_emision,
'nroOrden': nro_orden,
'coeAjustado': coe_ajustado,
'nroContrato': nro_contrato,
'tipoFormulario': tipo_formulario,
'nroFormulario': nro_formulario,
'actividad': actividad,
'codGrano': cod_grano,
'cuitVendedor': cuit_vendedor,
'cuitComprador': cuit_comprador,
'cuitCorredor': cuit_corredor,
'nroIngBrutoVendedor': nro_ing_bruto_vendedor,
'nroIngBrutoComprador': nro_ing_bruto_comprador,
'nroIngBrutoCorredor': nro_ing_bruto_corredor,
'tipoOperacion': tipo_operacion,
'codPuerto': cod_puerto,
'desPuertoLocalidad': des_puerto_localidad,
'comisionCorredor': comision_corredor,
'precioRefTn': precio_ref_tn,
'codGradoEnt': cod_grado_ent,
'valGradoEnt': val_grado_ent,
'precioFleteTn': precio_flete_tn,
'codLocalidad': cod_localidad,
'codProv': cod_provincia,
'certificados': [],
}
}
# para compatibilidad con AgregarCertificado
self.liquidacion = self.ajuste['ajusteBase']
# inicializar temporales
self.__ajuste_base = None
self.__ajuste_debito = None
self.__ajuste_credito = None
return True
@inicializar_y_capturar_excepciones
def CrearAjusteCredito(self,
datos_adicionales=None, # unificado, contrato, papel
concepto_importe_iva_0=None, # unificado, contrato, papel
importe_ajustar_iva_0=None, # unificado, contrato, papel
concepto_importe_iva_105=None, # unificado, contrato, papel
importe_ajustar_iva_105=None, # unificado, contrato, papel
concepto_importe_iva_21=None, # unificado, contrato, papel
importe_ajustar_iva_21=None, # unificado, contrato, papel
diferencia_peso_neto=None, # unificado
diferencia_precio_operacion=None, # unificado
cod_grado=None, # unificado
val_grado=None, # unificado
factor=None, # unificado
diferencia_precio_flete_tn=None, # unificado
**kwargs
):
"Inicializa internamente los datos del crédito del ajuste"
self.ajuste['ajusteCredito'] = {
'diferenciaPesoNeto': diferencia_peso_neto,
'diferenciaPrecioOperacion': diferencia_precio_operacion,
'codGrado': cod_grado,
'valGrado': val_grado,
'factor': factor,
'diferenciaPrecioFleteTn': diferencia_precio_flete_tn,
'datosAdicionales': datos_adicionales,
'opcionales': None,
'conceptoImporteIva0': concepto_importe_iva_0,
'importeAjustarIva0': importe_ajustar_iva_0,
'conceptoImporteIva105': concepto_importe_iva_105,
'importeAjustarIva105': importe_ajustar_iva_105,
'conceptoImporteIva21': concepto_importe_iva_21,
'importeAjustarIva21': importe_ajustar_iva_21,
'deducciones': [],
'retenciones': [],
'percepciones': [],
'certificados': [],
}
# vinculación con AgregarOpcional:
self.opcionales = self.ajuste['ajusteCredito']['opcionales']
# vinculación con AgregarRetencion y AgregarDeduccion
self.deducciones = self.ajuste['ajusteCredito']['deducciones']
self.retenciones = self.ajuste['ajusteCredito']['retenciones']
# para LSG:
self.percepciones = self.ajuste['ajusteCredito']['percepciones']
# para compatibilidad con AgregarCertificado (WSLPGv1.17)
self.liquidacion = self.ajuste['ajusteCredito']
return True
@inicializar_y_capturar_excepciones
def CrearAjusteDebito(self,
datos_adicionales=None, # unificado, contrato, papel
concepto_importe_iva_0=None, # unificado, contrato, papel
importe_ajustar_iva_0=None, # unificado, contrato, papel
concepto_importe_iva_105=None, # unificado, contrato, papel
importe_ajustar_iva_105=None, # unificado, contrato, papel
concepto_importe_iva_21=None, # unificado, contrato, papel
importe_ajustar_iva_21=None, # unificado, contrato, papel
diferencia_peso_neto=None, # unificado
diferencia_precio_operacion=None, # unificado
cod_grado=None, # unificado
val_grado=None, # unificado
factor=None, # unificado
diferencia_precio_flete_tn=None, # unificado
**kwargs
):
"Inicializa internamente los datos del crédito del ajuste"
self.ajuste['ajusteDebito'] = {
'diferenciaPesoNeto': diferencia_peso_neto,
'diferenciaPrecioOperacion': diferencia_precio_operacion,
'codGrado': cod_grado,
'valGrado': val_grado,
'factor': factor,
'diferenciaPrecioFleteTn': diferencia_precio_flete_tn,
'datosAdicionales': datos_adicionales,
'opcionales': None,
'conceptoImporteIva0': concepto_importe_iva_0,
'importeAjustarIva0': importe_ajustar_iva_0,
'conceptoImporteIva105': concepto_importe_iva_105,
'importeAjustarIva105': importe_ajustar_iva_105,
'conceptoImporteIva21': concepto_importe_iva_21,
'importeAjustarIva21': importe_ajustar_iva_21,
'deducciones': [],
'retenciones': [],
'percepciones': [],
'certificados': [],
}
# vinculación con AgregarOpcional:
self.opcionales = self.ajuste['ajusteDebito']['opcionales']
# vinculación con AgregarRetencion y AgregarDeduccion
self.deducciones = self.ajuste['ajusteDebito']['deducciones']
self.retenciones = self.ajuste['ajusteDebito']['retenciones']
# para LSG:
self.percepciones = self.ajuste['ajusteDebito']['percepciones']
# para compatibilidad con AgregarCertificado (WSLPGv1.17)
self.liquidacion = self.ajuste['ajusteDebito']
return True
def AgregarFusion(self, nro_ing_brutos, nro_actividad, **kwargs):
"Datos de comprador o vendedor según liquidación a ajustar (fusión.)"
self.ajuste['ajusteBase']['fusion'] = {'nroIngBrutos': nro_ing_brutos,
'nroActividad': nro_actividad,
}
return True
@inicializar_y_capturar_excepciones
def AjustarLiquidacionUnificado(self):
"Ajustar Liquidación Primaria de Granos"
# limpiar estructuras no utilizadas (si no hay deducciones / retenciones)
for k in ('ajusteDebito', 'ajusteCredito'):
if not any(self.ajuste[k].values()):
del self.ajuste[k]
else:
if not self.ajuste[k]['deducciones']:
del self.ajuste[k]['deducciones']
if not self.ajuste[k]['retenciones']:
del self.ajuste[k]['retenciones']
# llamar al webservice:
ret = self.client.liquidacionAjustarUnificado(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
**self.ajuste
)
# analizar el resultado:
ret = ret['ajusteUnifReturn']
self.__analizar_errores(ret)
if 'ajusteUnificado' in ret:
aut = ret['ajusteUnificado']
self.AnalizarAjuste(aut)
return True
@inicializar_y_capturar_excepciones
def AjustarLiquidacionUnificadoPapel(self):
"Ajustar Liquidación realizada en un formulario F1116 B / C (papel)"
# limpiar arrays no enviados:
if not self.ajuste['ajusteBase']['certificados']:
del self.ajuste['ajusteBase']['certificados']
for k1 in ('ajusteCredito', 'ajusteDebito'):
for k2 in ('retenciones', 'deducciones'):
if not self.ajuste[k1][k2]:
del self.ajuste[k1][k2]
ret = self.client.liquidacionAjustarUnificadoPapel(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
**self.ajuste
)
ret = ret['ajustePapelReturn']
self.__analizar_errores(ret)
if 'ajustePapel' in ret:
aut = ret['ajustePapel']
self.AnalizarAjuste(aut)
return True
@inicializar_y_capturar_excepciones
def AjustarLiquidacionContrato(self):
"Ajustar Liquidación activas relacionadas a un contrato"
# limpiar arrays no enviados:
if not self.ajuste['ajusteBase']['certificados']:
del self.ajuste['ajusteBase']['certificados']
for k1 in ('ajusteCredito', 'ajusteDebito'):
for k2 in ('retenciones', 'deducciones'):
if not self.ajuste[k1][k2]:
del self.ajuste[k1][k2]
ret = self.client.liquidacionAjustarContrato(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
**self.ajuste
)
ret = ret['ajusteContratoReturn']
self.__analizar_errores(ret)
if 'ajusteContrato' in ret:
aut = ret['ajusteContrato']
self.AnalizarAjuste(aut)
return True
@inicializar_y_capturar_excepciones
def AjustarLiquidacionSecundaria(self):
"Ajustar Liquidación Secundaria de Granos"
# limpiar estructuras no utilizadas (si no hay deducciones / retenciones)
for k in ('ajusteDebito', 'ajusteCredito'):
if k not in self.ajuste:
# ignorar si no se agrego estructura ajuste credito / debito
continue
elif not any(self.ajuste[k].values()):
# eliminar estructura vacia credito / debito
del self.ajuste[k]
else:
# ajustar cambios de nombre entre LSG y LPG
for tasa in ("0", "105", "21"):
tasa_lsg = "10" if tasa == "105" else tasa
self.ajuste[k]['importeAjustar%s' % tasa_lsg] = self.ajuste[k]['importeAjustarIva%s' % tasa]
self.ajuste[k]['conceptoIva%s' % tasa_lsg] = self.ajuste[k]['conceptoImporteIva%s' % tasa]
# no enviar tag percepciones vacio (no agrupar en subtipo)
if self.ajuste[k]['percepciones']:
self.ajuste[k]['percepcion'] = [
per["percepcion"] for per
in self.ajuste[k]['percepciones']]
del self.ajuste[k]['percepciones']
base = self.ajuste['ajusteBase']
base['coe'] = base['coeAjustado']
base['codProvincia'] = base['codProv']
# llamar al webservice:
if base['nroContrato'] is not None and long(base['nroContrato']):
metodo = self.client.lsgAjustarXContrato
else:
metodo = self.client.lsgAjustarXCoe
ret = metodo(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ajusteCredito=self.ajuste.get('ajusteCredito'),
ajusteDebito=self.ajuste.get('ajusteDebito'),
**base
)
# analizar el resultado:
ret = ret['oReturn']
self.__analizar_errores(ret)
if ret:
self.AnalizarAjuste(ret)
return True
def AnalizarAjuste(self, aut, base=True):
"Método interno para analizar la respuesta de AFIP (ajustes)"
self.__ajuste_base = None
self.__ajuste_debito = None
self.__ajuste_credito = None
# para compatibilidad con la generacion de PDF (completo datos)
if hasattr(self, "liquidacion") and self.liquidacion and base:
self.AnalizarLiquidacion(aut=None, liq=self.liquidacion)
self.params_out['errores'] = self.errores
# proceso la respuesta de autorizar, ajustar (y consultar):
if aut:
# en caso de anulación o no ser ajuste, ahora no devuelve datos:
self.COE = str(aut.get('coe', ""))
self.COEAjustado = aut.get('coeAjustado')
self.NroContrato = aut.get('nroContrato')
self.Estado = aut.get('estado', "")
totunif = aut.get("totalesUnificados") or {}
self.Subtotal = totunif.get('subTotalGeneral')
self.TotalIva105 = totunif.get('iva105')
self.TotalIva21 = totunif.get('iva21')
self.TotalRetencionesGanancias = totunif.get('retencionesGanancias')
self.TotalRetencionesIVA = totunif.get('retencionesIVA')
self.TotalOtrasRetenciones = totunif.get('importeOtrasRetenciones')
self.TotalNetoAPagar = totunif.get('importeNeto')
self.TotalIvaRg4310_18 = totunif.get('ivaRG4310_18')
self.TotalPagoSegunCondicion = totunif.get('pagoSCondicion')
# actualizo parámetros de salida:
self.params_out['coe'] = self.COE
self.params_out['coe_ajustado'] = self.COEAjustado
self.params_out['estado'] = self.Estado
self.params_out['nro_orden'] = aut.get('nroOrden')
self.params_out['cod_tipo_operacion'] = aut.get('codTipoOperacion')
self.params_out['nro_contrato'] = aut.get('nroContrato')
self.params_out['nro_op_comercial'] = aut.get('nroOpComercial', "")
# actualizo totales solo para ajuste base (liquidacion general)
if base:
self.params_out['subtotal'] = self.Subtotal
self.params_out['iva_deducciones'] = totunif.get('ivaDeducciones')
self.params_out['subtotal_deb_cred'] = totunif.get('subTotalDebCred')
self.params_out['total_base_deducciones'] = totunif.get('totalBaseDeducciones')
self.params_out['total_iva_10_5'] = self.TotalIva105
self.params_out['total_iva_21'] = self.TotalIva21
self.params_out['total_retenciones_ganancias'] = self.TotalRetencionesGanancias
self.params_out['total_retenciones_iva'] = self.TotalRetencionesIVA
self.params_out['total_otras_retenciones'] = self.TotalOtrasRetenciones
self.params_out['total_neto_a_pagar'] = self.TotalNetoAPagar
self.params_out['total_iva_rg_4310_18'] = self.TotalIvaRg4310_18
self.params_out['total_pago_segun_condicion'] = self.TotalPagoSegunCondicion
# almaceno los datos de ajustes crédito y débito para usarlos luego
self.__ajuste_base = aut
self.__ajuste_debito = aut.get('ajusteDebito') or {}
self.__ajuste_credito = aut.get('ajusteCredito') or {}
return True
@inicializar_y_capturar_excepciones
def AnalizarAjusteDebito(self):
"Método para analizar la respuesta de AFIP para Ajuste Debito"
# para compatibilidad con la generacion de PDF (completo datos)
liq = {}
if hasattr(self, "liquidacion") and self.liquidacion:
liq.update(self.liquidacion)
if hasattr(self, "ajuste") and 'ajusteDebito' in self.ajuste:
liq.update(self.ajuste['ajusteDebito'])
if self.__ajuste_debito:
liq.update(self.__ajuste_debito)
self.AnalizarLiquidacion(aut=self.__ajuste_debito, liq=liq, ajuste=True)
self.AnalizarAjuste(self.__ajuste_base, base=False) # datos generales
return True
@inicializar_y_capturar_excepciones
def AnalizarAjusteCredito(self):
"Método para analizar la respuesta de AFIP para Ajuste Credito"
liq = {}
if hasattr(self, "liquidacion") and self.liquidacion:
liq.update(self.liquidacion)
if hasattr(self, "ajuste") and 'ajusteCredito' in self.ajuste:
liq.update(self.ajuste['ajusteCredito'])
if self.__ajuste_credito:
liq.update(self.__ajuste_credito)
self.AnalizarLiquidacion(aut=self.__ajuste_credito, liq=liq, ajuste=True)
self.AnalizarAjuste(self.__ajuste_base, base=False) # datos generales
return True
@inicializar_y_capturar_excepciones
def CrearCertificacionCabecera(self, pto_emision=1, nro_orden=None,
tipo_certificado=None, nro_planta=None,
nro_ing_bruto_depositario=None, titular_grano=None,
cuit_depositante=None, nro_ing_bruto_depositante=None,
cuit_corredor=None, cod_grano=None, campania=None,
datos_adicionales=None,
**kwargs):
"Inicializa los datos de una certificación de granos (cabecera)"
self.certificacion = {}
self.certificacion['cabecera'] = dict(
ptoEmision=pto_emision,
nroOrden=nro_orden,
tipoCertificado=tipo_certificado,
nroPlanta=nro_planta or None, # opcional
nroIngBrutoDepositario=nro_ing_bruto_depositario,
titularGrano=titular_grano,
cuitDepositante=cuit_depositante or None, # opcional
nroIngBrutoDepositante=nro_ing_bruto_depositante or None, # opcional
cuitCorredor=cuit_corredor or None, # opcional
codGrano=cod_grano,
campania=campania,
datosAdicionales=datos_adicionales, # opcional
)
# limpio las estructuras internas no utilizables en este caso
self.liquidacion = None
return True
@inicializar_y_capturar_excepciones
def AgregarCertificacionPrimaria(self,
nro_act_depositario=None,
descripcion_tipo_grano=None,
monto_almacenaje=None, monto_acarreo=None,
monto_gastos_generales=None, monto_zarandeo=None,
porcentaje_secado_de=None, porcentaje_secado_a=None,
monto_secado=None, monto_por_cada_punto_exceso=None,
monto_otros=None,
porcentaje_merma_volatil=None, peso_neto_merma_volatil=None,
porcentaje_merma_secado=None, peso_neto_merma_secado=None,
porcentaje_merma_zarandeo=None, peso_neto_merma_zarandeo=None,
peso_neto_certificado=None, servicios_secado=None,
servicios_zarandeo=None, servicios_otros=None,
servicios_forma_de_pago=None,
**kwargs):
# compatibilidad hacia atras: utilizar nuevos campos mas amplio
v = None
if 'servicio_otros' in kwargs:
v = kwargs.get('servicio_otros')
if isinstance(v, basestring) and v and not v.isalpha():
v = float(v)
if v:
servicios_otros = v
if not v:
warnings.warn("Usar servicio_otros para mayor cantidad de digitos")
self.certificacion['primaria'] = dict(
nroActDepositario=nro_act_depositario,
ctg=[], # <!--0 or more repetitions:-->
descripcionTipoGrano=descripcion_tipo_grano,
montoAlmacenaje=monto_almacenaje,
montoAcarreo=monto_acarreo,
montoGastosGenerales=monto_gastos_generales,
montoZarandeo=monto_zarandeo,
porcentajeSecadoDe=porcentaje_secado_de,
porcentajeSecadoA=porcentaje_secado_a,
montoSecado=monto_secado,
montoPorCadaPuntoExceso=monto_por_cada_punto_exceso,
montoOtros=monto_otros,
porcentajeMermaVolatil=porcentaje_merma_volatil,
pesoNetoMermaVolatil=peso_neto_merma_volatil,
porcentajeMermaSecado=porcentaje_merma_secado,
pesoNetoMermaSecado=peso_neto_merma_secado,
porcentajeMermaZarandeo=porcentaje_merma_zarandeo,
pesoNetoMermaZarandeo=peso_neto_merma_zarandeo,
pesoNetoCertificado=peso_neto_certificado,
serviciosSecado=servicios_secado or None, # opcional
serviciosZarandeo=servicios_zarandeo or None,
serviciosOtros=servicios_otros or None,
serviciosFormaDePago=servicios_forma_de_pago or None,
)
# si se pasan campos no documentados por AFIP, intentar enviarlo:
for k, kk in {
'servicios_conceptos_no_gravados': 'serviciosConceptosNoGravados',
'servicios_percepciones_iva': 'serviciosPercepcionesIva',
'servicios_otras_percepciones': 'serviciosOtrasPercepciones',
}.items():
v = kwargs.get(k)
# cuidado: si AFIP retira el campo, puede fallar si se pasa en 0
if isinstance(v, basestring) and v and not v.isalpha():
v = float(v)
if v:
self.certificacion['primaria'][kk] = v
return True
@inicializar_y_capturar_excepciones
def AgregarCertificacionRetiroTransferencia(self,
nro_act_depositario=None,
cuit_receptor=None,
fecha=None,
nro_carta_porte_a_utilizar=None,
cee_carta_porte_a_utilizar=None,
**kwargs):
self.certificacion['retiroTransferencia'] = dict(
nroActDepositario=nro_act_depositario,
cuitReceptor=cuit_receptor or None, # opcional
fecha=fecha,
nroCartaPorteAUtilizar=nro_carta_porte_a_utilizar or None,
ceeCartaPorteAUtilizar=cee_carta_porte_a_utilizar or None,
certificadoDeposito=[], # <!--0 or more repetitions:-->
)
return True
@inicializar_y_capturar_excepciones
def AgregarCertificacionPreexistente(self,
tipo_certificado_deposito_preexistente=None,
nro_certificado_deposito_preexistente=None,
cac_certificado_deposito_preexistente=None,
fecha_emision_certificado_deposito_preexistente=None,
peso_neto=None, nro_planta=None,
**kwargs):
self.certificacion['preexistente'] = dict(
tipoCertificadoDepositoPreexistente=tipo_certificado_deposito_preexistente,
nroCertificadoDepositoPreexistente=nro_certificado_deposito_preexistente,
cacCertificadoDepositoPreexistente=cac_certificado_deposito_preexistente,
fechaEmisionCertificadoDepositoPreexistente=fecha_emision_certificado_deposito_preexistente,
pesoNeto=peso_neto, nroPlanta=nro_planta,
)
return True
@inicializar_y_capturar_excepciones
def AgregarCalidad(self, analisis_muestra=None, nro_boletin=None,
cod_grado=None, valor_grado=None,
valor_contenido_proteico=None, valor_factor=None,
**kwargs):
"Agrega la información sobre la calidad, al autorizar o posteriormente"
self.certificacion['primaria']['calidad'] = dict(
analisisMuestra=analisis_muestra,
nroBoletin=nro_boletin,
codGrado=cod_grado, # G1 G2 G3 F1 F2 F3
valorGrado=valor_grado or None, # opcional
valorContProteico=valor_contenido_proteico,
valorFactor=valor_factor,
detalleMuestraAnalisis=[], # <!--1 or more repetitions:-->
)
return True
@inicializar_y_capturar_excepciones
def AgregarDetalleMuestraAnalisis(self, descripcion_rubro=None,
tipo_rubro=None, porcentaje=None,
valor=None,
**kwargs):
"Agrega la información referente al detalle de la certificación"
det = dict(
descripcionRubro=descripcion_rubro,
tipoRubro=tipo_rubro,
porcentaje=porcentaje,
valor=valor,
)
self.certificacion['primaria']['calidad']['detalleMuestraAnalisis'].append(det)
return True
@inicializar_y_capturar_excepciones
def BuscarCTG(self, tipo_certificado="P", cuit_depositante=None,
nro_planta=None, cod_grano=2, campania=1314,
nro_ctg=None, tipo_ctg=None, nro_carta_porte=None,
fecha_confirmacion_ctg_des=None,
fecha_confirmacion_ctg_has=None,
):
"Devuelve los CTG/Carta de porte que se puede incluir en un certificado"
ret = self.client.cgBuscarCtg(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
tipoCertificado=tipo_certificado,
cuitDepositante=cuit_depositante or self.Cuit,
nroPlanta=nro_planta,
codGrano=cod_grano, campania=campania,
nroCtg=nro_ctg, tipoCtg=tipo_ctg,
nroCartaPorte=nro_carta_porte,
fechaConfirmacionCtgDes=fecha_confirmacion_ctg_des,
fechaConfirmacionCtgHas=fecha_confirmacion_ctg_has,
)['oReturn']
self.__analizar_errores(ret)
array = ret.get('ctg', [])
self.Excepcion = self.Traceback = ""
self.params_out['ctgs'] = []
for ctg in array:
self.params_out['ctgs'].append({
'campania': ctg.get('campania'),
'nro_planta': ctg.get('nroPlanta'),
'nro_ctg': ctg.get('nroCtg'),
'tipo_ctg': ctg.get('tipoCtg'),
'nro_carta_porte': ctg.get('nroCartaPorte'),
'kilos_confirmados': ctg.get('kilosConfirmados'),
'fecha_confirmacion_ctg': ctg.get('fechaConfirmacionCtg'),
'cod_grano': ctg.get('codGrano'),
'cuit_remitente_comercial': ctg.get('cuitRemitenteComercial'),
'cuit_liquida': ctg.get('cuitLiquida'),
'cuit_certifica': ctg.get('cuitCertifica'),
})
return True
@inicializar_y_capturar_excepciones
def AgregarCTG(self, nro_ctg=None, nro_carta_porte=None,
porcentaje_secado_humedad=None, importe_secado=None,
peso_neto_merma_secado=None, tarifa_secado=None,
importe_zarandeo=None, peso_neto_merma_zarandeo=None,
tarifa_zarandeo=None,
peso_neto_confirmado_definitivo=None,
**kwargs):
"Agrega la información referente a una CTG de la certificación"
ctg = dict(
nroCTG=nro_ctg,
nroCartaDePorte=nro_carta_porte,
pesoNetoConfirmadoDefinitivo=peso_neto_confirmado_definitivo,
porcentajeSecadoHumedad=porcentaje_secado_humedad,
importeSecado=importe_secado,
pesoNetoMermaSecado=peso_neto_merma_secado,
tarifaSecado=tarifa_secado,
importeZarandeo=importe_zarandeo,
pesoNetoMermaZarandeo=peso_neto_merma_zarandeo,
tarifaZarandeo=tarifa_zarandeo,
)
self.certificacion['primaria']['ctg'].append(ctg)
return True
@inicializar_y_capturar_excepciones
def BuscarCertConSaldoDisponible(self, cuit_depositante=None,
cod_grano=2, campania=1314, coe=None,
fecha_emision_des=None,
fecha_emision_has=None,
):
"""Devuelve los certificados de depósito en los que un productor tiene
saldo disponible para Liquidar/Retirar/Transferir"""
ret = self.client.cgBuscarCertConSaldoDisponible(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
cuitDepositante=cuit_depositante or self.Cuit,
codGrano=cod_grano, campania=campania,
coe=coe,
fechaEmisionDes=fecha_emision_des,
fechaEmisionHas=fecha_emision_has,
)['oReturn']
self.__analizar_errores(ret)
array = ret.get('certificado', [])
self.Excepcion = self.Traceback = ""
self.params_out['certificados'] = []
for cert in array:
self.params_out['certificados'].append(dict(
coe=cert['coe'],
tipo_certificado=cert['tipoCertificado'],
campania=cert['campania'],
cuit_depositante=cert['cuitDepositante'],
cuit_depositario=cert['cuitDepositario'],
nro_planta=cert['nroPlanta'],
kilos_disponibles=cert['kilosDisponibles'],
cod_grano=cert['codGrano'],
))
return True
@inicializar_y_capturar_excepciones
def AutorizarCertificacion(self):
"Autoriza una Certificación Primaria de Depósito de Granos (C1116A/RT)"
# limpio los elementos que no correspondan por estar vacios:
for k1 in ('primaria', 'retiroTransferencia'):
dic = self.certificacion.get(k1)
if not dic: continue
for k2 in ('ctg', 'detalleMuestraAnalisis', 'certificadoDeposito'):
if k2 in dic and not dic[k2]:
del dic[k2]
# llamo al webservice:
ret = self.client.cgAutorizar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
**self.certificacion
)
        # analizo la respuesta
ret = ret['oReturn']
self.__analizar_errores(ret)
self.AnalizarAutorizarCertificadoResp(ret)
return True
def AnalizarAutorizarCertificadoResp(self, ret):
"Metodo interno para extraer datos de la Respuesta de Certificación"
aut = ret.get('autorizacion')
if aut:
self.PtoEmision = aut['ptoEmision']
self.NroOrden = aut['nroOrden']
self.FechaCertificacion = str(aut.get('fechaCertificacion', ""))
self.COE = str(aut['coe'])
self.Estado = aut['estado']
# actualizo parámetros de salida:
self.params_out['coe'] = self.COE
self.params_out['estado'] = self.Estado
self.params_out['nro_orden'] = self.NroOrden
self.params_out['fecha_certificacion'] = self.FechaCertificacion.replace("-", "")
if "planta" in aut:
p = aut.get("planta")
self.params_out['nro_planta'] = p.get("nroPlanta")
self.params_out['cuit_titular_planta'] = p.get("cuitTitularPlanta")
self.params_out['razon_social_titular_planta'] = p.get("razonSocialTitularPlanta")
# otros campos devueltos (opcionales)
p = aut.get('pesosResumen', {})
self.params_out['peso_bruto_certificado'] = p.get("pesoBrutoCertificado")
self.params_out['peso_merma_secado'] = p.get("pesoMermaSecado")
self.params_out['peso_merma_volatil'] = p.get("pesoMermaVolatil")
self.params_out['peso_merma_zarandeo'] = p.get("pesoMermaZarandeo")
self.params_out['peso_neto_certificado'] = p.get("pesoNetoCertificado")
p = aut.get('serviciosResumen', {})
self.params_out['importe_iva'] = p.get("importeIVA")
self.params_out['servicio_gastos_generales'] = p.get("servicioGastosGenerales")
self.params_out['servicio_otros'] = p.get("servicioOtros")
self.params_out['servicio_total'] = p.get("servicioTotal")
self.params_out['servicio_zarandeo'] = p.get("servicioZarandeo")
# datos devueltos según el tipo de certificacion (consultas):
cab = ret.get('cabecera')
if cab:
self.params_out['pto_emision'] = cab.get('ptoEmision')
self.params_out['nro_orden'] = cab.get('nroOrden')
self.params_out['tipo_certificado'] = cab.get('tipoCertificado')
self.params_out['nro_planta'] = cab.get('nroPlanta')
self.params_out['nro_ing_bruto_depositario'] = cab.get('nroIngBrutoDepositario')
self.params_out['titular_grano'] = cab.get('titularGrano')
self.params_out['cuit_depositante'] = cab.get('cuitDepositante')
self.params_out['nro_ing_bruto_depositante'] = cab.get('nroIngBrutoDepositante')
self.params_out['cuit_corredor'] = cab.get('cuitCorredor')
self.params_out['cod_grano'] = cab.get('codGrano')
self.params_out['campania'] = cab.get('campania')
self.params_out['datos_adicionales'] = cab.get('datosAdicionales')
pri = ret.get('primaria')
if pri:
self.params_out['nro_act_depositario'] = pri.get('nroActDepositario')
self.params_out['descripcion_tipo_grano'] = pri.get('descripcionTipoGrano')
self.params_out['monto_almacenaje'] = pri.get('montoAlmacenaje')
self.params_out['monto_acarreo'] = pri.get('montoAcarreo')
self.params_out['monto_gastos_generales'] = pri.get('montoGastosGenerales')
self.params_out['monto_zarandeo'] = pri.get('montoZarandeo')
self.params_out['porcentaje_secado_de'] = pri.get('porcentajeSecadoDe')
self.params_out['porcentaje_secado_a'] = pri.get('porcentajeSecadoA')
self.params_out['monto_secado'] = pri.get('montoSecado')
self.params_out['monto_por_cada_punto_exceso'] = pri.get('montoPorCadaPuntoExceso')
self.params_out['monto_otros'] = pri.get('montoOtros')
self.params_out['porcentaje_merma_volatil'] = pri.get('porcentajeMermaVolatil')
self.params_out['porcentaje_merma_secado'] = pri.get('porcentajeMermaSecado')
self.params_out['peso_neto_merma_secado'] = pri.get('pesoNetoMermaSecado')
            self.params_out['porcentaje_merma_zarandeo'] = pri.get('porcentajeMermaZarandeo')
            self.params_out['peso_neto_merma_zarandeo'] = pri.get('pesoNetoMermaZarandeo')
self.params_out['peso_neto_certificado'] = pri.get('pesoNetoCertificado')
self.params_out['servicios_secado'] = pri.get('serviciosSecado')
self.params_out['servicios_zarandeo'] = pri.get('serviciosZarandeo')
self.params_out['servicios_otros'] = pri.get('serviciosOtros')
self.params_out['servicios_forma_de_pago'] = pri.get('serviciosFormaDePago')
# otros campos no documentados:
self.params_out['servicios_conceptos_no_gravados'] = pri.get("serviciosConceptosNoGravados")
self.params_out['servicios_percepciones_iva'] = pri.get("serviciosPercepcionesIVA")
self.params_out['servicios_otras_percepciones'] = pri.get("serviciosOtrasPercepciones")
# sub estructuras:
self.params_out['ctgs'] = []
self.params_out['det_muestra_analisis'] = []
for ctg in pri.get("ctg", []):
self.params_out['ctgs'].append({
'nro_ctg': ctg.get('nroCTG'),
'nro_carta_porte': ctg.get('nroCartaDePorte'),
'peso_neto_confirmado_definitivo': ctg.get('pesoNetoConfirmadoDefinitivo'),
'porcentaje_secado_humedad': ctg.get('porcentajeSecadoHumedad'),
'importe_secado': ctg.get('importeSecado'),
'peso_neto_merma_secado': ctg.get('pesoNetoMermaSecado'),
'importe_zarandeo': ctg.get('importeZarandeo'),
'peso_neto_merma_zarandeo': ctg.get('pesoNetoMermaZarandeo'),
'tarifa_zarandeo': ctg.get('tarifaZarandeo'),
})
self.params_out['calidad'] = []
for cal in [pri.get("calidad", {})]:
self.params_out['calidad'].append({
'analisis_muestra': cal.get('analisisMuestra'),
'nro_boletin': cal.get('nroBoletin'),
'nro_act_depositario': cal.get('nroActDepositario'),
'cod_grado': cal.get('codGrado'),
'valor_grado': cal.get('valorGrado'),
'valor_contenido_proteico': cal.get('valorContProteico'),
'valor_factor': cal.get('valorFactor')
})
for det in cal.get("detalleMuestraAnalisis", []):
self.params_out['det_muestra_analisis'].append({
'descripcion_rubro': det.get('descripcionRubro'),
'tipo_rubro': det.get('tipoRubro'),
'porcentaje': det.get('porcentaje'),
'valor': det.get('valor'),
})
rt = ret.get('retiroTransferencia')
if rt:
self.params_out['nro_act_depositario'] = rt.get('nroActDepositario')
self.params_out['cuit_receptor'] = rt.get('cuitReceptor')
self.params_out['nro_carta_porte_a_utilizar'] = rt.get('nroCartaPorteAUtilizar')
# sub estructuras:
self.params_out['certificados'] = []
cert = rt.get("certificadoDeposito")
if cert:
self.params_out['certificados'].append({
'coe_certificado_deposito': cert.get('coeCertificadoDeposito'),
'peso_neto': cert.get('pesoNeto'),
})
pre = ret.get('preexistente')
if pre:
self.params_out['nro_planta'] = pre.get('nroPlanta')
self.params_out['tipo_certificado_deposito_preexistente'] = pre.get('tipoCertificadoDepositoPreexistente')
self.params_out['nro_certificado_deposito_preexistente'] = pre.get('nroCertificadoDepositoPreexistente')
self.params_out['cac_certificado_deposito_preexistente'] = pre.get('cacCertificadoDepositoPreexistente')
self.params_out['fecha_emision_certificado_deposito_preexistente'] = pre.get('fechaEmisionCertificadoDepositoPreexistente')
self.params_out['peso_neto'] = pre.get('pesoNeto')
self.params_out['errores'] = self.errores
@inicializar_y_capturar_excepciones
def InformarCalidadCertificacion(self, coe):
"Informar calidad de un certificado (C1116A/RT)"
# llamo al webservice:
ret = self.client.cgInformarCalidad(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
calidad=self.certificacion['primaria']['calidad'],
)
        # analizo la respuesta
ret = ret['oReturn']
self.__analizar_errores(ret)
self.AnalizarAutorizarCertificadoResp(ret)
return True
@inicializar_y_capturar_excepciones
def AnularCertificacion(self, coe):
"Anular liquidación activa"
ret = self.client.cgSolicitarAnulacion(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
)
ret = ret['oReturn']
self.__analizar_errores(ret)
self.Estado = ret.get('estadoCertificado', "")
return self.COE
@inicializar_y_capturar_excepciones
def AsociarLiquidacionAContrato(self, coe=None, nro_contrato=None,
cuit_comprador=None,
cuit_vendedor=None,
cuit_corredor=None,
cod_grano=None,
**kwargs):
"Asociar una Liquidación a un contrato"
ret = self.client.asociarLiquidacionAContrato(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
nroContrato=nro_contrato,
cuitComprador=cuit_comprador,
cuitVendedor=cuit_vendedor,
cuitCorredor=cuit_corredor,
codGrano=cod_grano,
)
ret = ret['liquidacion']
self.__analizar_errores(ret)
if 'liquidacion' in ret:
            # analizo la respuesta
liq = ret['liquidacion']
aut = ret['autorizacion']
self.AnalizarLiquidacion(aut, liq)
return True
@inicializar_y_capturar_excepciones
def ConsultarLiquidacionesPorContrato(self, nro_contrato=None,
cuit_comprador=None,
cuit_vendedor=None,
cuit_corredor=None,
cod_grano=None,
**kwargs):
"Obtener los COE de liquidaciones relacionadas a un contrato"
ret = self.client.liquidacionPorContratoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
nroContrato=nro_contrato,
cuitComprador=cuit_comprador,
cuitVendedor=cuit_vendedor,
cuitCorredor=cuit_corredor,
codGrano=cod_grano,
)
ret = ret['liqPorContratoCons']
self.__analizar_errores(ret)
if 'coeRelacionados' in ret:
# analizo la respuesta = [{'coe': "...."}]
self.DatosLiquidacion = sorted(ret['coeRelacionados'])
# establezco el primer COE
self.LeerDatosLiquidacion()
return True
@inicializar_y_capturar_excepciones
def ConsultarLiquidacion(self, pto_emision=None, nro_orden=None, coe=None,
pdf=None):
"Consulta una liquidación por No de orden"
if coe:
ret = self.client.liquidacionXCoeConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
pdf='S' if pdf else 'N',
)
else:
ret = self.client.liquidacionXNroOrdenConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
nroOrden=nro_orden,
)
ret = ret['liqConsReturn']
self.__analizar_errores(ret)
if 'liquidacion' in ret:
aut = ret['autorizacion']
liq = ret['liquidacion']
self.AnalizarLiquidacion(aut, liq)
# guardo el PDF si se indico archivo y vino en la respuesta:
if pdf and 'pdf' in ret:
open(pdf, "wb").write(ret['pdf'])
return True
@inicializar_y_capturar_excepciones
def ConsultarLiquidacionSecundaria(self, pto_emision=None, nro_orden=None,
coe=None, pdf=None):
"Consulta una liquidación sequndaria por No de orden o coe"
if coe:
ret = self.client.lsgConsultarXCoe(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
pdf='S' if pdf else 'N',
)
else:
ret = self.client.lsgConsultarXNroOrden(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
nroOrden=nro_orden,
)
ret = ret['oReturn']
self.__analizar_errores(ret)
        for it in ret.get('liquidaciones', []):
            aut = it['autorizacion']
            if 'liquidacion' in it:
                liq = it['liquidacion']
            elif 'ajuste' in it:
                liq = it['ajuste']
            else:
                liq = None
self.AnalizarLiquidacion(aut, liq)
# guardo el PDF si se indico archivo y vino en la respuesta:
if pdf and 'pdf' in ret:
open(pdf, "wb").write(ret['pdf'])
return True
@inicializar_y_capturar_excepciones
def ConsultarLiquidacionesSecundariasPorContrato(self, nro_contrato=None,
cuit_comprador=None,
cuit_vendedor=None,
cuit_corredor=None,
cod_grano=None,
**kwargs):
"Obtener los COE de liquidaciones relacionadas a un contrato"
ret = self.client.lsgConsultarXContrato(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
nroContrato=nro_contrato,
cuitComprador=cuit_comprador,
cuitVendedor=cuit_vendedor,
cuitCorredor=cuit_corredor,
codGrano=cod_grano,
)
ret = ret['liqPorContratoCons']
self.__analizar_errores(ret)
if 'coeRelacionados' in ret:
# analizo la respuesta = [{'coe': "...."}]
self.DatosLiquidacion = sorted(ret['coeRelacionados'])
# establezco el primer COE
self.LeerDatosLiquidacion()
return True
@inicializar_y_capturar_excepciones
def AsociarLiquidacionSecundariaAContrato(self, coe=None, nro_contrato=None,
cuit_comprador=None,
cuit_vendedor=None,
cuit_corredor=None,
cod_grano=None,
**kwargs):
"Asociar una Liquidación a un contrato"
ret = self.client.lsgAsociarAContrato(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
nroContrato=nro_contrato,
cuitComprador=cuit_comprador,
cuitVendedor=cuit_vendedor,
cuitCorredor=cuit_corredor,
codGrano=cod_grano,
)
ret = ret['oReturn']
self.__analizar_errores(ret)
if 'liquidacion' in ret:
            # analizo la respuesta
liq = ret['liquidacion']
aut = ret['autorizacion']
self.AnalizarLiquidacion(aut, liq)
return True
@inicializar_y_capturar_excepciones
def ConsultarCertificacion(self, pto_emision=None, nro_orden=None,
coe=None, pdf=None):
"Consulta una certificacion por No de orden o COE"
if coe:
ret = self.client.cgConsultarXCoe(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
pdf='S' if pdf else 'N',
)
else:
ret = self.client.cgConsultarXNroOrden(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
nroOrden=nro_orden,
)
ret = ret['oReturn']
self.__analizar_errores(ret)
if 'autorizacion' in ret:
self.AnalizarAutorizarCertificadoResp(ret)
# guardo el PDF si se indico archivo y vino en la respuesta:
if pdf and 'pdf' in ret:
open(pdf, "wb").write(ret['pdf'])
return True
@inicializar_y_capturar_excepciones
def ConsultarAjuste(self, pto_emision=None, nro_orden=None, nro_contrato=None,
coe=None, pdf=None):
"Consulta un ajuste de liquidación por No de orden o numero de contrato"
if nro_contrato:
ret = self.client.ajustePorContratoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
nroContrato=nro_contrato,
)
ret = ret['ajusteContratoReturn']
elif coe is None or pdf is None:
ret = self.client.ajusteXNroOrdenConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
nroOrden=nro_orden,
pdf='S' if pdf else 'N',
)
ret = ret['ajusteXNroOrdenConsReturn']
else:
ret = self.client.ajusteXCoeConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
pdf='S' if pdf else 'N',
)
ret = ret['ajusteConsReturn']
self.__analizar_errores(ret)
if 'ajusteUnificado' in ret:
aut = ret['ajusteUnificado']
self.AnalizarAjuste(aut)
# guardo el PDF si se indico archivo y vino en la respuesta:
if pdf and 'pdf' in ret:
open(pdf, "wb").write(ret['pdf'])
return True
@inicializar_y_capturar_excepciones
def ConsultarUltNroOrden(self, pto_emision=1):
"Consulta el último No de orden registrado"
ret = self.client.liquidacionUltimoNroOrdenConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
)
ret = ret['liqUltNroOrdenReturn']
self.__analizar_errores(ret)
self.NroOrden = ret['nroOrden']
return True
@inicializar_y_capturar_excepciones
def ConsultarLiquidacionSecundariaUltNroOrden(self, pto_emision=1):
"Consulta el último No de orden registrado para LSG"
ret = self.client.lsgConsultarUltimoNroOrden(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
)
ret = ret['liqUltNroOrdenReturn']
self.__analizar_errores(ret)
self.NroOrden = ret['nroOrden']
return True
@inicializar_y_capturar_excepciones
def ConsultarCertificacionUltNroOrden(self, pto_emision=1):
"Consulta el último No de orden registrado para CG"
ret = self.client.cgConsultarUltimoNroOrden(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
)
ret = ret['liqUltNroOrdenReturn']
self.__analizar_errores(ret)
self.NroOrden = ret['nroOrden']
return True
@inicializar_y_capturar_excepciones
def LeerDatosLiquidacion(self, pop=True):
"Recorro los datos devueltos y devuelvo el primero si existe"
if self.DatosLiquidacion:
# extraigo el primer item
if pop:
datos_liq = self.DatosLiquidacion.pop(0)
else:
datos_liq = self.DatosLiquidacion[0]
self.COE = str(datos_liq['coe'])
self.Estado = unicode(datos_liq.get('estado', ""))
return self.COE
else:
return ""
@inicializar_y_capturar_excepciones
def AnularLiquidacion(self, coe):
"Anular liquidación activa"
ret = self.client.liquidacionAnular(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
)
ret = ret['anulacionReturn']
self.__analizar_errores(ret)
self.Resultado = ret['resultado']
return self.COE
@inicializar_y_capturar_excepciones
def AnularLiquidacionSecundaria(self, coe):
"Anular liquidación secundaria activa"
ret = self.client.lsgAnular(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
)
ret = ret['anulacionReturn']
self.__analizar_errores(ret)
self.Resultado = ret['resultado']
return self.COE
def ConsultarCampanias(self, sep="||"):
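        "Consulta las campañas habilitadas."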
ret = self.client.campaniasConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['campaniaReturn']
self.__analizar_errores(ret)
array = ret.get('campanias', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarTipoGrano(self, sep="||"):
ret = self.client.tipoGranoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoGranoReturn']
self.__analizar_errores(ret)
array = ret.get('granos', [])
if sep is None:
return dict([(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarCodigoGradoReferencia(self, sep="||"):
"Consulta de Grados según Grano."
ret = self.client.codigoGradoReferenciaConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['gradoRefReturn']
self.__analizar_errores(ret)
array = ret.get('gradosRef', [])
if sep is None:
return dict([(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarGradoEntregadoXTipoGrano(self, cod_grano, sep="||"):
"Consulta de Grado y Valor según Grano Entregado."
ret = self.client.codigoGradoEntregadoXTipoGranoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
codGrano=cod_grano,
)['gradoEntReturn']
self.__analizar_errores(ret)
array = ret.get('gradoEnt', [])
if sep is None:
return dict([(it['gradoEnt']['codigoDescripcion']['codigo'],
it['gradoEnt']['valor'])
for it in array])
else:
return [("%s %%s %s %%s %s %%s %s" % (sep, sep, sep, sep)) %
(it['gradoEnt']['codigoDescripcion']['codigo'],
it['gradoEnt']['codigoDescripcion']['descripcion'],
it['gradoEnt']['valor'],
)
for it in array]
def ConsultarTipoCertificadoDeposito(self, sep="||"):
"Consulta de tipos de Certificados de Depósito"
ret = self.client.tipoCertificadoDepositoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoCertDepReturn']
self.__analizar_errores(ret)
array = ret.get('tiposCertDep', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarTipoDeduccion(self, sep="||"):
"Consulta de tipos de Deducciones"
ret = self.client.tipoDeduccionConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoDeduccionReturn']
self.__analizar_errores(ret)
array = ret.get('tiposDeduccion', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarTipoRetencion(self, sep="||"):
"Consulta de tipos de Retenciones."
ret = self.client.tipoRetencionConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoRetencionReturn']
self.__analizar_errores(ret)
array = ret.get('tiposRetencion', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarPuerto(self, sep="||"):
"Consulta de Puertos habilitados"
ret = self.client.puertoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['puertoReturn']
self.__analizar_errores(ret)
array = ret.get('puertos', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarTipoActividad(self, sep="||"):
"Consulta de Tipos de Actividad."
ret = self.client.tipoActividadConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoActividadReturn']
self.__analizar_errores(ret)
array = ret.get('tiposActividad', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarTipoActividadRepresentado(self, sep="||"):
"Consulta de Tipos de Actividad inscripta en el RUOCA."
try:
ret = self.client.tipoActividadRepresentadoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoActividadReturn']
self.__analizar_errores(ret)
array = ret.get('tiposActividad', [])
self.Excepcion = self.Traceback = ""
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
except Exception:
ex = utils.exception_info()
self.Excepcion = ex['msg']
self.Traceback = ex['tb']
if sep:
return ["ERROR"]
def ConsultarProvincias(self, sep="||"):
"Consulta las provincias habilitadas"
ret = self.client.provinciasConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['provinciasReturn']
self.__analizar_errores(ret)
array = ret.get('provincias', [])
if sep is None:
return dict([(int(it['codigoDescripcion']['codigo']),
it['codigoDescripcion']['descripcion'])
for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarLocalidadesPorProvincia(self, codigo_provincia, sep="||"):
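        "Consulta las localidades de una provincia."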
ret = self.client.localidadXProvinciaConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
codProvincia=codigo_provincia,
)['localidadesReturn']
self.__analizar_errores(ret)
array = ret.get('localidades', [])
if sep is None:
return dict([(str(it['codigoDescripcion']['codigo']),
it['codigoDescripcion']['descripcion'])
for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def BuscarLocalidades(self, cod_prov, cod_localidad=None, consultar=True):
"Devuelve la localidad o la consulta en AFIP (uso interno)"
        # si no se especifica cod_localidad, es útil para reconstruir la cache
import wslpg_datos as datos
if not str(cod_localidad) in datos.LOCALIDADES and consultar:
d = self.ConsultarLocalidadesPorProvincia(cod_prov, sep=None)
try:
# actualizar el diccionario persistente (shelve)
datos.LOCALIDADES.update(d)
except Exception, e:
print "EXCEPCION CAPTURADA", e
# capturo errores por permisos (o por concurrencia)
datos.LOCALIDADES = d
return datos.LOCALIDADES.get(str(cod_localidad), "")
def ConsultarTiposOperacion(self, sep="||"):
"Consulta tipo de Operación por Actividad."
ops = []
ret = self.client.tipoActividadConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoActividadReturn']
self.__analizar_errores(ret)
for it_act in ret.get('tiposActividad', []):
ret = self.client.tipoOperacionXActividadConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
nroActLiquida=it_act['codigoDescripcion']['codigo'],
)['tipoOperacionReturn']
self.__analizar_errores(ret)
array = ret.get('tiposOperacion', [])
if sep:
ops.extend([("%s %%s %s %%s %s %%s %s" % (sep, sep, sep, sep)) %
(it_act['codigoDescripcion']['codigo'],
it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array])
else:
ops.extend([(it_act['codigoDescripcion']['codigo'],
it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array])
return ops
# Funciones para generar PDF:
def CargarFormatoPDF(self, archivo="liquidacion_form_c1116b_wslpg.csv"):
"Cargo el formato de campos a generar desde una planilla CSV"
# si no encuentro archivo, lo busco en el directorio predeterminado:
if not os.path.exists(archivo):
archivo = os.path.join(self.InstallDir, "plantillas", os.path.basename(archivo))
if DEBUG: print "abriendo archivo ", archivo
# inicializo la lista de los elementos:
self.elements = []
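        # se espera que cada línea del CSV tenga los valores separados por ';'
        # en el mismo orden que los parámetros de AgregarCampoPDF:
        # nombre;tipo;x1;y1;x2;y2;font;size;bold;italic;underline;
        # foreground;background;align;text;priority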
for lno, linea in enumerate(open(archivo.encode('latin1')).readlines()):
if DEBUG: print "procesando linea ", lno, linea
args = []
for i,v in enumerate(linea.split(";")):
if not v.startswith("'"):
v = v.replace(",",".")
else:
v = v#.decode('latin1')
if v.strip()=='':
v = None
else:
v = eval(v.strip())
args.append(v)
# corrijo path relativo para las imágenes:
if args[1] == 'I':
if not os.path.exists(args[14]):
args[14] = os.path.join(self.InstallDir, "plantillas", os.path.basename(args[14]))
if DEBUG: print "NUEVO PATH:", args[14]
self.AgregarCampoPDF(*args)
self.AgregarCampoPDF("anulado", 'T', 150, 250, 0, 0,
size=70, rotate=45, foreground=0x808080,
priority=-1)
if HOMO:
self.AgregarCampoPDF("homo", 'T', 100, 250, 0, 0,
size=70, rotate=45, foreground=0x808080,
priority=-1)
# cargo los elementos en la plantilla
self.template.load_elements(self.elements)
return True
def AgregarCampoPDF(self, nombre, tipo, x1, y1, x2, y2,
font="Arial", size=12,
bold=False, italic=False, underline=False,
foreground= 0x000000, background=0xFFFFFF,
align="L", text="", priority=0, **kwargs):
"Agrego un campo a la plantilla"
# convierto colores de string (en hexadecimal)
if isinstance(foreground, basestring): foreground = int(foreground, 16)
if isinstance(background, basestring): background = int(background, 16)
if isinstance(text, unicode): text = text.encode("latin1")
field = {
'name': nombre,
'type': tipo,
'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'font': font, 'size': size,
'bold': bold, 'italic': italic, 'underline': underline,
'foreground': foreground, 'background': background,
'align': align, 'text': text, 'priority': priority}
field.update(kwargs)
self.elements.append(field)
return True
def CrearPlantillaPDF(self, papel="A4", orientacion="portrait"):
"Iniciar la creación del archivo PDF"
# genero el renderizador con propiedades del PDF
t = Template(
format=papel, orientation=orientacion,
title="F 1116 B/C %s" % (self.NroOrden),
author="CUIT %s" % self.Cuit,
subject="COE %s" % self.params_out.get('coe'),
keywords="AFIP Liquidacion Electronica Primaria de Granos",
creator='wslpg.py %s (http://www.PyAfipWs.com.ar)' % __version__,)
self.template = t
return True
def AgregarDatoPDF(self, campo, valor, pagina='T'):
"Agrego un dato a la factura (internamente)"
# corrijo path relativo para las imágenes (compatibilidad hacia atrás):
if campo == 'fondo' and valor.startswith(self.InstallDir):
if not os.path.exists(valor):
valor = os.path.join(self.InstallDir, "plantillas", os.path.basename(valor))
if DEBUG: print "NUEVO PATH:", valor
self.datos[campo] = valor
return True
def ProcesarPlantillaPDF(self, num_copias=1, lineas_max=24, qty_pos='izq',
clave=''):
"Generar el PDF según la factura creada y plantilla cargada"
try:
f = self.template
liq = self.params_out
# actualizo los campos según la clave (ajuste debitos / creditos)
if clave and clave in liq:
liq = liq.copy()
liq.update(liq[clave]) # unificar con AnalizarAjusteCredito/Debito
if HOMO:
self.AgregarDatoPDF("homo", u"HOMOLOGACIÓN")
copias = {1: 'Original', 2: 'Duplicado', 3: 'Triplicado',
4: 'Cuadruplicado', 5: 'Quintuplicado'}
# convierto el formato de intercambio para representar los valores:
fmt_encabezado = dict([(v[0], v[1:]) for v in ENCABEZADO])
fmt_deduccion = dict([(v[0], v[1:]) for v in DEDUCCION])
fmt_retencion = dict([(v[0], v[1:]) for v in RETENCION])
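            # formatear renders a raw value according to the fixed-width record metadata:
            # N (numeric) formats CUITs with dashes and weights as "Kg", I (importe) adds
            # decimals plus "%" or "$" as appropriate, and *fecha* fields become dd/mm/yyyy.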
def formatear(campo, valor, formato):
"Convertir el valor a una cadena correctamente s/ formato ($ % ...)"
                if campo in formato and valor is not None:
fmt = formato[campo]
if fmt[1] == N:
if 'cuit' in campo:
c = str(valor)
if len(c) == 11:
valor = "%s-%s-%s" % (c[0:2], c[2:10], c[10:])
else:
valor = ""
elif 'peso' in campo:
valor = "%s Kg" % valor
elif valor is not None and valor != "":
valor = "%d" % int(valor)
else:
valor = ""
elif fmt[1] == I:
valor = ("%%0.%df" % fmt[2]) % valor
if 'alic' in campo or 'comision' in campo:
valor = valor + " %"
elif 'factor' in campo or 'cont' in campo or 'cant' in campo:
pass
else:
valor = "$ " + valor
elif 'fecha' in campo:
d = valor
if isinstance(d, (datetime.date, datetime.datetime)):
valor = d.strftime("%d/%m/%Y")
else:
valor = "%s/%s/%s" % (d[8:10], d[5:7], d[0:4])
return valor
def buscar_localidad_provincia(cod_prov, cod_localidad):
"obtener la descripción de la provincia/localidad (usar cache)"
cod_prov = int(cod_prov)
cod_localidad = str(cod_localidad)
provincia = datos.PROVINCIAS[cod_prov]
localidad = self.BuscarLocalidades(cod_prov, cod_localidad)
return localidad, provincia
# divido los datos adicionales (debe haber renglones 1 al 9):
if liq.get('datos_adicionales') and f.has_key('datos_adicionales1'):
d = liq.get('datos_adicionales')
for i, ds in enumerate(f.split_multicell(d, 'datos_adicionales1')):
liq['datos_adicionales%s' % (i + 1)] = ds
for copia in range(1, num_copias+1):
# completo campos y hojas
f.add_page()
f.set('copia', copias.get(copia, "Adicional %s" % copia))
f.set('anulado', {'AC': '', '': 'SIN ESTADO',
'AN': "ANULADO"}.get(liq['estado'], "ERROR"))
try:
cod_tipo_ajuste = int(liq["cod_tipo_ajuste"] or '0')
except:
cod_tipo_ajuste = None
f.set('tipo_ajuste', {3: u'Liquidación de Débito',
4: u'Liquidación de Crédito',
}.get(cod_tipo_ajuste, ''))
# limpio datos del corredor si no corresponden:
if liq.get('actua_corredor', 'N') == 'N':
if liq.get('cuit_corredor', None) == 0:
del liq['cuit_corredor']
# establezco campos según tabla encabezado:
for k,v in liq.items():
v = formatear(k, v, fmt_encabezado)
if isinstance(v, (basestring, int, long, float)):
f.set(k, v)
elif isinstance(v, decimal.Decimal):
f.set(k, str(v))
elif isinstance(v, datetime.datetime):
f.set(k, str(v))
import wslpg_datos as datos
campania = int(liq.get('campania_ppal') or 0)
f.set("campania_ppal", datos.CAMPANIAS.get(campania, campania))
f.set("tipo_operacion", datos.TIPOS_OP.get(int(liq.get('cod_tipo_operacion') or 0), ""))
f.set("actividad", datos.ACTIVIDADES.get(int(liq.get('nro_act_comprador') or 0), ""))
if 'cod_grano' in liq and liq['cod_grano']:
cod_grano = int(liq['cod_grano'])
else:
cod_grano = int(self.datos.get('cod_grano') or 0)
f.set("grano", datos.GRANOS.get(cod_grano, ""))
cod_puerto = int(liq.get('cod_puerto', self.datos.get('cod_puerto')) or 0)
if cod_puerto in datos.PUERTOS:
f.set("des_puerto_localidad", datos.PUERTOS[cod_puerto])
cod_grado_ref = liq.get('cod_grado_ref', self.datos.get('cod_grado_ref')) or ""
if cod_grado_ref in datos.GRADOS_REF:
f.set("des_grado_ref", datos.GRADOS_REF[cod_grado_ref])
else:
f.set("des_grado_ref", cod_grado_ref)
cod_grado_ent = liq.get('cod_grado_ent', self.datos.get('cod_grado_ent'))
if 'val_grado_ent' in liq and int(liq.get('val_grado_ent') or 0):
val_grado_ent = liq['val_grado_ent']
elif 'val_grado_ent' in self.datos:
val_grado_ent = self.datos.get('val_grado_ent')
elif cod_grano in datos.GRADO_ENT_VALOR:
valores = datos.GRADO_ENT_VALOR[cod_grano]
if cod_grado_ent in valores:
val_grado_ent = valores[cod_grado_ent]
else:
val_grado_ent = ""
else:
val_grado_ent = ""
f.set("valor_grado_ent", "%s %s" % (cod_grado_ent or "", val_grado_ent or ""))
f.set("cont_proteico", liq.get('cont_proteico', self.datos.get('cont_proteico', "")))
if liq.get('certificados'):
# uso la procedencia del certificado de depósito
cert = liq['certificados'][0]
localidad, provincia = buscar_localidad_provincia(
cert['cod_prov_procedencia'],
cert['cod_localidad_procedencia'])
elif liq.get('cod_prov_procedencia_sin_certificado'):
localidad, provincia = buscar_localidad_provincia(
liq['cod_prov_procedencia_sin_certificado'],
liq['cod_localidad_procedencia_sin_certificado'])
else:
localidad, provincia = "", ""
f.set("procedencia", "%s - %s" % (localidad, provincia))
# si no se especifíca, uso la procedencia para el lugar
if not self.datos.get('lugar_y_fecha'):
localidad, provincia = buscar_localidad_provincia(
liq['cod_prov_procedencia'],
liq['cod_localidad_procedencia'])
lugar = "%s - %s " % (localidad, provincia)
fecha = datetime.datetime.today().strftime("%d/%m/%Y")
f.set("lugar_y_fecha", "%s, %s" % (fecha, lugar))
if 'lugar_y_fecha' in self.datos:
del self.datos['lugar_y_fecha']
if HOMO:
homo = "(pruebas)"
else:
homo = ""
if int(liq['cod_tipo_operacion'] or 0) == 1:
f.set("comprador.L", "COMPRADOR:")
f.set("vendedor.L", "VENDEDOR:")
f.set("formulario", u"Form. Electrónico 1116 B %s" % homo)
else:
f.set("comprador.L", "MANDATARIO/CONSIGNATARIO:")
f.set("vendedor.L", "MANDANTE/COMITENTE:")
f.set("formulario", u"Form. Electrónico 1116 C %s" % homo)
if int(liq.get("coe_ajustado") or 0) or int(liq.get("nro_contrato") or 0):
f.set("formulario", u"Ajuste Unificado %s" % homo)
certs = []
for cert in liq.get('certificados', []):
certs.append(u"%s Nº %s" % (
datos.TIPO_CERT_DEP[int(cert['tipo_certificado_deposito'])],
cert['nro_certificado_deposito']))
f.set("certificados_deposito", ', '.join(certs))
for i, deduccion in enumerate(liq.get('deducciones', [])):
for k, v in deduccion.items():
v = formatear(k, v, fmt_deduccion)
f.set("deducciones_%s_%02d" % (k, i + 1), v)
for i, retencion in enumerate(liq.get('retenciones', [])):
for k, v in retencion.items():
v = formatear(k, v, fmt_retencion)
f.set("retenciones_%s_%02d" % (k, i + 1), v)
if retencion['importe_certificado_retencion']:
d = retencion['fecha_certificado_retencion']
f.set('retenciones_cert_retencion_%02d' % (i + 1),
"%s $ %0.2f %s" % (
retencion['nro_certificado_retencion'] or '',
retencion['importe_certificado_retencion'],
"%s/%s/%s" % (d[8:10], d[5:7], d[2:4]),
))
# cargo campos adicionales ([PDF] en .ini y AgregarDatoPDF)
for k,v in self.datos.items():
f.set(k, v)
# Ajustes:
if clave:
f.set('subtipo_ajuste', {'ajuste_debito': u'AJUSTE DÉBITO',
'ajuste_credito': u'AJUSTE CRÉDITO'}[clave])
if int(liq.get('coe_ajustado') or 0):
f.set("leyenda_coe_nro", "COE Ajustado:")
f.set("nro_contrato_o_coe_ajustado", liq['coe_ajustado'])
f.set("coe_relacionados.L", "")
f.set("coe_relacionados", "")
elif liq.get('nro_contrato'):
f.set("leyenda_coe_nro", "Contrato Ajustado:")
f.set("nro_contrato_o_coe_ajustado", liq['nro_contrato'])
##f.set("coe_relacionados", TODO)
return True
except Exception, e:
ex = utils.exception_info()
try:
f.set('anulado', "%(name)s:%(lineno)s" % ex)
except:
pass
self.Excepcion = ex['msg']
self.Traceback = ex['tb']
if DEBUG:
print self.Excepcion
print self.Traceback
return False
def GenerarPDF(self, archivo="", dest="F"):
"Generar archivo de salida en formato PDF"
try:
self.template.render(archivo, dest=dest)
return True
except Exception, e:
self.Excepcion = str(e)
return False
def MostrarPDF(self, archivo, imprimir=False):
try:
if sys.platform=="linux2":
                os.system('evince "%s"' % archivo)
else:
operation = imprimir and "print" or ""
os.startfile(archivo, operation)
return True
except Exception, e:
self.Excepcion = str(e)
return False
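# Helpers to exchange data with the calling application: escribir_archivo() dumps a
# liquidation dict as JSON (--json), DBF tables (--dbf) or fixed-width text records
# tagged by tipo_reg, and leer_archivo() parses the same layouts back into a dict.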
def escribir_archivo(dic, nombre_archivo, agrega=True):
archivo = open(nombre_archivo, agrega and "a" or "w")
if '--json' in sys.argv:
json.dump(dic, archivo, sort_keys=True, indent=4)
elif '--dbf' in sys.argv:
formatos = [('Encabezado', ENCABEZADO, [dic]),
('Certificacion', CERTIFICACION, [dic]),
('Certificado', CERTIFICADO, dic.get('certificados', [])),
('Retencion', RETENCION, dic.get('retenciones', [])),
('Deduccion', DEDUCCION, dic.get('deducciones', [])),
('Percepcion', PERCEPCION, dic.get('percepciones', [])),
('Opcional', OPCIONAL, dic.get('opcionales', [])),
('AjusteCredito', AJUSTE, dic.get('ajuste_credito', [])),
('AjusteDebito', AJUSTE, dic.get('ajuste_debito', [])),
('CTG', CTG, dic.get('ctgs', [])),
('DetMuestraAnalisis', DET_MUESTRA_ANALISIS, dic.get('det_muestra_analisis', [])),
('Calidad', CALIDAD, dic.get('calidad', [])),
('FacturaPapel', FACTURA_PAPEL, dic.get('factura_papel', [])),
('Fusion', FUSION, dic.get('fusion', [])),
('Dato', DATO, dic.get('datos', [])),
('Error', ERROR, dic.get('errores', [])),
]
guardar_dbf(formatos, agrega, conf_dbf)
else:
dic['tipo_reg'] = 0
archivo.write(escribir(dic, ENCABEZADO))
dic['tipo_reg'] = 7
archivo.write(escribir(dic, CERTIFICACION))
if 'certificados' in dic:
for it in dic['certificados']:
it['tipo_reg'] = 1
archivo.write(escribir(it, CERTIFICADO))
if 'retenciones' in dic:
for it in dic['retenciones']:
it['tipo_reg'] = 2
archivo.write(escribir(it, RETENCION))
if 'deducciones' in dic:
for it in dic['deducciones']:
it['tipo_reg'] = 3
archivo.write(escribir(it, DEDUCCION))
if 'percepciones' in dic:
for it in dic['percepciones']:
it['tipo_reg'] = 'P'
archivo.write(escribir(it, PERCEPCION))
if 'opcionales' in dic:
for it in dic['opcionales']:
it['tipo_reg'] = 'O'
archivo.write(escribir(it, OPCIONAL))
if 'ajuste_debito' in dic:
dic['ajuste_debito']['tipo_reg'] = 4
archivo.write(escribir(dic['ajuste_debito'], AJUSTE))
for it in dic['ajuste_debito'].get('retenciones', []):
it['tipo_reg'] = 2
archivo.write(escribir(it, RETENCION))
for it in dic['ajuste_debito'].get('deducciones', []):
it['tipo_reg'] = 3
archivo.write(escribir(it, DEDUCCION))
for it in dic['ajuste_debito'].get('percepciones', []):
it['tipo_reg'] = "P"
archivo.write(escribir(it, PERCEPCION))
for it in dic['ajuste_debito'].get('certificados', []):
it['tipo_reg'] = 1
archivo.write(escribir(it, CERTIFICADO))
if 'ajuste_credito' in dic:
dic['ajuste_credito']['tipo_reg'] = 5
archivo.write(escribir(dic['ajuste_credito'], AJUSTE))
for it in dic['ajuste_credito'].get('retenciones', []):
it['tipo_reg'] = 2
archivo.write(escribir(it, RETENCION))
for it in dic['ajuste_credito'].get('deducciones', []):
it['tipo_reg'] = 3
archivo.write(escribir(it, DEDUCCION))
for it in dic['ajuste_credito'].get('percepciones', []):
it['tipo_reg'] = "P"
archivo.write(escribir(it, PERCEPCION))
for it in dic['ajuste_credito'].get('certificados', []):
it['tipo_reg'] = 1
archivo.write(escribir(it, CERTIFICADO))
if 'ctgs' in dic:
for it in dic['ctgs']:
it['tipo_reg'] = 'C'
archivo.write(escribir(it, CTG))
if 'det_muestra_analisis' in dic:
for it in dic['det_muestra_analisis']:
it['tipo_reg'] = 'D'
archivo.write(escribir(it, DET_MUESTRA_ANALISIS))
if 'calidad' in dic:
for it in dic['calidad']:
it['tipo_reg'] = 'Q'
archivo.write(escribir(it, CALIDAD))
if 'factura_papel' in dic:
for it in dic['factura_papel']:
it['tipo_reg'] = 'F'
archivo.write(escribir(it, FACTURA_PAPEL))
if 'fusion' in dic:
for it in dic['fusion']:
it['tipo_reg'] = 'f'
archivo.write(escribir(it, FUSION))
if 'datos' in dic:
for it in dic['datos']:
it['tipo_reg'] = 9
archivo.write(escribir(it, DATO))
if 'errores' in dic:
for it in dic['errores']:
it['tipo_reg'] = 'R'
archivo.write(escribir(it, ERROR))
archivo.close()
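# leer_archivo: inverse of escribir_archivo for the fixed-width layout; the first character
# of each line selects the record type ('0' header, '1' certificate, '2' retention,
# '3' deduction, 'P'/'O' perception/optional, '4'/'5' debit/credit adjustment,
# '7' certification, 'C' CTG, 'D' sample detail, 'Q' quality, 'F' paper invoice,
# 'f' fusion, '9' extra data).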
def leer_archivo(nombre_archivo):
archivo = open(nombre_archivo, "r")
if '--json' in sys.argv:
dic = json.load(archivo)
elif '--dbf' in sys.argv:
dic = {'retenciones': [], 'deducciones': [], 'certificados': [],
'percepciones': [], 'opcionales': [], 'fusion': [],
'datos': [], 'ajuste_credito': [], 'ajuste_debito': [],
'ctgs': [], 'det_muestra_analisis': [], 'calidad': [],
}
formatos = [('Encabezado', ENCABEZADO, dic),
('Certificacion', CERTIFICACION, dic),
('Certificado', CERTIFICADO, dic['certificados']),
                    ('Retencion', RETENCION, dic['retenciones']),
('Deduccion', DEDUCCION, dic['deducciones']),
('Percepcion', PERCEPCION, dic['percepciones']),
('Opcional', OPCIONAL, dic['opcionales']),
('AjusteCredito', AJUSTE, dic['ajuste_credito']),
('AjusteDebito', AJUSTE, dic['ajuste_debito']),
('CTG', CTG, dic.get('ctgs', [])),
('DetMuestraAnalisis', DET_MUESTRA_ANALISIS, dic.get('det_muestra_analisis', [])),
('Calidad', CALIDAD, dic.get('calidad', [])),
('FacturaPapel', FACTURA_PAPEL, dic.get('factura_papel', [])),
('Fusion', FUSION, dic.get('fusion', [])),
('Dato', DATO, dic['datos']),
]
leer_dbf(formatos, conf_dbf)
else:
dic = {'retenciones': [], 'deducciones': [], 'certificados': [],
'percepciones': [], 'opcionales': [],
'datos': [], 'ajuste_credito': {}, 'ajuste_debito': {},
'ctgs': [], 'det_muestra_analisis': [], 'calidad': [],
'factura_papel': [], 'fusion': [],
}
for linea in archivo:
if str(linea[0])=='0':
# encabezado base de las liquidaciones
d = leer(linea, ENCABEZADO)
if d['reservado1']:
print "ADVERTENCIA: USAR datos adicionales (nueva posición)"
d['datos_adicionales'] = d['reservado1']
dic.update(d)
# referenciar la liquidación para agregar ret. / ded.:
liq = dic
elif str(linea[0])=='1':
d = leer(linea, CERTIFICADO)
if d['reservado1']:
print "ADVERTENCIA: USAR tipo_certificado_deposito (nueva posición)"
d['tipo_certificado_deposito'] = d['reservado1']
liq['certificados'].append(d)
elif str(linea[0])=='2':
liq['retenciones'].append(leer(linea, RETENCION))
elif str(linea[0])=='3':
d = leer(linea, DEDUCCION)
# ajustes por cambios en afip (compatibilidad hacia atras):
if d['reservado1']:
print "ADVERTENCIA: USAR precio_pkg_diario!"
d['precio_pkg_diario'] = d['reservado1']
liq['deducciones'].append(d)
elif str(linea[0])=='P':
liq['percepciones'].append(leer(linea, PERCEPCION))
elif str(linea[0])=='O':
liq['opcionales'].append(leer(linea, OPCIONAL))
elif str(linea[0])=='4':
liq = leer(linea, AJUSTE)
liq.update({'retenciones': [], 'deducciones': [], 'percepciones': [], 'datos': [], 'certificados': []})
dic['ajuste_debito'] = liq
elif str(linea[0])=='5':
liq = leer(linea, AJUSTE)
liq.update({'retenciones': [], 'deducciones': [], 'percepciones': [], 'datos': [], 'certificados': []})
dic['ajuste_credito'] = liq
elif str(linea[0])=='7':
# actualizo con cabecera para certificaciones de granos:
d = leer(linea, CERTIFICACION)
dic.update(d)
elif str(linea[0])=='C':
dic['ctgs'].append(leer(linea, CTG))
elif str(linea[0])=='D':
dic['det_muestra_analisis'].append(leer(linea, DET_MUESTRA_ANALISIS))
elif str(linea[0])=='Q':
dic['calidad'].append(leer(linea, CALIDAD))
elif str(linea[0])=='F':
dic['factura_papel'].append(leer(linea, FACTURA_PAPEL))
elif str(linea[0])=='f':
dic['fusion'].append(leer(linea, FUSION))
elif str(linea[0])=='9':
dic['datos'].append(leer(linea, DATO))
else:
print "Tipo de registro incorrecto:", linea[0]
archivo.close()
if not 'nro_orden' in dic:
raise RuntimeError("Archivo de entrada invalido, revise campos y lineas en blanco")
if DEBUG:
import pprint; pprint.pprint(dic)
return dic
# busco el directorio de instalación (global para que no cambie si usan otra dll)
INSTALL_DIR = WSLPG.InstallDir = get_install_dir()
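# Command-line entry point: reads the .ini configuration, authenticates against WSAA,
# connects to the WSLPG webservice and then dispatches on the sys.argv flags handled below.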
if __name__ == '__main__':
if '--ayuda' in sys.argv:
print LICENCIA
print AYUDA
sys.exit(0)
if '--formato' in sys.argv:
print "Formato:"
for msg, formato in [('Encabezado', ENCABEZADO),
('Certificado', CERTIFICADO),
('Retencion', RETENCION),
('Deduccion', DEDUCCION),
('Percepcion', PERCEPCION),
('Opcional', OPCIONAL),
('Ajuste', AJUSTE),
('Certificacion', CERTIFICACION),
('CTG', CTG),
('Det. Muestra Analisis', DET_MUESTRA_ANALISIS),
('Calidad', CALIDAD),
('Factura Papel', FACTURA_PAPEL),
('Fusion', FUSION),
('Evento', EVENTO), ('Error', ERROR),
('Dato', DATO)]:
comienzo = 1
print "=== %s ===" % msg
for fmt in formato:
clave, longitud, tipo = fmt[0:3]
dec = len(fmt)>3 and fmt[3] or (tipo=='I' and '2' or '')
print " * Campo: %-20s Posición: %3d Longitud: %4d Tipo: %s Decimales: %s" % (
clave, comienzo, longitud, tipo, dec)
comienzo += longitud
sys.exit(0)
if "--register" in sys.argv or "--unregister" in sys.argv:
import win32com.server.register
win32com.server.register.UseCommandLine(WSLPG)
sys.exit(0)
import csv
from ConfigParser import SafeConfigParser
from wsaa import WSAA
try:
if "--version" in sys.argv:
print "Versión: ", __version__
if len(sys.argv)>1 and sys.argv[1].endswith(".ini"):
CONFIG_FILE = sys.argv[1]
print "Usando configuracion:", CONFIG_FILE
config = SafeConfigParser()
config.read(CONFIG_FILE)
CERT = config.get('WSAA','CERT')
PRIVATEKEY = config.get('WSAA','PRIVATEKEY')
CUIT = config.get('WSLPG','CUIT')
ENTRADA = config.get('WSLPG','ENTRADA')
SALIDA = config.get('WSLPG','SALIDA')
if config.has_option('WSAA','URL') and not HOMO:
WSAA_URL = config.get('WSAA','URL')
else:
WSAA_URL = None #wsaa.WSAAURL
if config.has_option('WSLPG','URL') and not HOMO:
WSLPG_URL = config.get('WSLPG','URL')
else:
WSLPG_URL = WSDL
PROXY = config.has_option('WSAA', 'PROXY') and config.get('WSAA', 'PROXY') or None
CACERT = config.has_option('WSAA', 'CACERT') and config.get('WSAA', 'CACERT') or None
WRAPPER = config.has_option('WSAA', 'WRAPPER') and config.get('WSAA', 'WRAPPER') or None
if config.has_option('WSLPG', 'TIMEOUT'):
TIMEOUT = int(config.get('WSLPG', 'TIMEOUT'))
if config.has_section('DBF'):
conf_dbf = dict(config.items('DBF'))
if DEBUG: print "conf_dbf", conf_dbf
else:
conf_dbf = {}
DEBUG = '--debug' in sys.argv
XML = '--xml' in sys.argv
if DEBUG:
print "Usando Configuración:"
print "WSAA_URL:", WSAA_URL
print "WSLPG_URL:", WSLPG_URL
print "CACERT", CACERT
print "WRAPPER", WRAPPER
print "timeout:", TIMEOUT
# obteniendo el TA
from wsaa import WSAA
wsaa = WSAA()
ta = wsaa.Autenticar("wslpg", CERT, PRIVATEKEY, wsdl=WSAA_URL,
proxy=PROXY, wrapper=WRAPPER, cacert=CACERT)
if not ta:
sys.exit("Imposible autenticar con WSAA: %s" % wsaa.Excepcion)
# cliente soap del web service
wslpg = WSLPG()
wslpg.LanzarExcepciones = True
wslpg.Conectar(url=WSLPG_URL, proxy=PROXY, wrapper=WRAPPER, cacert=CACERT, timeout=TIMEOUT)
wslpg.SetTicketAcceso(ta)
wslpg.Cuit = CUIT
if '--dummy' in sys.argv:
ret = wslpg.Dummy()
print "AppServerStatus", wslpg.AppServerStatus
print "DbServerStatus", wslpg.DbServerStatus
print "AuthServerStatus", wslpg.AuthServerStatus
##sys.exit(0)
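        # --autorizar: authorize a primary grain liquidation. With --prueba a sample
        # record is generated first; the input file (ENTRADA) is read, the next nro_orden
        # is looked up, certificates/deductions/retentions/perceptions are attached and
        # the webservice response is written back to SALIDA.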
if '--autorizar' in sys.argv:
if '--prueba' in sys.argv:
pto_emision = 99
# genero una liquidación de ejemplo:
dic = dict(
pto_emision=pto_emision,
nro_orden=0, # que lo calcule automáticamente
cuit_comprador='20400000000',
nro_act_comprador=40, nro_ing_bruto_comprador='20400000000',
cod_tipo_operacion=2 if "--consign" in sys.argv else 1,
es_liquidacion_propia='N', es_canje='N',
cod_puerto=14, des_puerto_localidad="DETALLE PUERTO",
cod_grano=31,
cuit_vendedor=23000000019, nro_ing_bruto_vendedor=23000000019,
actua_corredor="S", liquida_corredor="S",
cuit_corredor=wslpg.Cuit, # uso Cuit representado
comision_corredor=1, nro_ing_bruto_corredor=wslpg.Cuit,
fecha_precio_operacion="2014-02-07",
precio_ref_tn=2000,
cod_grado_ref="G1",
cod_grado_ent="FG",
factor_ent=98, val_grado_ent=1.02,
precio_flete_tn=10,
cont_proteico=20,
alic_iva_operacion=10.5,
campania_ppal=1314,
cod_localidad_procedencia=5544,
cod_prov_procedencia=12,
nro_contrato=0,
datos_adicionales=("DATOS ADICIONALES 1234 " * 17) + ".",
##peso_neto_sin_certificado=2000,
precio_operacion=None, # para probar ajustar
total_peso_neto=1000, # para probar ajustar
certificados=[dict(
tipo_certificado_deposito=332, # cert. electronico
nro_certificado_deposito=332000000466,
peso_neto=1000,
cod_localidad_procedencia=3,
cod_prov_procedencia=1,
campania=1314,
fecha_cierre="2014-01-13",)],
retenciones=[dict(
codigo_concepto="RI",
detalle_aclaratorio="DETALLE DE IVA",
base_calculo=1000,
alicuota=10.5,
), dict(
codigo_concepto="RG",
detalle_aclaratorio="DETALLE DE GANANCIAS",
base_calculo=100,
alicuota=0,
), dict(
codigo_concepto="OG",
detalle_aclaratorio="OTRO GRAVAMEN",
base_calculo=1000,
alicuota=0,
nro_certificado_retencion=111111111111,
fecha_certificado_retencion="2013-05-01",
importe_certificado_retencion=105,
)],
deducciones=[dict(
codigo_concepto="OD",
detalle_aclaratorio="FLETE",
dias_almacenaje="0",
precio_pkg_diario=0.0,
comision_gastos_adm=0.0,
base_calculo=100.0,
alicuota=21.0,
),dict(
codigo_concepto="AL",
detalle_aclaratorio="ALMACENAJE",
dias_almacenaje="30",
precio_pkg_diario=0.0001,
comision_gastos_adm=0.0,
alicuota=21.0,
),],
percepciones=[{'detalle_aclaratoria': 'percepcion 1',
'base_calculo': 1000, 'alicuota_iva': 21}],
datos=[
dict(campo="nombre_comprador", valor="NOMBRE 1"),
dict(campo="domicilio1_comprador", valor="DOMICILIO 1"),
dict(campo="domicilio2_comprador", valor="DOMICILIO 1"),
dict(campo="localidad_comprador", valor="LOCALIDAD 1"),
dict(campo="iva_comprador", valor="R.I."),
dict(campo="nombre_vendedor", valor="NOMBRE 2"),
dict(campo="domicilio1_vendedor", valor="DOMICILIO 2"),
dict(campo="domicilio2_vendedor", valor="DOMICILIO 2"),
dict(campo="localidad_vendedor", valor="LOCALIDAD 2"),
dict(campo="iva_vendedor", valor="R.I."),
dict(campo="nombre_corredor", valor="NOMBRE 3"),
dict(campo="domicilio_corredor", valor="DOMICILIO 3"),
]
)
if "--sincorr" in sys.argv:
# ajusto datos para prueba sin corredor
dic.update(dict(
cuit_comprador=wslpg.Cuit,
nro_act_comprador=29, nro_ing_bruto_comprador=wslpg.Cuit,
actua_corredor="N", liquida_corredor="N",
cuit_corredor=0,
comision_corredor=0, nro_ing_bruto_corredor=0,))
dic['retenciones'][1]['alicuota'] = 15
del dic['datos'][-1]
del dic['datos'][-1]
if "--sincert" in sys.argv:
# ajusto datos para prueba sin certificado de deposito
dic['peso_neto_sin_certificado'] = 10000
dic['cod_prov_procedencia_sin_certificado'] = 1
dic['cod_localidad_procedencia_sin_certificado'] = 15124
dic['certificados'] = []
if "--singrado" in sys.argv:
# ajusto datos para prueba sin grado ni valor entregado
dic['cod_grado_ref'] = ""
dic['cod_grado_ent'] = ""
dic['val_grado_ent'] = 0
if "--consign" in sys.argv:
# agrego deducción por comisión de gastos administrativos
dic['deducciones'].append(dict(
codigo_concepto="CO",
detalle_aclaratorio="COMISION",
dias_almacenaje=None,
precio_pkg_diario=None,
comision_gastos_adm=1.0,
base_calculo=1000.00,
alicuota=21.0,
))
escribir_archivo(dic, ENTRADA)
dic = leer_archivo(ENTRADA)
if int(dic['nro_orden']) == 0 and not '--testing' in sys.argv:
# consulto el último número de orden emitido:
ok = wslpg.ConsultarUltNroOrden(dic['pto_emision'])
if ok:
dic['nro_orden'] = wslpg.NroOrden + 1
# establezco los parametros (se pueden pasar directamente al metodo)
for k, v in sorted(dic.items()):
if DEBUG: print "%s = %s" % (k, v)
wslpg.SetParametro(k, v)
# cargo la liquidación:
wslpg.CrearLiquidacion()
for cert in dic.get('certificados', []):
wslpg.AgregarCertificado(**cert)
for ded in dic.get('deducciones', []):
wslpg.AgregarDeduccion(**ded)
for ret in dic.get('retenciones', []):
wslpg.AgregarRetencion(**ret)
for per in dic.get('percepciones', []):
wslpg.AgregarPercepcion(**per)
if '--testing' in sys.argv:
# mensaje de prueba (no realiza llamada remota),
# usar solo si no está operativo
if '--error' in sys.argv:
wslpg.LoadTestXML("wslpg_error.xml") # cargo error
else:
wslpg.LoadTestXML("wslpg_aut_test.xml") # cargo respuesta
print "Liquidacion: pto_emision=%s nro_orden=%s nro_act=%s tipo_op=%s" % (
wslpg.liquidacion['ptoEmision'],
wslpg.liquidacion['nroOrden'],
wslpg.liquidacion['nroActComprador'],
wslpg.liquidacion['codTipoOperacion'],
)
if not '--dummy' in sys.argv:
if '--recorrer' in sys.argv:
print "Consultando actividades y operaciones habilitadas..."
lista_act_op = wslpg.ConsultarTiposOperacion(sep=None)
# recorro las actividades habilitadas buscando la
for nro_act, cod_op, det in lista_act_op:
print "Probando nro_act=", nro_act, "cod_op=", cod_op,
wslpg.liquidacion['nroActComprador'] = nro_act
wslpg.liquidacion['codTipoOperacion'] = cod_op
ret = wslpg.AutorizarLiquidacion()
if wslpg.COE:
print
break # si obtuve COE salgo
else:
                            print wslpg.Errores
else:
print "Autorizando..."
ret = wslpg.AutorizarLiquidacion()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print "COEAjustado", wslpg.COEAjustado
print "TotalDeduccion", wslpg.TotalDeduccion
print "TotalRetencion", wslpg.TotalRetencion
print "TotalRetencionAfip", wslpg.TotalRetencionAfip
print "TotalOtrasRetenciones", wslpg.TotalOtrasRetenciones
print "TotalNetoAPagar", wslpg.TotalNetoAPagar
print "TotalIvaRg4310_18", wslpg.TotalIvaRg4310_18
print "TotalPagoSegunCondicion", wslpg.TotalPagoSegunCondicion
if False and '--testing' in sys.argv:
assert wslpg.COE == "330100000357"
assert wslpg.COEAjustado == None
assert wslpg.Estado == "AC"
assert wslpg.TotalPagoSegunCondicion == 1968.00
assert wslpg.GetParametro("fecha_liquidacion") == "2013-02-07"
assert wslpg.GetParametro("retenciones", 1, "importe_retencion") == "157.60"
if DEBUG:
pprint.pprint(wslpg.params_out)
# actualizo el archivo de salida con los datos devueltos
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
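        # --ajustar: unified adjustment of an authorized liquidation (by COE or, with
        # --contrato, by contract number): a base record plus separate credit and debit
        # adjustment sections, analyzed afterwards with AnalizarAjusteCredito/Debito.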
if '--ajustar' in sys.argv:
print "Ajustando..."
if '--prueba' in sys.argv:
# genero una liquidación de ejemplo:
dic = dict(
pto_emision=55, nro_orden=0, coe_ajustado='330100025869',
cod_localidad_procedencia=5544, cod_prov_procedencia=12,
cod_puerto=14, des_puerto_localidad="DETALLE PUERTO",
cod_grano=31, # no enviado a AFIP, pero usado para el PDF
certificados=[dict(
tipo_certificado_deposito=5,
nro_certificado_deposito=555501200729,
peso_neto=10000,
cod_localidad_procedencia=3,
cod_prov_procedencia=1,
campania=1213,
fecha_cierre='2013-01-13',
peso_neto_total_certificado=10000,
)],
fusion=[{'nro_ing_brutos': '20400000000', 'nro_actividad': 40}],
ajuste_credito=dict(
diferencia_peso_neto=1000, diferencia_precio_operacion=100,
cod_grado="G2", val_grado=1.0, factor=100,
diferencia_precio_flete_tn=10,
datos_adicionales='AJUSTE CRED UNIF',
concepto_importe_iva_0='Alicuota Cero',
importe_ajustar_Iva_0=900,
concepto_importe_iva_105='Alicuota Diez',
importe_ajustar_Iva_105=800,
concepto_importe_iva_21='Alicuota Veintiuno',
importe_ajustar_Iva_21=700,
deducciones=[dict(codigo_concepto="AL",
detalle_aclaratorio="Deduc Alm",
dias_almacenaje="1",
precio_pkg_diario=0.01,
comision_gastos_adm=1.0,
base_calculo=1000.0,
alicuota=10.5, )],
retenciones=[dict(codigo_concepto="RI",
detalle_aclaratorio="Ret IVA",
base_calculo=1000,
alicuota=10.5, )],
certificados=[{'peso_neto': 200,
'coe_certificado_deposito': '330100025869'}],
),
ajuste_debito=dict(
diferencia_peso_neto=500, diferencia_precio_operacion=100,
cod_grado="G2", val_grado=1.0, factor=100,
diferencia_precio_flete_tn=0.01,
datos_adicionales='AJUSTE DEB UNIF',
concepto_importe_iva_0='Alic 0',
importe_ajustar_Iva_0=250,
concepto_importe_iva_105='Alic 10.5',
importe_ajustar_Iva_105=200,
concepto_importe_iva_21='Alicuota 21',
importe_ajustar_Iva_21=50,
deducciones=[dict(codigo_concepto="AL",
detalle_aclaratorio="Deduc Alm",
dias_almacenaje="1",
precio_pkg_diario=0.01,
comision_gastos_adm=1.0,
base_calculo=500.0,
alicuota=10.5, )],
retenciones=[dict(codigo_concepto="RI",
detalle_aclaratorio="Ret IVA",
base_calculo=100,
alicuota=10.5, )],
certificados=[{'peso_neto': 300,
'coe_certificado_deposito': '330100025869'}],
),
datos=[
dict(campo="nombre_comprador", valor="NOMBRE 1"),
dict(campo="domicilio1_comprador", valor="DOMICILIO 1"),
dict(campo="domicilio2_comprador", valor="DOMICILIO 1"),
dict(campo="localidad_comprador", valor="LOCALIDAD 1"),
dict(campo="iva_comprador", valor="R.I."),
dict(campo="nombre_vendedor", valor="NOMBRE 2"),
dict(campo="domicilio1_vendedor", valor="DOMICILIO 2"),
dict(campo="domicilio2_vendedor", valor="DOMICILIO 2"),
dict(campo="localidad_vendedor", valor="LOCALIDAD 2"),
dict(campo="iva_vendedor", valor="R.I."),
dict(campo="nombre_corredor", valor="NOMBRE 3"),
dict(campo="domicilio_corredor", valor="DOMICILIO 3"),
# completo datos no contemplados en la respuesta por AFIP:
dict(campo="cod_grano", valor="31"),
dict(campo="cod_grado_ent", valor="G1"),
dict(campo="cod_grado_ref", valor="G1"),
dict(campo="factor_ent", valor="98"),
dict(campo="cod_puerto", valor=14),
dict(campo="cod_localidad_procedencia", valor=3),
dict(campo="cod_prov_procedencia", valor=1),
dict(campo="precio_ref_tn", valor="$ 1000,00"),
dict(campo="precio_flete_tn", valor="$ 100,00"),
dict(campo="des_grado_ref", valor="G1"),
dict(campo="alic_iva_operacion", valor=""),
]
)
if '--contrato' in sys.argv:
dic.update(
{'nro_act_comprador': 40,
'cod_grado_ent': 'G1',
'cod_grano': 31,
'cod_puerto': 14,
'cuit_comprador': 20400000000,
'cuit_corredor': 20267565393,
'cuit_vendedor': 23000000019,
'des_puerto_localidad': 'Desc Puerto',
'nro_contrato': 27,
'precio_flete_tn': 1000,
'precio_ref_tn': 1000,
'val_grado_ent': 1.01})
#del dic['ajuste_debito']['retenciones']
#del dic['ajuste_credito']['retenciones']
escribir_archivo(dic, ENTRADA)
dic = leer_archivo(ENTRADA)
if int(dic['nro_orden']) == 0 and not '--testing' in sys.argv:
# consulto el último número de orden emitido:
ok = wslpg.ConsultarUltNroOrden(dic['pto_emision'])
if ok:
dic['nro_orden'] = wslpg.NroOrden + 1
if '--contrato' in sys.argv:
for k in ("nro_contrato", "nro_act_comprador", "cod_grano",
"cuit_vendedor", "cuit_comprador", "cuit_corredor",
"precio_ref_tn", "cod_grado_ent", "val_grado_ent",
"precio_flete_tn", "cod_puerto",
"des_puerto_localidad"):
v = dic.get(k)
if v:
wslpg.SetParametro(k, v)
wslpg.CrearAjusteBase(pto_emision=dic['pto_emision'],
nro_orden=dic['nro_orden'],
coe_ajustado=dic['coe_ajustado'],
cod_localidad=dic['cod_localidad_procedencia'],
cod_provincia=dic['cod_prov_procedencia'],
)
for cert in dic.get('certificados', []):
if cert:
wslpg.AgregarCertificado(**cert)
for fusion in dic.get('fusion', []):
wslpg.AgregarFusion(**fusion)
liq = dic['ajuste_credito']
wslpg.CrearAjusteCredito(**liq)
for ded in liq.get('deducciones', []):
wslpg.AgregarDeduccion(**ded)
for ret in liq.get('retenciones', []):
wslpg.AgregarRetencion(**ret)
for cert in liq.get('certificados', []):
if cert:
wslpg.AgregarCertificado(**cert)
liq = dic['ajuste_debito']
wslpg.CrearAjusteDebito(**liq)
for ded in liq.get('deducciones', []):
wslpg.AgregarDeduccion(**ded)
for ret in liq.get('retenciones', []):
wslpg.AgregarRetencion(**ret)
for cert in liq.get('certificados', []):
if cert:
wslpg.AgregarCertificado(**cert)
if '--testing' in sys.argv:
wslpg.LoadTestXML("tests/wslpg_ajuste_unificado.xml")
if '--contrato' in sys.argv:
ret = wslpg.AjustarLiquidacionContrato()
else:
ret = wslpg.AjustarLiquidacionUnificado()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print "Subtotal", wslpg.Subtotal
print "TotalIva105", wslpg.TotalIva105
print "TotalIva21", wslpg.TotalIva21
print "TotalRetencionesGanancias", wslpg.TotalRetencionesGanancias
print "TotalRetencionesIVA", wslpg.TotalRetencionesIVA
print "TotalNetoAPagar", wslpg.TotalNetoAPagar
print "TotalIvaRg4310_18", wslpg.TotalIvaRg4310_18
print "TotalPagoSegunCondicion", wslpg.TotalPagoSegunCondicion
# actualizo el archivo de salida con los datos devueltos
dic.update(wslpg.params_out)
ok = wslpg.AnalizarAjusteCredito()
dic['ajuste_credito'].update(wslpg.params_out)
ok = wslpg.AnalizarAjusteDebito()
dic['ajuste_debito'].update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
if DEBUG:
pprint.pprint(dic)
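        # --asociar: link an existing liquidation (COE) to a contract; with --lsg the
        # secondary-liquidation variant of the call is used.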
if '--asociar' in sys.argv:
print "Asociando...",
if '--prueba' in sys.argv:
# genero datos de ejemplo en el archivo para consultar:
dic = dict(coe="330100004664", nro_contrato=26, cod_grano=31,
cuit_comprador="20400000000",
cuit_vendedor="23000000019",
cuit_corredor="20267565393",
)
escribir_archivo(dic, ENTRADA)
dic = leer_archivo(ENTRADA)
print ', '.join(sorted(["%s=%s" % (k, v) for k,v in dic.items()
if k in ("nro_contrato", "coe") or
k.startswith("cuit")]))
if not '--lsg' in sys.argv:
wslpg.AsociarLiquidacionAContrato(**dic)
else:
wslpg.AsociarLiquidacionSecundariaAContrato(**dic)
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print "Estado", wslpg.Estado
# actualizo el archivo de salida con los datos devueltos
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
if '--anular' in sys.argv:
##print wslpg.client.help("anularLiquidacion")
try:
coe = sys.argv[sys.argv.index("--anular") + 1]
except IndexError:
coe = 330100000357
if '--lsg' in sys.argv:
print "Anulando COE LSG", coe
ret = wslpg.AnularLiquidacionSecundaria(coe)
            elif '--cg' in sys.argv:
print "Anulando COE CG", coe
ret = wslpg.AnularCertificacion(coe)
else:
print "Anulando COE", coe
ret = wslpg.AnularLiquidacion(coe)
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "COE", wslpg.COE
print "Resultado", wslpg.Resultado
print "Errores:", wslpg.Errores
sys.exit(0)
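        # --consultar: query an existing liquidation (or the LSG/CG variants) by
        # pto_emision/nro_orden or COE; positional arguments follow the flag, the last
        # one optionally being a path to save the PDF returned by AFIP.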
if '--consultar' in sys.argv:
pto_emision = None
nro_orden = 0
coe = pdf = None
try:
pto_emision = sys.argv[sys.argv.index("--consultar") + 1]
nro_orden = sys.argv[sys.argv.index("--consultar") + 2]
coe = sys.argv[sys.argv.index("--consultar") + 3]
pdf = sys.argv[sys.argv.index("--consultar") + 4]
except IndexError:
pass
if '--testing' in sys.argv:
# mensaje de prueba (no realiza llamada remota),
# usar solo si no está operativo
wslpg.LoadTestXML("wslpg_cons_test.xml") # cargo prueba
print "Consultando: pto_emision=%s nro_orden=%s coe=%s" % (pto_emision, nro_orden, coe)
if '--lsg' in sys.argv:
ret = wslpg.ConsultarLiquidacionSecundaria(pto_emision=pto_emision, nro_orden=nro_orden, coe=coe, pdf=pdf)
elif '--cg' in sys.argv:
ret = wslpg.ConsultarCertificacion(pto_emision=pto_emision, nro_orden=nro_orden, coe=coe, pdf=pdf)
elif '--cancelar-anticipo' in sys.argv:
ret = wslpg.CancelarAnticipo(pto_emision=pto_emision, nro_orden=nro_orden, coe=coe, pdf=pdf)
else:
ret = wslpg.ConsultarLiquidacion(pto_emision=pto_emision, nro_orden=nro_orden, coe=coe, pdf=pdf)
print "COE", wslpg.COE
print "Estado", wslpg.Estado
print "Errores:", wslpg.Errores
# actualizo el archivo de salida con los datos devueltos
escribir_archivo(wslpg.params_out, SALIDA, agrega=('--agrega' in sys.argv))
if DEBUG:
pprint.pprint(wslpg.params_out)
if '--mostrar' in sys.argv and pdf:
wslpg.MostrarPDF(archivo=pdf,
imprimir='--imprimir' in sys.argv)
if '--consultar_ajuste' in sys.argv:
pto_emision = None
nro_orden = 0
nro_contrato = None
coe = pdf = None
try:
pto_emision = int(sys.argv[sys.argv.index("--consultar_ajuste") + 1])
nro_orden = int(sys.argv[sys.argv.index("--consultar_ajuste") + 2])
nro_contrato = int(sys.argv[sys.argv.index("--consultar_ajuste") + 3])
coe = sys.argv[sys.argv.index("--consultar_ajuste") + 4]
pdf = sys.argv[sys.argv.index("--consultar_ajuste") + 5]
except IndexError:
pass
if '--testing' in sys.argv:
# mensaje de prueba (no realiza llamada remota),
# usar solo si no está operativo
wslpg.LoadTestXML("wslpg_cons_ajuste_test.xml") # cargo prueba
print "Consultando: pto_emision=%s nro_orden=%s nro_contrato=%s" % (
pto_emision, nro_orden, nro_contrato)
wslpg.ConsultarAjuste(pto_emision, nro_orden, nro_contrato, coe, pdf)
print "COE", wslpg.COE
print "Estado", wslpg.Estado
print "Errores:", wslpg.Errores
# actualizo el archivo de salida con los datos devueltos
dic = wslpg.params_out
ok = wslpg.AnalizarAjusteCredito()
dic['ajuste_credito'] = wslpg.params_out
ok = wslpg.AnalizarAjusteDebito()
dic['ajuste_debito'] = wslpg.params_out
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
if DEBUG:
pprint.pprint(dic)
if '--consultar_por_contrato' in sys.argv:
print "Consultando liquidaciones por contrato...",
if '--prueba' in sys.argv:
# genero datos de ejemplo en el archivo para consultar:
dic = dict(nro_contrato=26, cod_grano=31,
cuit_comprador="20400000000",
cuit_vendedor="23000000019",
cuit_corredor="20267565393",
)
escribir_archivo(dic, ENTRADA)
dic = leer_archivo(ENTRADA)
print ', '.join(sorted(["%s=%s" % (k, v) for k,v in dic.items()
if k == "nro_contrato" or k.startswith("cuit")]))
if not '--lsg' in sys.argv:
wslpg.ConsultarLiquidacionesPorContrato(**dic)
else:
wslpg.ConsultarLiquidacionesSecundariasPorContrato(**dic)
print "Errores:", wslpg.Errores
while wslpg.COE:
print "COE", wslpg.COE
wslpg.LeerDatosLiquidacion()
##print "Estado", wslpg.Estado
# actualizo el archivo de salida con los datos devueltos
dic['coe'] = wslpg.COE
escribir_archivo(dic, SALIDA, agrega=True)
if '--ult' in sys.argv:
try:
pto_emision = int(sys.argv[sys.argv.index("--ult") + 1])
            except (IndexError, ValueError):
pto_emision = 1
print "Consultando ultimo nro_orden para pto_emision=%s" % pto_emision,
if '--lsg' in sys.argv:
print "LSG"
ret = wslpg.ConsultarLiquidacionSecundariaUltNroOrden(pto_emision)
elif '--cg' in sys.argv:
print "CG"
ret = wslpg.ConsultarCertificacionUltNroOrden(pto_emision)
else:
print "LPG"
ret = wslpg.ConsultarUltNroOrden(pto_emision)
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Ultimo Nro de Orden", wslpg.NroOrden
print "Errores:", wslpg.Errores
sys.exit(0)
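        # --autorizar-lsg: authorize a secondary grain liquidation (LSG), attaching
        # deductions, perceptions, optional data and, if present, the paper invoice.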
if '--autorizar-lsg' in sys.argv:
if '--prueba' in sys.argv:
# genero una liquidación de ejemplo:
dic = dict(
pto_emision=99,
nro_orden=1, nro_contrato=100001232,
cuit_comprador='20400000000',
nro_ing_bruto_comprador='123',
cod_puerto=14, des_puerto_localidad="DETALLE PUERTO",
cod_grano=2, cantidad_tn=100,
cuit_vendedor="23000000019", nro_act_vendedor=29,
nro_ing_bruto_vendedor=123456,
actua_corredor="S", liquida_corredor="S",
cuit_corredor=wslpg.Cuit, # uso Cuit representado
nro_ing_bruto_corredor=wslpg.Cuit,
fecha_precio_operacion="2014-10-10",
precio_ref_tn=100, precio_operacion=150,
alic_iva_operacion=10.5, campania_ppal=1314,
cod_localidad_procedencia=197,
cod_prov_procedencia=10,
datos_adicionales="Prueba",
deducciones=[{'detalle_aclaratorio': 'deduccion 1',
'base_calculo': 100, 'alicuota_iva': 21}],
percepciones=[{'detalle_aclaratoria': 'percepcion 1',
'base_calculo': 1000, 'alicuota_iva': 21}],
opcionales=[{'codigo': 1,
'descripcion': 'previsto para info adic.'}],
factura_papel=[{'nro_cai': "1234", 'nro_factura_papel': 1,
'fecha_factura': "2015-01-01",
'tipo_comprobante': 1}],
)
escribir_archivo(dic, ENTRADA, agrega=('--agrega' in sys.argv))
dic = leer_archivo(ENTRADA)
# cargo la liquidación:
wslpg.CrearLiqSecundariaBase(**dic)
for ded in dic.get('deducciones', []):
wslpg.AgregarDeduccion(**ded)
for per in dic.get("percepciones", []):
wslpg.AgregarPercepcion(**per)
for opc in dic.get("opcionales", []):
wslpg.AgregarOpcional(**opc)
for fp in dic.get('factura_papel', []):
wslpg.AgregarFacturaPapel(**fp)
print "Liquidacion Secundaria: pto_emision=%s nro_orden=%s" % (
wslpg.liquidacion['ptoEmision'],
wslpg.liquidacion['nroOrden'],
)
if '--testing' in sys.argv:
# mensaje de prueba (no realiza llamada remota),
wslpg.LoadTestXML("wslpg_lsg_autorizar_resp.xml")
wslpg.AutorizarLiquidacionSecundaria()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print wslpg.GetParametro("cod_tipo_operacion")
print wslpg.GetParametro("fecha_liquidacion")
print wslpg.GetParametro("subtotal")
print wslpg.GetParametro("importe_iva")
print wslpg.GetParametro("operacion_con_iva")
print wslpg.GetParametro("total_peso_neto")
print wslpg.GetParametro("numero_contrato")
# actualizo el archivo de salida con los datos devueltos
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
if '--ajustar-lsg' in sys.argv:
print "Ajustando LSG..."
if '--prueba' in sys.argv:
# genero una liquidación de ejemplo:
dic = dict(
pto_emision=55, nro_orden=0, coe_ajustado='330100025869',
cod_localidad_procedencia=5544, cod_prov_procedencia=12,
cod_puerto=14, des_puerto_localidad="DETALLE PUERTO",
cod_grano=2,
nro_contrato='1234' if '--contrato' in sys.argv else 0,
ajuste_credito=dict(
concepto_importe_iva_0='Alicuota Cero',
importe_ajustar_iva_0=900,
concepto_importe_iva_105='Alicuota Diez',
importe_ajustar_iva_105=800,
concepto_importe_iva_21='Alicuota Veintiuno',
importe_ajustar_iva_21=700,
percepciones=[{'detalle_aclaratoria': 'percepcion 1',
'base_calculo': 1000, 'alicuota_iva': 21}],
estado=None,
coe_ajustado=None,
datos_adicionales='AJUSTE CRED LSG',
),
ajuste_debito=dict(
concepto_importe_iva_0='Alic 0',
importe_ajustar_iva_0=250,
concepto_importe_iva_105='Alic 10.5',
importe_ajustar_iva_105=200,
concepto_importe_iva_21='Alicuota 21',
importe_ajustar_iva_21=50,
percepciones=[{'detalle_aclaratoria': 'percepcion 2',
'base_calculo': 1000, 'alicuota_iva': 21}],
datos_adicionales='AJUSTE DEB LSG',
),
)
if '--contrato' in sys.argv:
dic.update(
{'nro_contrato': 27,
'cuit_comprador': 20400000000,
'cuit_vendedor': 23000000019,
'cuit_corredor': 20267565393, #opcional
'cod_grano': 2,
})
escribir_archivo(dic, ENTRADA)
dic = leer_archivo(ENTRADA)
if int(dic['nro_orden']) == 0 and not '--testing' in sys.argv:
# consulto el último número de orden emitido:
ok = wslpg.ConsultarLiquidacionSecundariaUltNroOrden(dic['pto_emision'])
if ok:
dic['nro_orden'] = wslpg.NroOrden + 1
if '--contrato' in sys.argv:
for k in ("nro_contrato", "nro_act_comprador", "cod_grano",
"cuit_vendedor", "cuit_comprador", "cuit_corredor",
):
v = dic.get(k)
if v:
wslpg.SetParametro(k, v)
wslpg.CrearAjusteBase(pto_emision=dic['pto_emision'],
nro_orden=dic['nro_orden'],
coe_ajustado=dic['coe_ajustado'],
cod_localidad=dic['cod_localidad_procedencia'],
cod_provincia=dic['cod_prov_procedencia'],
)
if 'ajuste_credito' in dic:
liq = dic['ajuste_credito']
wslpg.CrearAjusteCredito(**liq)
for per in liq.get("percepciones", []):
wslpg.AgregarPercepcion(**per)
if 'ajuste_debito' in dic:
liq = dic['ajuste_debito']
wslpg.CrearAjusteDebito(**liq)
for per in liq.get("percepciones", []):
wslpg.AgregarPercepcion(**per)
if '--testing' in sys.argv:
wslpg.LoadTestXML("tests/wslpg_ajuste_secundaria.xml")
ret = wslpg.AjustarLiquidacionSecundaria()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print "Subtotal", wslpg.Subtotal
print "TotalIva105", wslpg.TotalIva105
print "TotalIva21", wslpg.TotalIva21
print "TotalRetencionesGanancias", wslpg.TotalRetencionesGanancias
print "TotalRetencionesIVA", wslpg.TotalRetencionesIVA
print "TotalNetoAPagar", wslpg.TotalNetoAPagar
print "TotalIvaRg4310_18", wslpg.TotalIvaRg4310_18
print "TotalPagoSegunCondicion", wslpg.TotalPagoSegunCondicion
# actualizo el archivo de salida con los datos devueltos
dic.update(wslpg.params_out)
ok = wslpg.AnalizarAjusteCredito()
dic['ajuste_credito'].update(wslpg.params_out)
ok = wslpg.AnalizarAjusteDebito()
dic['ajuste_debito'].update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
if DEBUG:
pprint.pprint(dic)
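        # --autorizar-anticipo: authorize an advance payment for a primary liquidation;
        # only retentions are attached in this mode.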
if '--autorizar-anticipo' in sys.argv:
if '--prueba' in sys.argv:
# genero una liquidación de ejemplo:
dic = dict(
pto_emision=33,
nro_orden=1,
cuit_comprador='20400000000',
nro_act_comprador='40',
nro_ing_bruto_comprador='123',
cod_tipo_operacion=2,
cod_puerto=14, des_puerto_localidad="DETALLE PUERTO",
cod_grano=1,
peso_neto_sin_certificado=100,
cuit_vendedor="30000000006",
nro_ing_bruto_vendedor=123456,
actua_corredor="S", liquida_corredor="S",
cuit_corredor=wslpg.Cuit, # uso Cuit representado
nro_ing_bruto_corredor=wslpg.Cuit,
comision_corredor="20.6",
fecha_precio_operacion="2015-10-10",
precio_ref_tn=567, ## precio_operacion=150,
alic_iva_operacion="10.5", campania_ppal=1415,
cod_localidad_procedencia=197,
cod_prov_procedencia=10,
datos_adicionales="Prueba",
retenciones=[dict(codigo_concepto="RI",
detalle_aclaratorio="Retenciones IVA",
base_calculo=100,
alicuota=10.5, ),
dict(codigo_concepto="RG",
detalle_aclaratorio="Retenciones GAN",
base_calculo=100,
alicuota=2, )],
)
escribir_archivo(dic, ENTRADA, agrega=('--agrega' in sys.argv))
dic = leer_archivo(ENTRADA)
# cargo la liquidación:
wslpg.CrearLiquidacion(**dic)
for ret in dic.get('retenciones', []):
wslpg.AgregarRetencion(**ret)
print "Liquidacion Primaria (Ant): pto_emision=%s nro_orden=%s" % (
wslpg.liquidacion['ptoEmision'],
wslpg.liquidacion['nroOrden'],
)
if '--testing' in sys.argv:
# mensaje de prueba (no realiza llamada remota),
wslpg.LoadTestXML("wslpg_autorizar_ant_resp.xml")
wslpg.AutorizarAnticipo()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print wslpg.GetParametro("cod_tipo_operacion")
print wslpg.GetParametro("fecha_liquidacion")
            print "TotalDeduccion", wslpg.TotalDeduccion
print "TotalRetencion", wslpg.TotalRetencion
print "TotalRetencionAfip", wslpg.TotalRetencionAfip
print "TotalOtrasRetenciones", wslpg.TotalOtrasRetenciones
print "TotalNetoAPagar", wslpg.TotalNetoAPagar
print "TotalIvaRg4310_18", wslpg.TotalIvaRg4310_18
print "TotalPagoSegunCondicion", wslpg.TotalPagoSegunCondicion
# actualizo el archivo de salida con los datos devueltos
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
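        # --autorizar-cg: authorize a grain certification; tipo_certificado selects the
        # branch: 'P' primary (CTGs, quality, sample details), 'R'/'T' retiro/transferencia
        # (deposit certificates) or 'E' pre-existing certificate.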
if '--autorizar-cg' in sys.argv:
if '--prueba' in sys.argv:
# consulto ultimo numero de orden
pto_emision = 99
wslpg.ConsultarCertificacionUltNroOrden(pto_emision)
# genero una certificación de ejemplo a autorizar:
dic = dict(
pto_emision=pto_emision, nro_orden=wslpg.NroOrden + 1,
tipo_certificado="P", nro_planta="3091",
nro_ing_bruto_depositario="20267565393",
titular_grano="T",
cuit_depositante='20111111112',
nro_ing_bruto_depositante='123',
cuit_corredor=None if '--sincorr' in sys.argv else '20222222223',
cod_grano=2, campania=1314,
datos_adicionales="Prueba",)
# datos provisorios de prueba (segun tipo de certificación):
if '--primaria' in sys.argv:
dep = dict(
nro_act_depositario=29,
tipo_certificado="P",
descripcion_tipo_grano="SOJA",
monto_almacenaje=1, monto_acarreo=2,
monto_gastos_generales=3, monto_zarandeo=4,
porcentaje_secado_de=5, porcentaje_secado_a=4,
monto_secado=7, monto_por_cada_punto_exceso=8,
monto_otros=9,
porcentaje_merma_volatil=15, peso_neto_merma_volatil=16,
porcentaje_merma_secado=17, peso_neto_merma_secado=18,
porcentaje_merma_zarandeo=19, peso_neto_merma_zarandeo=20,
peso_neto_certificado=21, servicios_secado=22,
servicios_zarandeo=23, servicio_otros=240000,
servicios_forma_de_pago=25,
# campos no documentados por AFIP:
servicios_conceptos_no_gravados=26,
servicios_percepciones_iva=27,
servicios_otras_percepciones=0, # no enviar si es 0
)
dic.update(dep)
det = dict(descripcion_rubro="bonif",
tipo_rubro="B", porcentaje=1, valor=1)
dic['det_muestra_analisis'] = [det]
cal = dict(analisis_muestra=10, nro_boletin=11,
cod_grado="F1", valor_grado=1.02,
valor_contenido_proteico=1, valor_factor=1)
dic['calidad'] = [cal]
ctg = dict(nro_ctg="123456", nro_carta_porte=1000,
porcentaje_secado_humedad=1, importe_secado=2,
peso_neto_merma_secado=3, tarifa_secado=4,
importe_zarandeo=5, peso_neto_merma_zarandeo=6,
tarifa_zarandeo=7,
peso_neto_confirmado_definitivo=1)
dic['ctgs'] = [ctg, ctg]
if '--retiro-transf' in sys.argv:
rt = dict(
nro_act_depositario=29,
tipo_certificado="R",
cuit_receptor="20267565393",
fecha="2014-11-26",
nro_carta_porte_a_utilizar="530305323",
cee_carta_porte_a_utilizar="123456789012",
)
dic.update(rt)
cert = dict(
peso_neto=10000,
coe_certificado_deposito="332000000357",
)
dic['certificados'] = [cert]
if '--preexistente' in sys.argv:
pre = dict(
tipo_certificado="E",
tipo_certificado_deposito_preexistente=1, # "R" o "T"
nro_certificado_deposito_preexistente="530305327",
cac_certificado_deposito_preexistente="85113524869336",
fecha_emision_certificado_deposito_preexistente="2014-11-26",
peso_neto=10000, nro_planta=3091,
)
dic.update(pre)
escribir_archivo(dic, ENTRADA, agrega=('--agrega' in sys.argv))
dic = leer_archivo(ENTRADA)
# cargar los datos según el tipo de certificación:
wslpg.CrearCertificacionCabecera(**dic)
if dic["tipo_certificado"] in ('P'):
wslpg.AgregarCertificacionPrimaria(**dic)
for ctg in dic.get("ctgs", []):
wslpg.AgregarCTG(**ctg)
for cal in dic.get("calidad", []):
wslpg.AgregarCalidad(**cal)
for det in dic.get("det_muestra_analisis", []):
wslpg.AgregarDetalleMuestraAnalisis(**det)
if dic["tipo_certificado"] in ('R', 'T'):
wslpg.AgregarCertificacionRetiroTransferencia(**dic)
for cert in dic.get("certificados", []):
wslpg.AgregarCertificado(**cert)
if dic["tipo_certificado"] in ('E', ):
wslpg.AgregarCertificacionPreexistente(**dic)
print "Certificacion: pto_emision=%s nro_orden=%s tipo=%s" % (
wslpg.certificacion['cabecera']['ptoEmision'],
wslpg.certificacion['cabecera']['nroOrden'],
wslpg.certificacion['cabecera']['tipoCertificado'],
)
if '--testing' in sys.argv:
# mensaje de prueba (no realiza llamada remota),
wslpg.LoadTestXML("tests/wslpg_cert_autorizar_resp.xml")
wslpg.LoadTestXML("tests/xml/wslpg_cg_err_response.xml")
wslpg.AutorizarCertificacion()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print wslpg.GetParametro("fecha_certificacion")
# actualizo el archivo de salida con los datos devueltos
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
# Informar calidad (solo CG primarias)
if '--informar-calidad' in sys.argv:
dic = leer_archivo(ENTRADA)
wslpg.CrearCertificacionCabecera(**dic)
wslpg.AgregarCertificacionPrimaria()
for cal in dic.get("calidad", []):
wslpg.AgregarCalidad(**cal)
for det in dic.get("det_muestra_analisis", []):
wslpg.AgregarDetalleMuestraAnalisis(**det)
# intento obtener el COE por linea de parametros o del archivo:
try:
coe = sys.argv[sys.argv.index("--informar-calidad") + 1]
except IndexError:
coe = dic['coe']
print "Informar Calidad: coe=%s " % (coe, )
wslpg.InformarCalidadCertificacion(coe)
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
# actualizo el archivo de salida con los datos devueltos
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
# consultar CTG a certificar en una CG:
if '--buscar-ctg' in sys.argv:
argv = dict([(i, e) for i, e
in enumerate(sys.argv[sys.argv.index("--buscar-ctg")+1:])
if not e.startswith("--")])
tipo_certificado = argv.get(0, "P") # P
cuit_depositante = argv.get(1) #
nro_planta = argv.get(2, 3091) or None # opcional si no es primaria
cod_grano = argv.get(3, 2)
campania = argv.get(4, 1314)
ret = wslpg.BuscarCTG(tipo_certificado, cuit_depositante,
nro_planta, cod_grano, campania)
pprint.pprint(wslpg.params_out)
if DEBUG:
print "NRO CTG", wslpg.GetParametro("ctgs", 0, "nro_ctg")
# consultar certificados con saldo disponible para liquidar/transferir:
if '--buscar-cert-con-saldo-disp' in sys.argv:
argv = dict([(i, e) for i, e
in enumerate(sys.argv[sys.argv.index("--buscar-cert-con-saldo-disp")+1:])
if not e.startswith("--")])
cuit_depositante = argv.get(0) # por defecto usa el CUIT .ini
cod_grano = argv.get(1, 2) #
campania = argv.get(2, 1314)
coe = argv.get(3)
fecha_emision_des = argv.get(4)
fecha_emision_has = argv.get(5)
if '--testing' in sys.argv:
wslpg.LoadTestXML("tests/xml/wslpg_resp_buscar_cert.xml") # cargo respuesta
ret = wslpg.BuscarCertConSaldoDisponible(cuit_depositante,
cod_grano, campania, coe,
fecha_emision_des, fecha_emision_has,
)
pprint.pprint(wslpg.params_out)
print wslpg.ErrMsg
if DEBUG:
print "1er COE", wslpg.GetParametro("certificados", 0, "coe")
# Recuperar parámetros:
if '--campanias' in sys.argv:
ret = wslpg.ConsultarCampanias()
print "\n".join(ret)
if '--tipograno' in sys.argv:
ret = wslpg.ConsultarTipoGrano()
print "\n".join(ret)
if '--gradoref' in sys.argv:
ret = wslpg.ConsultarCodigoGradoReferencia()
print "\n".join(ret)
if '--gradoent' in sys.argv:
##wslpg.LoadTestXML("wslpg_cod.xml") # cargo respuesta de ej
cod_grano = raw_input("Ingrese el código de grano: ")
ret = wslpg.ConsultarGradoEntregadoXTipoGrano(cod_grano=cod_grano)
print "\n".join(ret)
if '--datos' in sys.argv:
print "# Grados"
print wslpg.ConsultarCodigoGradoReferencia(sep=None)
print "# Datos de grado entregado por tipo de granos:"
for cod_grano in wslpg.ConsultarTipoGrano(sep=None):
grad_ent = wslpg.ConsultarGradoEntregadoXTipoGrano(cod_grano, sep=None)
print cod_grano, ":", grad_ent, ","
if '--shelve' in sys.argv:
print "# Construyendo BD de Localidades por Provincias"
import wslpg_datos as datos
for cod_prov, desc_prov in wslpg.ConsultarProvincias(sep=None).items():
print "Actualizando Provincia", cod_prov, desc_prov
d = wslpg.BuscarLocalidades(cod_prov)
if '--certdeposito' in sys.argv:
ret = wslpg.ConsultarTipoCertificadoDeposito()
print "\n".join(ret)
if '--deducciones' in sys.argv:
ret = wslpg.ConsultarTipoDeduccion()
print "\n".join(ret)
if '--retenciones' in sys.argv:
ret = wslpg.ConsultarTipoRetencion()
print "\n".join(ret)
if '--puertos' in sys.argv:
ret = wslpg.ConsultarPuerto()
print "\n".join(ret)
if '--actividades' in sys.argv:
ret = wslpg.ConsultarTipoActividad()
print "\n".join(ret)
if '--actividadesrep' in sys.argv:
ret = wslpg.ConsultarTipoActividadRepresentado()
print "\n".join(ret)
print "Errores:", wslpg.Errores
if '--operaciones' in sys.argv:
ret = wslpg.ConsultarTiposOperacion()
print "\n".join(ret)
if '--provincias' in sys.argv:
ret = wslpg.ConsultarProvincias()
print "\n".join(ret)
if '--localidades' in sys.argv:
cod_prov = raw_input("Ingrese el código de provincia:")
ret = wslpg.ConsultarLocalidadesPorProvincia(cod_prov)
print "\n".join(ret)
# Generación del PDF:
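        # --pdf: render the output file (SALIDA) using the CSV template(s) configured in
        # [LIQUIDACION], injecting fixed values from [PDF] and the tipo_reg 9 extra data;
        # adjustments use an extra per-page format for the debit/credit sections.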
if '--pdf' in sys.argv:
# cargo los datos del archivo de salida:
liq = wslpg.params_out = leer_archivo(SALIDA)
conf_liq = dict(config.items('LIQUIDACION'))
conf_pdf = dict(config.items('PDF'))
# establezco formatos (cantidad de decimales) según configuración:
wslpg.FmtCantidad = conf_liq.get("fmt_cantidad", "0.2")
wslpg.FmtPrecio = conf_liq.get("fmt_precio", "0.2")
# determino el formato según el tipo de liquidación y datos
if '--ajuste' not in sys.argv:
# liquidación estándar
formatos = [('formato', '')]
copias = int(conf_liq.get("copias", 3))
else:
# ajustes (páginas distintas), revisar si hay debitos/creditos:
formatos = [('formato_ajuste_base', '')]
copias = 1
if liq['ajuste_debito']:
formatos.append(('formato_ajuste_debcred', 'ajuste_debito' ))
if liq['ajuste_credito']:
formatos.append(('formato_ajuste_debcred', 'ajuste_credito'))
wslpg.CrearPlantillaPDF(
papel=conf_liq.get("papel", "legal"),
orientacion=conf_liq.get("orientacion", "portrait"),
)
for num_formato, (formato, clave) in enumerate(formatos):
# cargo el formato CSV por defecto (liquidacion....csv)
wslpg.CargarFormatoPDF(conf_liq.get(formato))
# datos fijos (configuracion):
for k, v in conf_pdf.items():
wslpg.AgregarDatoPDF(k, v)
                # additional data (record type 9):
for dato in liq.get('datos', []):
wslpg.AgregarDatoPDF(dato['campo'], dato['valor'])
if DEBUG: print "DATO", dato['campo'], dato['valor']
wslpg.ProcesarPlantillaPDF(num_copias=copias,
lineas_max=int(conf_liq.get("lineas_max", 24)),
qty_pos=conf_liq.get("cant_pos") or 'izq',
clave=clave)
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
salida = conf_liq.get("salida", "")
                # build the file name from the invoice data
d = os.path.join(conf_liq.get('directorio', "."),
liq['fecha_liquidacion'].replace("-", "_"))
if not os.path.isdir(d):
if DEBUG: print "Creando directorio!", d
os.makedirs(d)
fs = conf_liq.get('archivo','pto_emision,nro_orden').split(",")
fn = u'_'.join([unicode(liq.get(ff,ff)) for ff in fs])
fn = fn.encode('ascii', 'replace').replace('?','_')
salida = os.path.join(d, "%s.pdf" % fn)
if num_formato == len(formatos) - 1:
dest = "F" # si es el último, escribir archivo
else:
dest = "" # sino, no escribir archivo todavía
wslpg.GenerarPDF(archivo=salida, dest=dest)
print "Generando PDF", salida, dest
if '--mostrar' in sys.argv:
wslpg.MostrarPDF(archivo=salida,
imprimir='--imprimir' in sys.argv)
print "hecho."
except SoapFault,e:
print >> sys.stderr, "Falla SOAP:", e.faultcode, e.faultstring.encode("ascii","ignore")
sys.exit(3)
except Exception, e:
try:
print >> sys.stderr, traceback.format_exception_only(sys.exc_type, sys.exc_value)[0]
except:
print >> sys.stderr, "Excepción no disponible:", type(e)
if DEBUG:
raise
sys.exit(5)
finally:
if XML:
open("wslpg_request.xml", "w").write(wslpg.client.xml_request)
open("wslpg_response.xml", "w").write(wslpg.client.xml_response)
| gpl-3.0 |
sadaf2605/django | django/contrib/messages/api.py | 71 | 3105 | from django.contrib.messages import constants
from django.contrib.messages.storage import default_storage
from django.http import HttpRequest
__all__ = (
'add_message', 'get_messages',
'get_level', 'set_level',
'debug', 'info', 'success', 'warning', 'error',
'MessageFailure',
)
class MessageFailure(Exception):
pass
def add_message(request, level, message, extra_tags='', fail_silently=False):
"""
Attempts to add a message to the request using the 'messages' app.
"""
if not isinstance(request, HttpRequest):
raise TypeError("add_message() argument must be an HttpRequest object, "
"not '%s'." % request.__class__.__name__)
if hasattr(request, '_messages'):
return request._messages.add(level, message, extra_tags)
if not fail_silently:
raise MessageFailure(
'You cannot add messages without installing '
'django.contrib.messages.middleware.MessageMiddleware'
)
def get_messages(request):
"""
Returns the message storage on the request if it exists, otherwise returns
an empty list.
"""
return getattr(request, '_messages', [])
def get_level(request):
"""
Returns the minimum level of messages to be recorded.
The default level is the ``MESSAGE_LEVEL`` setting. If this is not found,
the ``INFO`` level is used.
"""
storage = getattr(request, '_messages', default_storage(request))
return storage.level
def set_level(request, level):
"""
Sets the minimum level of messages to be recorded, returning ``True`` if
the level was recorded successfully.
If set to ``None``, the default level will be used (see the ``get_level``
method).
"""
if not hasattr(request, '_messages'):
return False
request._messages.level = level
return True
def debug(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``DEBUG`` level.
"""
add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``INFO`` level.
"""
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``SUCCESS`` level.
"""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``WARNING`` level.
"""
add_message(request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``ERROR`` level.
"""
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
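# Illustrative usage sketch (added comment; not part of the original module).
# A hypothetical Django view could use the shortcut functions defined above:
#
#     from django.contrib import messages
#
#     def update_profile(request):
#         ...
#         messages.success(request, 'Profile updated.', fail_silently=True)
#         for message in messages.get_messages(request):
#             do_something_with(message)   # hypothetical helper, e.g. log or render it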
| bsd-3-clause |
smorante/continuous-goal-directed-actions | demonstration-feature-selection/src/alternatives/main_dtw_mds_norm.py | 2 | 3731 | # -*- coding: utf-8 -*-
"""
Author: Santiago Morante
Robotics Lab. Universidad Carlos III de Madrid
"""
########################## DTW ####################################
import libmddtw
import matplotlib.pyplot as plt
from dtw import dtw
########################## MDS ####################################
import numpy as np
from sklearn.metrics import euclidean_distances
import libmds
########################## DBSCAN ####################################
import libdbscan
from sklearn.preprocessing import StandardScaler # to normalize
def normalize(X):
return StandardScaler().fit_transform(X)
def main():
NUMBER_OF_DEMONSTRATIONS=5
##########################################################################
########################## DTW ####################################
##########################################################################
dist=np.zeros((NUMBER_OF_DEMONSTRATIONS,NUMBER_OF_DEMONSTRATIONS))
demons=[]
# fill demonstrations
for i in range(NUMBER_OF_DEMONSTRATIONS):
demons.append(np.matrix([ np.sin(np.arange(15+i)+i) , np.sin(np.arange(15+i)+i)]))
# fill distance matrix
for i in range(NUMBER_OF_DEMONSTRATIONS):
for j in range(NUMBER_OF_DEMONSTRATIONS):
mddtw = libmddtw.Mddtw()
x,y = mddtw.collapseRows(demons[i],demons[j])
#fig = plt.figure()
#plt.plot(x)
#plt.plot(y)
singleDist, singleCost, singlePath = mddtw.compute(demons[i],demons[j])
dist[i][j]=singleDist
# print 'Minimum distance found:', singleDist
#fig = plt.figure()
# plt.imshow(cost.T, origin='lower', cmap=plt.cm.gray, interpolation='nearest')
# plt.plot(path[0], path[1], 'w')
# plt.xlim((-0.5, cost.shape[0]-0.5))
# plt.ylim((-0.5, cost.shape[1]-0.5))
# print "dist", dist
###########################################################################
########################### MDS ####################################
###########################################################################
md = libmds.Mds()
md.create(n_components=1, metric=False, max_iter=3000, eps=1e-9, random_state=None,
dissimilarity="precomputed", n_jobs=1)
points = md.compute(dist)
print "points", points.flatten()
# md.plot()
##########################################################################
########################## norm ####################################
##########################################################################
from scipy.stats import norm
from numpy import linspace
from pylab import plot,show,hist,figure,title
param = norm.fit(points.flatten()) # distribution fitting
x = linspace(np.min(points),np.max(points),NUMBER_OF_DEMONSTRATIONS)
pdf_fitted = norm.pdf(x, loc=param[0],scale=param[1])
fig = plt.figure()
title('Normal distribution')
plot(x,pdf_fitted,'r-')
hist(points.flatten(),normed=1,alpha=.3)
show()
for elem in points:
if elem <= np.mean(points):
print "probability of point ", str(elem), ": ", norm.cdf(elem, loc=param[0],scale=param[1])
if elem > np.mean(points):
print "probability of point ", str(elem), ": ", 1-norm.cdf(elem, loc=param[0],scale=param[1])
##############################################################################
##############################################################################
if __name__ == "__main__":
main() | mit |
brunosantos/Bsan-kodi-repo | plugin.video.kodi/dns/rdtypes/ANY/RRSIG.py | 15 | 5774 | # Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import calendar
import struct
import time
import dns.dnssec
import dns.exception
import dns.rdata
import dns.rdatatype
class BadSigTime(dns.exception.DNSException):
"""Raised when a SIG or RRSIG RR's time cannot be parsed."""
pass
def sigtime_to_posixtime(what):
if len(what) != 14:
raise BadSigTime
year = int(what[0:4])
month = int(what[4:6])
day = int(what[6:8])
hour = int(what[8:10])
minute = int(what[10:12])
second = int(what[12:14])
return calendar.timegm((year, month, day, hour, minute, second,
0, 0, 0))
def posixtime_to_sigtime(what):
return time.strftime('%Y%m%d%H%M%S', time.gmtime(what))
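# Illustrative note (added comment, not in the upstream source): the two helpers
# above convert between RRSIG presentation time (YYYYMMDDHHMMSS, UTC) and POSIX
# time, e.g.:
#   posixtime_to_sigtime(0)                -> '19700101000000'
#   sigtime_to_posixtime('19700101000000') -> 0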
class RRSIG(dns.rdata.Rdata):
"""RRSIG record
@ivar type_covered: the rdata type this signature covers
@type type_covered: int
@ivar algorithm: the algorithm used for the sig
@type algorithm: int
@ivar labels: number of labels
@type labels: int
@ivar original_ttl: the original TTL
@type original_ttl: long
@ivar expiration: signature expiration time
@type expiration: long
@ivar inception: signature inception time
@type inception: long
@ivar key_tag: the key tag
@type key_tag: int
@ivar signer: the signer
@type signer: dns.name.Name object
@ivar signature: the signature
@type signature: string"""
__slots__ = ['type_covered', 'algorithm', 'labels', 'original_ttl',
'expiration', 'inception', 'key_tag', 'signer',
'signature']
def __init__(self, rdclass, rdtype, type_covered, algorithm, labels,
original_ttl, expiration, inception, key_tag, signer,
signature):
super(RRSIG, self).__init__(rdclass, rdtype)
self.type_covered = type_covered
self.algorithm = algorithm
self.labels = labels
self.original_ttl = original_ttl
self.expiration = expiration
self.inception = inception
self.key_tag = key_tag
self.signer = signer
self.signature = signature
def covers(self):
return self.type_covered
def to_text(self, origin=None, relativize=True, **kw):
return '%s %d %d %d %s %s %d %s %s' % (
dns.rdatatype.to_text(self.type_covered),
self.algorithm,
self.labels,
self.original_ttl,
posixtime_to_sigtime(self.expiration),
posixtime_to_sigtime(self.inception),
self.key_tag,
self.signer.choose_relativity(origin, relativize),
dns.rdata._base64ify(self.signature)
)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
type_covered = dns.rdatatype.from_text(tok.get_string())
algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
labels = tok.get_int()
original_ttl = tok.get_ttl()
expiration = sigtime_to_posixtime(tok.get_string())
inception = sigtime_to_posixtime(tok.get_string())
key_tag = tok.get_int()
signer = tok.get_name()
signer = signer.choose_relativity(origin, relativize)
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value)
b64 = ''.join(chunks)
signature = b64.decode('base64_codec')
return cls(rdclass, rdtype, type_covered, algorithm, labels,
original_ttl, expiration, inception, key_tag, signer,
signature)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
header = struct.pack('!HBBIIIH', self.type_covered,
self.algorithm, self.labels,
self.original_ttl, self.expiration,
self.inception, self.key_tag)
file.write(header)
self.signer.to_wire(file, None, origin)
file.write(self.signature)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
header = struct.unpack('!HBBIIIH', wire[current : current + 18])
current += 18
rdlen -= 18
(signer, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
if not origin is None:
signer = signer.relativize(origin)
signature = wire[current : current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], header[1], header[2],
header[3], header[4], header[5], header[6], signer,
signature)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.signer = self.signer.choose_relativity(origin, relativize)
def _cmp(self, other):
return self._wire_cmp(other)
| gpl-2.0 |
stargaser/astropy | astropy/coordinates/tests/accuracy/test_altaz_icrs.py | 4 | 8535 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for AltAz to ICRS coordinate transformations.
We use "known good" examples computed with other coordinate libraries.
Note that we use very low precision asserts because some people run tests on 32-bit
machines and we want the tests to pass there.
TODO: check if these tests pass on 32-bit machines and implement
higher-precision checks on 64-bit machines.
"""
import pytest
from astropy import units as u
from astropy.time import Time
from astropy.coordinates.builtin_frames import AltAz
from astropy.coordinates import EarthLocation
from astropy.coordinates import Angle, SkyCoord
@pytest.mark.remote_data
def test_against_hor2eq():
"""Check that Astropy gives consistent results with an IDL hor2eq example.
See : http://idlastro.gsfc.nasa.gov/ftp/pro/astro/hor2eq.pro
Test is against these run outputs, run at 2000-01-01T12:00:00:
# NORMAL ATMOSPHERE CASE
IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=781.0, temp=273.0
Latitude = +31 57 48.0 Longitude = *** 36 00.0
Julian Date = 2451545.000000
Az, El = 17 39 40.4 +37 54 41 (Observer Coords)
Az, El = 17 39 40.4 +37 53 40 (Apparent Coords)
LMST = +11 15 26.5
LAST = +11 15 25.7
Hour Angle = +03 38 30.1 (hh:mm:ss)
Ra, Dec: 07 36 55.6 +15 25 02 (Apparent Coords)
Ra, Dec: 07 36 55.2 +15 25 08 (J2000.0000)
Ra, Dec: 07 36 55.2 +15 25 08 (J2000)
IDL> print, ra, dec
114.23004 15.418818
# NO PRESSURE CASE
IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=0.0, temp=273.0
Latitude = +31 57 48.0 Longitude = *** 36 00.0
Julian Date = 2451545.000000
Az, El = 17 39 40.4 +37 54 41 (Observer Coords)
Az, El = 17 39 40.4 +37 54 41 (Apparent Coords)
LMST = +11 15 26.5
LAST = +11 15 25.7
Hour Angle = +03 38 26.4 (hh:mm:ss)
Ra, Dec: 07 36 59.3 +15 25 31 (Apparent Coords)
Ra, Dec: 07 36 58.9 +15 25 37 (J2000.0000)
Ra, Dec: 07 36 58.9 +15 25 37 (J2000)
IDL> print, ra, dec
114.24554 15.427022
"""
# Observatory position for `kpno` from here:
# http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro
location = EarthLocation(lon=Angle('-111d36.0m'),
lat=Angle('31d57.8m'),
height=2120. * u.m)
obstime = Time(2451545.0, format='jd', scale='ut1')
altaz_frame = AltAz(obstime=obstime, location=location,
temperature=0 * u.deg_C, pressure=0.781 * u.bar)
altaz_frame_noatm = AltAz(obstime=obstime, location=location,
temperature=0 * u.deg_C, pressure=0.0 * u.bar)
altaz = SkyCoord('264d55m06s 37d54m41s', frame=altaz_frame)
altaz_noatm = SkyCoord('264d55m06s 37d54m41s', frame=altaz_frame_noatm)
radec_frame = 'icrs'
radec_actual = altaz.transform_to(radec_frame)
radec_actual_noatm = altaz_noatm.transform_to(radec_frame)
radec_expected = SkyCoord('07h36m55.2s +15d25m08s', frame=radec_frame)
distance = radec_actual.separation(radec_expected).to('arcsec')
# this comes from running the example hor2eq but with the pressure set to 0
radec_expected_noatm = SkyCoord('07h36m58.9s +15d25m37s', frame=radec_frame)
distance_noatm = radec_actual_noatm.separation(radec_expected_noatm).to('arcsec')
# The baseline difference is ~2.3 arcsec with one atm of pressure. The
# difference is mainly due to the somewhat different atmospheric model that
# hor2eq assumes. This is confirmed by the second test which has the
# atmosphere "off" - the residual difference is small enough to be embedded
# in the assumptions about "J2000" or rounding errors.
assert distance < 5 * u.arcsec
assert distance_noatm < 0.4 * u.arcsec
@pytest.mark.remote_data
def test_against_pyephem():
"""Check that Astropy gives consistent results with one PyEphem example.
PyEphem: http://rhodesmill.org/pyephem/
See example input and output here:
https://gist.github.com/zonca/1672906
https://github.com/phn/pytpm/issues/2#issuecomment-3698679
"""
obstime = Time('2011-09-18 08:50:00')
location = EarthLocation(lon=Angle('-109d24m53.1s'),
lat=Angle('33d41m46.0s'),
height=30000. * u.m)
# We are using the default pressure and temperature in PyEphem
# relative_humidity = ?
# obswl = ?
altaz_frame = AltAz(obstime=obstime, location=location,
temperature=15 * u.deg_C, pressure=1.010 * u.bar)
altaz = SkyCoord('6.8927d -60.7665d', frame=altaz_frame)
radec_actual = altaz.transform_to('icrs')
radec_expected = SkyCoord('196.497518d -4.569323d', frame='icrs') # EPHEM
# radec_expected = SkyCoord('196.496220d -4.569390d', frame='icrs') # HORIZON
distance = radec_actual.separation(radec_expected).to('arcsec')
# TODO: why is this difference so large?
# It currently is: 31.45187984720655 arcsec
assert distance < 1e3 * u.arcsec
# Add assert on current Astropy result so that we notice if something changes
radec_expected = SkyCoord('196.495372d -4.560694d', frame='icrs')
distance = radec_actual.separation(radec_expected).to('arcsec')
# Current value: 0.0031402822944751997 arcsec
assert distance < 1 * u.arcsec
@pytest.mark.remote_data
def test_against_jpl_horizons():
"""Check that Astropy gives consistent results with the JPL Horizons example.
The input parameters and reference results are taken from this page:
(from the first row of the Results table at the bottom of that page)
http://ssd.jpl.nasa.gov/?horizons_tutorial
"""
obstime = Time('1998-07-28 03:00')
location = EarthLocation(lon=Angle('248.405300d'),
lat=Angle('31.9585d'),
height=2.06 * u.km)
# No atmosphere
altaz_frame = AltAz(obstime=obstime, location=location)
altaz = SkyCoord('143.2970d 2.6223d', frame=altaz_frame)
radec_actual = altaz.transform_to('icrs')
radec_expected = SkyCoord('19h24m55.01s -40d56m28.9s', frame='icrs')
distance = radec_actual.separation(radec_expected).to('arcsec')
# Current value: 0.238111 arcsec
assert distance < 1 * u.arcsec
@pytest.mark.remote_data
@pytest.mark.xfail(reason="Current output is completely incorrect")
def test_fk5_equinox_and_epoch_j2000_0_to_topocentric_observed():
"""
http://phn.github.io/pytpm/conversions.html#fk5-equinox-and-epoch-j2000-0-to-topocentric-observed
"""
# Observatory position for `kpno` from here:
# http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro
location = EarthLocation(lon=Angle('-111.598333d'),
lat=Angle('31.956389d'),
height=2093.093 * u.m) # TODO: height correct?
obstime = Time('2010-01-01 12:00:00')
# relative_humidity = ?
# obswl = ?
altaz_frame = AltAz(obstime=obstime, location=location,
temperature=0 * u.deg_C, pressure=0.781 * u.bar)
radec = SkyCoord('12h22m54.899s 15d49m20.57s', frame='fk5')
altaz_actual = radec.transform_to(altaz_frame)
altaz_expected = SkyCoord('264d55m06s 37d54m41s', frame='altaz')
# altaz_expected = SkyCoord('343.586827647d 15.7683070508d', frame='altaz')
# altaz_expected = SkyCoord('133.498195532d 22.0162383595d', frame='altaz')
distance = altaz_actual.separation(altaz_expected)
# print(altaz_actual)
# print(altaz_expected)
# print(distance)
"""TODO: Current output is completely incorrect ... xfailing this test for now.
<SkyCoord (AltAz: obstime=2010-01-01 12:00:00.000, location=(-1994497.7199061865, -5037954.447348028, 3357437.2294832403) m, pressure=781.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron):00:00.000, location=(-1994497.7199061865, -5037954.447348028, 3357437.2294832403) m, pressure=781.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron): az=133.4869896371561 deg, alt=67.97857990957701 deg>
<SkyCoord (AltAz: obstime=None, location=None, pressure=0.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron): az=264.91833333333335 deg, alt=37.91138888888889 deg>
68d02m45.732s
"""
assert distance < 1 * u.arcsec
| bsd-3-clause |
jfarrell/thrift | test/py/SerializationTest.py | 21 | 17161 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from ThriftTest.ttypes import (
Bonk,
Bools,
LargeDeltas,
ListBonks,
NestedListsBonk,
NestedListsI32x2,
NestedListsI32x3,
NestedMixedx2,
Numberz,
VersioningTestV1,
VersioningTestV2,
Xtruct,
Xtruct2,
)
from Recursive.ttypes import RecTree
from Recursive.ttypes import RecList
from Recursive.ttypes import CoRec
from Recursive.ttypes import CoRec2
from Recursive.ttypes import VectorTest
from DebugProtoTest.ttypes import CompactProtoTestStruct, Empty
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TCompactProtocol, TJSONProtocol
from thrift.TSerialization import serialize, deserialize
import sys
import unittest
class AbstractTest(unittest.TestCase):
def setUp(self):
self.v1obj = VersioningTestV1(
begin_in_both=12345,
old_string='aaa',
end_in_both=54321,
)
self.v2obj = VersioningTestV2(
begin_in_both=12345,
newint=1,
newbyte=2,
newshort=3,
newlong=4,
newdouble=5.0,
newstruct=Bonk(message="Hello!", type=123),
newlist=[7, 8, 9],
newset=set([42, 1, 8]),
newmap={1: 2, 2: 3},
newstring="Hola!",
end_in_both=54321,
)
self.bools = Bools(im_true=True, im_false=False)
self.bools_flipped = Bools(im_true=False, im_false=True)
self.large_deltas = LargeDeltas(
b1=self.bools,
b10=self.bools_flipped,
b100=self.bools,
check_true=True,
b1000=self.bools_flipped,
check_false=False,
vertwo2000=VersioningTestV2(newstruct=Bonk(message='World!', type=314)),
a_set2500=set(['lazy', 'brown', 'cow']),
vertwo3000=VersioningTestV2(newset=set([2, 3, 5, 7, 11])),
big_numbers=[2 ** 8, 2 ** 16, 2 ** 31 - 1, -(2 ** 31 - 1)]
)
self.compact_struct = CompactProtoTestStruct(
a_byte=127,
a_i16=32000,
a_i32=1000000000,
a_i64=0xffffffffff,
a_double=5.6789,
a_string="my string",
true_field=True,
false_field=False,
empty_struct_field=Empty(),
byte_list=[-127, -1, 0, 1, 127],
i16_list=[-1, 0, 1, 0x7fff],
i32_list=[-1, 0, 0xff, 0xffff, 0xffffff, 0x7fffffff],
i64_list=[-1, 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffffff, 0xffffffffffff, 0xffffffffffffff, 0x7fffffffffffffff],
double_list=[0.1, 0.2, 0.3],
string_list=["first", "second", "third"],
boolean_list=[True, True, True, False, False, False],
struct_list=[Empty(), Empty()],
byte_set=set([-127, -1, 0, 1, 127]),
i16_set=set([-1, 0, 1, 0x7fff]),
i32_set=set([1, 2, 3]),
i64_set=set([-1, 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffffff, 0xffffffffffff, 0xffffffffffffff, 0x7fffffffffffffff]),
double_set=set([0.1, 0.2, 0.3]),
string_set=set(["first", "second", "third"]),
boolean_set=set([True, False]),
# struct_set=set([Empty()]), # unhashable instance
byte_byte_map={1: 2},
i16_byte_map={1: 1, -1: 1, 0x7fff: 1},
i32_byte_map={1: 1, -1: 1, 0x7fffffff: 1},
i64_byte_map={0: 1, 1: 1, -1: 1, 0x7fffffffffffffff: 1},
double_byte_map={-1.1: 1, 1.1: 1},
string_byte_map={"first": 1, "second": 2, "third": 3, "": 0},
boolean_byte_map={True: 1, False: 0},
byte_i16_map={1: 1, 2: -1, 3: 0x7fff},
byte_i32_map={1: 1, 2: -1, 3: 0x7fffffff},
byte_i64_map={1: 1, 2: -1, 3: 0x7fffffffffffffff},
byte_double_map={1: 0.1, 2: -0.1, 3: 1000000.1},
byte_string_map={1: "", 2: "blah", 3: "loooooooooooooong string"},
byte_boolean_map={1: True, 2: False},
# list_byte_map # unhashable
# set_byte_map={set([1, 2, 3]) : 1, set([0, 1]) : 2, set([]) : 0}, # unhashable
# map_byte_map # unhashable
byte_map_map={0: {}, 1: {1: 1}, 2: {1: 1, 2: 2}},
byte_set_map={0: set([]), 1: set([1]), 2: set([1, 2])},
byte_list_map={0: [], 1: [1], 2: [1, 2]},
)
self.nested_lists_i32x2 = NestedListsI32x2(
[
[1, 1, 2],
[2, 7, 9],
[3, 5, 8]
]
)
self.nested_lists_i32x3 = NestedListsI32x3(
[
[
[2, 7, 9],
[3, 5, 8]
],
[
[1, 1, 2],
[1, 4, 9]
]
]
)
self.nested_mixedx2 = NestedMixedx2(int_set_list=[
set([1, 2, 3]),
set([1, 4, 9]),
set([1, 2, 3, 5, 8, 13, 21]),
set([-1, 0, 1])
],
# note, the sets below are sets of chars, since the strings are iterated
map_int_strset={10: set('abc'), 20: set('def'), 30: set('GHI')},
map_int_strset_list=[
{10: set('abc'), 20: set('def'), 30: set('GHI')},
{100: set('lmn'), 200: set('opq'), 300: set('RST')},
{1000: set('uvw'), 2000: set('wxy'), 3000: set('XYZ')}]
)
self.nested_lists_bonk = NestedListsBonk(
[
[
[
Bonk(message='inner A first', type=1),
Bonk(message='inner A second', type=1)
],
[
Bonk(message='inner B first', type=2),
Bonk(message='inner B second', type=2)
]
]
]
)
self.list_bonks = ListBonks(
[
Bonk(message='inner A', type=1),
Bonk(message='inner B', type=2),
Bonk(message='inner C', type=0)
]
)
def _serialize(self, obj):
trans = TTransport.TMemoryBuffer()
prot = self.protocol_factory.getProtocol(trans)
obj.write(prot)
return trans.getvalue()
def _deserialize(self, objtype, data):
prot = self.protocol_factory.getProtocol(TTransport.TMemoryBuffer(data))
ret = objtype()
ret.read(prot)
return ret
def testForwards(self):
obj = self._deserialize(VersioningTestV2, self._serialize(self.v1obj))
self.assertEquals(obj.begin_in_both, self.v1obj.begin_in_both)
self.assertEquals(obj.end_in_both, self.v1obj.end_in_both)
def testBackwards(self):
obj = self._deserialize(VersioningTestV1, self._serialize(self.v2obj))
self.assertEquals(obj.begin_in_both, self.v2obj.begin_in_both)
self.assertEquals(obj.end_in_both, self.v2obj.end_in_both)
def testSerializeV1(self):
obj = self._deserialize(VersioningTestV1, self._serialize(self.v1obj))
self.assertEquals(obj, self.v1obj)
def testSerializeV2(self):
obj = self._deserialize(VersioningTestV2, self._serialize(self.v2obj))
self.assertEquals(obj, self.v2obj)
def testBools(self):
self.assertNotEquals(self.bools, self.bools_flipped)
self.assertNotEquals(self.bools, self.v1obj)
obj = self._deserialize(Bools, self._serialize(self.bools))
self.assertEquals(obj, self.bools)
obj = self._deserialize(Bools, self._serialize(self.bools_flipped))
self.assertEquals(obj, self.bools_flipped)
rep = repr(self.bools)
self.assertTrue(len(rep) > 0)
def testLargeDeltas(self):
# test large field deltas (meaningful in CompactProto only)
obj = self._deserialize(LargeDeltas, self._serialize(self.large_deltas))
self.assertEquals(obj, self.large_deltas)
rep = repr(self.large_deltas)
self.assertTrue(len(rep) > 0)
def testNestedListsI32x2(self):
obj = self._deserialize(NestedListsI32x2, self._serialize(self.nested_lists_i32x2))
self.assertEquals(obj, self.nested_lists_i32x2)
rep = repr(self.nested_lists_i32x2)
self.assertTrue(len(rep) > 0)
def testNestedListsI32x3(self):
obj = self._deserialize(NestedListsI32x3, self._serialize(self.nested_lists_i32x3))
self.assertEquals(obj, self.nested_lists_i32x3)
rep = repr(self.nested_lists_i32x3)
self.assertTrue(len(rep) > 0)
def testNestedMixedx2(self):
obj = self._deserialize(NestedMixedx2, self._serialize(self.nested_mixedx2))
self.assertEquals(obj, self.nested_mixedx2)
rep = repr(self.nested_mixedx2)
self.assertTrue(len(rep) > 0)
def testNestedListsBonk(self):
obj = self._deserialize(NestedListsBonk, self._serialize(self.nested_lists_bonk))
self.assertEquals(obj, self.nested_lists_bonk)
rep = repr(self.nested_lists_bonk)
self.assertTrue(len(rep) > 0)
def testListBonks(self):
obj = self._deserialize(ListBonks, self._serialize(self.list_bonks))
self.assertEquals(obj, self.list_bonks)
rep = repr(self.list_bonks)
self.assertTrue(len(rep) > 0)
def testCompactStruct(self):
# test large field deltas (meaningful in CompactProto only)
obj = self._deserialize(CompactProtoTestStruct, self._serialize(self.compact_struct))
self.assertEquals(obj, self.compact_struct)
rep = repr(self.compact_struct)
self.assertTrue(len(rep) > 0)
def testIntegerLimits(self):
if (sys.version_info[0] == 2 and sys.version_info[1] <= 6):
print('Skipping testIntegerLimits for Python 2.6')
return
bad_values = [CompactProtoTestStruct(a_byte=128), CompactProtoTestStruct(a_byte=-129),
CompactProtoTestStruct(a_i16=32768), CompactProtoTestStruct(a_i16=-32769),
CompactProtoTestStruct(a_i32=2147483648), CompactProtoTestStruct(a_i32=-2147483649),
CompactProtoTestStruct(a_i64=9223372036854775808), CompactProtoTestStruct(a_i64=-9223372036854775809)
]
for value in bad_values:
self.assertRaises(Exception, self._serialize, value)
def testRecTree(self):
"""Ensure recursive tree node can be created."""
children = []
for idx in range(1, 5):
node = RecTree(item=idx, children=None)
children.append(node)
parent = RecTree(item=0, children=children)
serde_parent = self._deserialize(RecTree, self._serialize(parent))
self.assertEquals(0, serde_parent.item)
self.assertEquals(4, len(serde_parent.children))
for child in serde_parent.children:
# Cannot use assertIsInstance in python 2.6?
self.assertTrue(isinstance(child, RecTree))
def _buildLinkedList(self):
head = cur = RecList(item=0)
for idx in range(1, 5):
node = RecList(item=idx)
cur.nextitem = node
cur = node
return head
def _collapseLinkedList(self, head):
out_list = []
cur = head
while cur is not None:
out_list.append(cur.item)
cur = cur.nextitem
return out_list
def testRecList(self):
"""Ensure recursive linked list can be created."""
rec_list = self._buildLinkedList()
serde_list = self._deserialize(RecList, self._serialize(rec_list))
out_list = self._collapseLinkedList(serde_list)
self.assertEquals([0, 1, 2, 3, 4], out_list)
def testCoRec(self):
"""Ensure co-recursive structures can be created."""
item1 = CoRec()
item2 = CoRec2()
item1.other = item2
item2.other = item1
# NOTE [econner724,2017-06-21]: These objects cannot be serialized as serialization
# results in an infinite loop. fbthrift also suffers from this
# problem.
def testRecVector(self):
"""Ensure a list of recursive nodes can be created."""
mylist = [self._buildLinkedList(), self._buildLinkedList()]
myvec = VectorTest(lister=mylist)
serde_vec = self._deserialize(VectorTest, self._serialize(myvec))
golden_list = [0, 1, 2, 3, 4]
for cur_list in serde_vec.lister:
out_list = self._collapseLinkedList(cur_list)
self.assertEqual(golden_list, out_list)
class NormalBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
class AcceleratedBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory(fallback=False)
class CompactProtocolTest(AbstractTest):
protocol_factory = TCompactProtocol.TCompactProtocolFactory()
class AcceleratedCompactTest(AbstractTest):
protocol_factory = TCompactProtocol.TCompactProtocolAcceleratedFactory(fallback=False)
class JSONProtocolTest(AbstractTest):
protocol_factory = TJSONProtocol.TJSONProtocolFactory()
class AcceleratedFramedTest(unittest.TestCase):
def testSplit(self):
"""Test FramedTransport and BinaryProtocolAccelerated
Tests that TBinaryProtocolAccelerated and TFramedTransport
play nicely together when a read spans a frame"""
protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
bigstring = "".join(chr(byte) for byte in range(ord("a"), ord("z") + 1))
databuf = TTransport.TMemoryBuffer()
prot = protocol_factory.getProtocol(databuf)
prot.writeI32(42)
prot.writeString(bigstring)
prot.writeI16(24)
data = databuf.getvalue()
cutpoint = len(data) // 2
parts = [data[:cutpoint], data[cutpoint:]]
framed_buffer = TTransport.TMemoryBuffer()
framed_writer = TTransport.TFramedTransport(framed_buffer)
for part in parts:
framed_writer.write(part)
framed_writer.flush()
self.assertEquals(len(framed_buffer.getvalue()), len(data) + 8)
# Recreate framed_buffer so we can read from it.
framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())
framed_reader = TTransport.TFramedTransport(framed_buffer)
prot = protocol_factory.getProtocol(framed_reader)
self.assertEqual(prot.readI32(), 42)
self.assertEqual(prot.readString(), bigstring)
self.assertEqual(prot.readI16(), 24)
class SerializersTest(unittest.TestCase):
def testSerializeThenDeserialize(self):
obj = Xtruct2(i32_thing=1,
struct_thing=Xtruct(string_thing="foo"))
s1 = serialize(obj)
for i in range(10):
self.assertEquals(s1, serialize(obj))
objcopy = Xtruct2()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
obj = Xtruct(string_thing="bar")
objcopy = Xtruct()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
# test booleans
obj = Bools(im_true=True, im_false=False)
objcopy = Bools()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
# test enums
for num, name in Numberz._VALUES_TO_NAMES.items():
obj = Bonk(message='enum Numberz value %d is string %s' % (num, name), type=num)
objcopy = Bonk()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(NormalBinaryTest))
suite.addTest(loader.loadTestsFromTestCase(AcceleratedBinaryTest))
suite.addTest(loader.loadTestsFromTestCase(AcceleratedCompactTest))
suite.addTest(loader.loadTestsFromTestCase(CompactProtocolTest))
suite.addTest(loader.loadTestsFromTestCase(JSONProtocolTest))
suite.addTest(loader.loadTestsFromTestCase(AcceleratedFramedTest))
suite.addTest(loader.loadTestsFromTestCase(SerializersTest))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
| apache-2.0 |
jonls/lpd-monitor | btcon.py | 1 | 16600 |
import socket
import struct
import random
import hashlib
import errno
from gi.repository import GLib
from gi.repository import GObject
from bencode import bencode, bdecode, bdecode_all
class Bitfield(object):
def __init__(self, size, data=None):
if size < 0:
raise ValueError('Bitfield size must be non-negative')
self._size = size
self._data = bytearray((size+7)//8)
if data is not None:
for i in range(self._size):
bi = i // 8
if ord(data[bi]) & (1 << (7 - (i % 8))):
self.set(i)
def set(self, index):
if index >= self._size or index < 0:
raise IndexError('Invalid Bitfield index: %d' % index)
bi = index // 8
self._data[bi] |= 1 << (7 - (index % 8))
def count(self):
return sum(self)
def __iter__(self):
for i in range(self._size):
bi = i // 8
yield bool(self._data[bi] & (1 << (7 - (i % 8))))
def __len__(self):
return self._size
def __repr__(self):
return 'Bitfield(%d, %r)' % (self._size, ''.join(chr(x) for x in self._data))
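# Illustrative usage of Bitfield (added comment; not part of the original file):
#   bf = Bitfield(10)
#   bf.set(0); bf.set(3)
#   bf.count()    -> 2
#   list(bf)[:4]  -> [True, False, False, True]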
class BTConnectionError(Exception):
pass
class BTConnection(GObject.GObject):
__gsignals__ = {
'state-changed': (GObject.SIGNAL_RUN_LAST, None, (int,)),
'metadata-changed': (GObject.SIGNAL_RUN_LAST, None, ()),
'peer-progress-changed': (GObject.SIGNAL_RUN_LAST, None, ())
}
STATE_NOT_CONNECTED = 0
STATE_HEADERS = 1
STATE_EXT_HEADERS = 2
STATE_RUNNING = 3
STATE_CLOSED = 4
HEADERS_LENGTH = 68
BYTE_EXT_EXTENSION = 44
BYTE_EXT_FAST_PEERS = 62
MSG_TYPE_CHOKE = 0
MSG_TYPE_UNCHOKE = 1
MSG_TYPE_INTERESTED = 2
MSG_TYPE_NOT_INTERESTED = 3
MSG_TYPE_HAVE = 4
MSG_TYPE_BITFIELD = 5
MSG_TYPE_REQUEST = 6
MSG_TYPE_PIECE = 7
MSG_TYPE_CANCEL = 8
MSG_TYPE_HAVE_ALL = 14
MSG_TYPE_HAVE_NONE = 15
MSG_TYPE_EXTENDED = 20
def __init__(self, infohash, peer_id=None):
super(BTConnection, self).__init__()
self._infohash = infohash
self._my_id = peer_id or ''.join(chr(random.randint(0, 255)) for i in range(20))
self._my_exts = {1: 'ut_metadata'}
self._metadata = None
self._ut_metadata_size = None
self._ut_metadata_buffer = ''
self._ut_metadata_last_req = None
self._peer_id = None
self._peer_byte_exts = set()
self._peer_exts = {}
self._peer_have = None
self._peer_have_queue = []
self._packet_len = None
self._packet = ''
self._packet_timeout = None
self._packet_callback = None
self._msg_len = None
self._msg_callback = None
self._socket = None
self._socket_queue = []
self._state = self.STATE_NOT_CONNECTED
self._input_source = None
self._output_source = None
self._connect_source = None
self._hangup_source = None
def open(self, address):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setblocking(0)
self._socket.bind(('', 0))
self._connect_source = GLib.io_add_watch(self._socket, GLib.IO_OUT, self._socket_connect_cb)
self._hangup_source = GLib.io_add_watch(self._socket, GLib.IO_HUP, self._socket_hangup_cb)
self._packet_expect_input(self.HEADERS_LENGTH, self._handle_headers, 30)
err = self._socket.connect_ex(address)
if err not in (0, errno.EINPROGRESS):
raise BTConnectionError('Unable to connect: {}'.format(errno.errorcode[err]))
self._send_headers()
self._change_state(self.STATE_HEADERS)
def close(self):
self._close_sources()
self._socket.close()
self._change_state(self.STATE_CLOSED)
print('Closed')
@property
def metadata(self):
return self._metadata
@property
def peer_progress(self):
if self._peer_have is None:
return None
return self._peer_have.count()
@property
def piece_count(self):
if self._metadata is None:
return None
return (self.data_length + self._metadata['piece length'] - 1) // self._metadata['piece length']
@property
def data_length(self):
if self._metadata is None:
return None
if 'files' in self._metadata:
return sum(f['length'] for f in self._metadata['files'])
else:
return self._metadata['length']
def _change_state(self, state):
self._state = state
self.emit('state-changed', self._state)
def _close_sources(self):
for source in (self._hangup_source, self._connect_source,
self._input_source, self._output_source,
self._packet_timeout):
if source is not None:
GLib.source_remove(source)
def _socket_connect_cb(self, source, cond):
err = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
print 'Unable to connect: {}'.format(errno.errorcode[err])
self.close()
return False
def _socket_hangup_cb(self, source, cond):
print('Hangup')
self.close()
return False
def _socket_input_cb(self, source, cond):
self._packet += self._socket.recv(self._packet_len-len(self._packet))
if len(self._packet) == self._packet_len:
GLib.source_remove(self._packet_timeout)
packet = self._packet
self._packet = ''
self._packet_callback(packet)
return False
return True
def _socket_output_cb(self, source, cond):
while len(self._socket_queue) > 0:
packet = self._socket_queue[0]
n = self._socket.send(packet)
if n < len(packet):
self._socket_queue[0] = packet[n:]
return True
else:
self._socket_queue.pop(0)
return False
def _packet_timeout_cb(self):
print('No activity')
self.close()
return False
def _packet_expect_input(self, length, callback, timeout):
self._packet_len = length
self._packet_callback = callback
self._packet_timeout = GLib.timeout_add_seconds(timeout, self._packet_timeout_cb)
self._input_source = GLib.io_add_watch(self._socket, GLib.IO_IN, self._socket_input_cb)
def _packet_send(self, packet):
self._socket_queue.append(packet)
if len(self._socket_queue) == 1:
GLib.io_add_watch(self._socket, GLib.IO_OUT, self._socket_output_cb)
def _send_headers(self):
bt_header = chr(19) + 'BitTorrent protocol'
ext_bytes = '\x00\x00\x00\x00\x00\x10\x00\x04'
self._packet_send(bt_header + ext_bytes + self._infohash + self._my_id)
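    # Annotation (added comment): the packet sent above is the standard 68-byte
    # BitTorrent handshake:
    #   1 byte   pstrlen (19)
    #   19 bytes pstr ("BitTorrent protocol")
    #   8 bytes  reserved bits (0x10 in byte 5 = extension protocol,
    #            0x04 in byte 7 = fast extension), matching the checks in _handle_headers()
    #   20 bytes infohash
    #   20 bytes peer_id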
def _send_message(self, msg):
msg_len = struct.pack('>L', len(msg))
self._packet_send(msg_len + msg)
def _send_ext_headers(self):
msg = chr(20) + chr(0) + bencode({'m': dict((v, k) for k, v in self._my_exts.iteritems())})
self._send_message(msg)
def _send_initial_have(self):
if self.BYTE_EXT_FAST_PEERS in self._peer_byte_exts:
msg = chr(self.MSG_TYPE_HAVE_NONE)
self._send_message(msg)
def _ut_metadata_send_request(self, piece):
ext_id = self._peer_exts['ut_metadata']
msg = chr(20) + chr(ext_id) + bencode({'msg_type': 0, 'piece': piece})
self._ut_metadata_last_req = piece
self._send_message(msg)
def _ut_metadata_validate(self):
def validate_files_list(files):
if len(files) == 0:
return False
for f in files:
if not (type(f) is dict and
'length' in f and type(f['length']) is int and
'path' in f and type(f['path']) is list and
len(f['path']) > 0 and all(f['path'])):
return False
return True
if hashlib.sha1(self._ut_metadata_buffer).digest() == self._infohash:
info_dict = bdecode(self._ut_metadata_buffer)
if ('name' in info_dict and type(info_dict['name']) is str and
'piece length' in info_dict and type(info_dict['piece length']) is int and
'pieces' in info_dict and type(info_dict['pieces']) is str and
(('length' in info_dict and type(info_dict['length']) is int) or
('files' in info_dict and type(info_dict['files']) is list and
validate_files_list(info_dict['files'])))):
self._ut_metadata_buffer = None
self._metadata = info_dict
if len(self._metadata['pieces']) != 20*self.piece_count:
self._metadata = None
return False
self.emit('metadata-changed')
self._play_have_queue()
return True
return False
def _handle_headers(self, packet):
bt_header_len, packet = ord(packet[:1]), packet[1:]
if bt_header_len != 19:
self.close()
return
bt_header, packet = packet[:bt_header_len], packet[bt_header_len:]
if bt_header != 'BitTorrent protocol':
self.close()
return
print('Connected to {!r}'.format(self._socket.getpeername()))
ext_bytes, packet = packet[:8], packet[8:]
print('Extension bytes {!r}'.format(ext_bytes))
if ord(ext_bytes[7]) & 0x4:
self._peer_byte_exts.add(self.BYTE_EXT_FAST_PEERS)
if ord(ext_bytes[5]) & 0x10:
self._peer_byte_exts.add(self.BYTE_EXT_EXTENSION)
infohash, packet = packet[:20], packet[20:]
if infohash != self._infohash:
self.close()
return
self._peer_id = packet[:20]
print('Peer id {!r}'.format(self._peer_id))
if self.BYTE_EXT_EXTENSION in self._peer_byte_exts:
self._change_state(self.STATE_EXT_HEADERS)
self._msg_callback = self._handle_ext_headers
self._send_ext_headers()
else:
self._change_state(self.STATE_RUNNING)
self._msg_callback = self._handle_message
self._send_initial_have()
self._packet_expect_input(4, self._handle_message_input, 240)
def _handle_message_input(self, packet):
if self._msg_len is None:
self._msg_len = struct.unpack('>L', packet)[0]
if self._msg_len == 0:
self._msg_len = None
self._packet_expect_input(4, self._handle_message_input, 240)
if self._msg_len > 64*1024*1024:
self.close()
return
else:
self._packet_expect_input(self._msg_len, self._handle_message_input, 60)
else:
self._msg_callback(packet)
self._msg_len = None
self._packet_expect_input(4, self._handle_message_input, 240)
def _handle_ext_headers(self, msg):
msg_type, msg = ord(msg[:1]), msg[1:]
if msg_type != self.MSG_TYPE_EXTENDED or len(msg) < 2:
self.close()
return
msg_ext_type, msg = ord(msg[:1]), msg[1:]
if msg_ext_type != 0:
self.close()
return
msg = bdecode(msg)
print('Extended handshake: {!r}'.format(msg))
if 'm' in msg and type(msg['m']) is dict:
for ext, ext_id in msg['m'].iteritems():
self._peer_exts[ext] = ext_id
if 'metadata_size' in msg and type(msg['metadata_size']) is int:
self._ut_metadata_size = msg['metadata_size']
self._change_state(self.STATE_RUNNING)
self._msg_callback = self._handle_message
self._send_initial_have()
if self._peer_exts.get('ut_metadata', 0) > 0:
self._ut_metadata_send_request(0)
def _play_have_queue(self):
if len(self._peer_have_queue) > 0:
msg_type, msg = self._peer_have_queue.pop(0)
self._handle_first_have_message(msg_type, msg)
while len(self._peer_have_queue) > 0:
msg_type, msg = self._peer_have_queue.pop(0)
self._handle_have_message(msg_type, msg)
def _handle_first_have_message(self, msg_type, msg):
def handle_bitfield(msg):
if 8*len(msg) < self.piece_count:
self.close()
return
self._peer_have = Bitfield(self.piece_count, msg)
def handle_have_all():
self._peer_have = Bitfield(self.piece_count)
for i in range(len(self._peer_have)):
self._peer_have.set(i)
def handle_have_none():
self._peer_have = Bitfield(self.piece_count)
if msg_type == self.MSG_TYPE_BITFIELD:
handle_bitfield(msg)
elif msg_type == self.MSG_TYPE_HAVE_ALL:
handle_have_all()
elif msg_type == self.MSG_TYPE_HAVE_NONE:
handle_have_none()
elif (msg_type == self.MSG_TYPE_HAVE and
not self.BYTE_EXT_FAST_PEERS in self._peer_byte_exts):
self._peer_have = Bitfield(self.piece_count)
self._handle_have_message(msg_type, msg)
else:
self.close()
return
self.emit('peer-progress-changed')
def _handle_have_message(self, msg_type, msg):
if msg_type == self.MSG_TYPE_HAVE:
index = struct.unpack('>L', msg)[0]
self._peer_have.set(index)
else:
self.close()
return
self.emit('peer-progress-changed')
def _handle_message(self, msg):
msg_type, msg = ord(msg[:1]), msg[1:]
def print_message():
print('Message: {}, {!r}'.format(msg_type, msg))
if ((msg_type == self.MSG_TYPE_HAVE and len(msg) == 4) or
(msg_type == self.MSG_TYPE_HAVE_ALL and len(msg) == 1) or
(msg_type == self.MSG_TYPE_HAVE_NONE and len(msg) == 1) or
msg_type == self.MSG_TYPE_BITFIELD):
if self.piece_count is None:
self._peer_have_queue.append((msg_type, msg))
elif self._peer_have is None:
self._handle_first_have_message(msg_type, msg)
else:
self._handle_have_message(msg_type, msg)
elif msg_type == self.MSG_TYPE_EXTENDED:
if len(msg) < 1:
self.close()
return
msg_ext_id, msg = ord(msg[:1]), msg[1:]
if msg_ext_id > 0 and msg_ext_id in self._my_exts:
msg_ext = self._my_exts[msg_ext_id]
if msg_ext == 'ut_metadata':
msg, rest = bdecode_all(msg)
total_pieces = (self._ut_metadata_size + (2**14-1)) / (2**14)
last_piece_size = self._ut_metadata_size - (2**14)*(total_pieces-1)
if 'msg_type' in msg and type(msg['msg_type']) is int:
if msg['msg_type'] == 0:
pass
elif msg['msg_type'] == 1:
if ('piece' in msg and type(msg['piece']) is int and
msg['piece'] == self._ut_metadata_last_req and
((msg['piece'] < total_pieces - 1 and
len(rest) == 2**14) or
(msg['piece'] == total_pieces - 1 and
len(rest) == last_piece_size))):
self._ut_metadata_buffer += rest
print('Metadata download: {}%'.format(int(100*float(self._ut_metadata_last_req+1)/total_pieces)))
if msg['piece'] == total_pieces - 1:
self._ut_metadata_last_req = None
self._ut_metadata_validate()
else:
self._ut_metadata_send_request(self._ut_metadata_last_req+1)
elif msg['msg_type'] == 2:
pass
else:
self.close()
return
elif msg_ext_id == 0:
print_message()
else:
self.close()
return
else:
print_message()
| mit |
brandond/ansible | lib/ansible/modules/windows/win_uri.py | 13 | 6899 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Corwin Brown <[email protected]>
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_uri
version_added: '2.1'
short_description: Interacts with webservices
description:
- Interacts with FTP, HTTP and HTTPS web services.
- Supports Digest, Basic and WSSE HTTP authentication mechanisms.
- For non-Windows targets, use the M(uri) module instead.
options:
url:
description:
- Supports FTP, HTTP or HTTPS URLs in the form of (ftp|http|https)://host.domain:port/path.
type: str
required: yes
method:
description:
- The HTTP Method of the request or response.
type: str
choices: [ CONNECT, DELETE, GET, HEAD, MERGE, OPTIONS, PATCH, POST, PUT, REFRESH, TRACE ]
default: GET
content_type:
description:
- Sets the "Content-Type" header.
type: str
body:
description:
- The body of the HTTP request/response to the web service.
type: raw
user:
description:
- Username to use for authentication.
type: str
version_added: '2.4'
password:
description:
- Password to use for authentication.
type: str
version_added: '2.4'
force_basic_auth:
description:
- By default the authentication information is only sent when a webservice
responds to an initial request with a 401 status. Since some basic auth
services do not properly send a 401, logins will fail.
- This option forces the sending of the Basic authentication header upon
the initial request.
type: bool
default: no
version_added: '2.5'
dest:
description:
- Output the response body to a file.
type: path
version_added: '2.3'
headers:
description:
- Extra headers to set on the request, see the examples for more details on
how to set this.
type: dict
creates:
description:
- A filename, when it already exists, this step will be skipped.
type: path
version_added: '2.4'
removes:
description:
- A filename, when it does not exist, this step will be skipped.
type: path
version_added: '2.4'
return_content:
description:
- Whether or not to return the body of the response as a "content" key in
the dictionary result. If the reported Content-type is
"application/json", then the JSON is additionally loaded into a key
called C(json) in the dictionary results.
type: bool
default: no
version_added: '2.4'
status_code:
description:
- A valid, numeric, HTTP status code that signifies success of the request.
- Can also be comma separated list of status codes.
type: list
default: 200
version_added: '2.4'
timeout:
description:
- Specifies how long the request can be pending before it times out (in seconds).
- The value 0 (zero) specifies an indefinite time-out.
- A Domain Name System (DNS) query can take up to 15 seconds to return or time out.
If your request contains a host name that requires resolution, and you set
C(timeout) to a value greater than zero, but less than 15 seconds, it can
take 15 seconds or more before your request times out.
type: int
default: 30
version_added: '2.4'
follow_redirects:
description:
- Whether or not the C(win_uri) module should follow redirects.
- C(all) will follow all redirects.
- C(none) will not follow any redirects.
- C(safe) will follow only "safe" redirects, where "safe" means that the client is only
doing a C(GET) or C(HEAD) on the URI to which it is being redirected.
type: str
choices: [ all, none, safe ]
default: safe
version_added: '2.4'
maximum_redirection:
description:
- Specifies how many times C(win_uri) redirects a connection to an alternate
Uniform Resource Identifier (URI) before the connection fails.
    - Setting C(maximum_redirection) to 0 (zero), or setting C(follow_redirects) to
      C(none) (or to C(safe) when the request is not a C(GET) or C(HEAD)), prevents
      all redirection.
type: int
default: 5
version_added: '2.4'
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only
set to C(no) used on personally controlled sites using self-signed
certificates.
type: bool
default: yes
version_added: '2.4'
client_cert:
description:
- Specifies the client certificate (.pfx) that is used for a secure web request.
- The WinRM connection must be authenticated with C(CredSSP) if the
certificate file is not password protected.
- Other authentication types can set I(client_cert_password) when the cert
is password protected.
type: path
version_added: '2.4'
client_cert_password:
description:
- The password for the client certificate (.pfx) file that is used for a
secure web request.
type: str
version_added: '2.5'
seealso:
- module: uri
- module: win_get_url
author:
- Corwin Brown (@blakfeld)
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Perform a GET and Store Output
win_uri:
url: http://example.com/endpoint
register: http_output
# Set a HOST header to hit an internal webserver:
- name: Hit a Specific Host on the Server
win_uri:
url: http://example.com/
method: GET
headers:
host: www.somesite.com
- name: Perform a HEAD on an Endpoint
win_uri:
url: http://www.example.com/
method: HEAD
- name: POST a Body to an Endpoint
win_uri:
url: http://www.somesite.com/
method: POST
body: "{ 'some': 'json' }"
'''
RETURN = r'''
elapsed:
description: The number of seconds that elapsed while performing the download.
returned: always
type: float
sample: 23.2
url:
description: The Target URL.
returned: always
type: str
sample: https://www.ansible.com
status_code:
description: The HTTP Status Code of the response.
returned: success
type: int
sample: 200
status_description:
description: A summary of the status.
returned: success
type: str
sample: OK
content:
description: The raw content of the HTTP response.
returned: success and return_content is True
type: str
sample: '{"foo": "bar"}'
content_length:
description: The byte size of the response.
returned: success
type: int
sample: 54447
json:
description: The json structure returned under content as a dictionary.
returned: success and Content-Type is "application/json" or "application/javascript" and return_content is True
type: dict
sample: {"this-is-dependent": "on the actual return content"}
'''
| gpl-3.0 |
akozumpl/yum | docs/sphinxdocs/rstgenerator.py | 2 | 7752 | #! /usr/bin/python
import sys, re, os
def generateFile(input_directory, file_name, output_directory,
package_heirarchy=None, module_name=None):
"""Generate a rst file telling sphinx to just generate documentation
    for the public interface automatically. Output will be written to
    *module_name*.rst in *output_directory*.
:param input_directory: a string specifying the directory containing the
source code file
:param file_name: the name of the python source code file to generate
a sphinx rst file describing
    :param output_directory: a string specifying the directory where
the generated rst file should be placed. If *output_directory* does
not already exist, it will be created
:param package_heirarchy: a list of strings, where each name is
the name of a package, in the order of the hierarchy
:param module_name: the name of the module. If not given, the .py is
removed from *file_name* to produce the module_name
"""
#Stick all output into a list of strings, then just join it and output
#it all in on go.
output = []
# Create the output directory if it doesn't already exist. Note that
# if the directory is created between the check and the creation, it
# might cause issues, but I don't think this likely at all to happen
if not os.path.exists(output_directory):
try:
os.makedirs(output_directory)
except OSError as e:
print "Error creating the output directory"
print e.args
try:
#Open the file
f = open(os.path.join(input_directory, file_name), 'r')
#Do the module output
if not module_name:
module_name = re.search('(\w+).py$', file_name).group(1)
#Append the package names, if there are any
full_module_name = module_name
if package_heirarchy:
full_module_name = '.'.join(package_heirarchy) + '.' + module_name
output.append(full_module_name)
output.append('=' * len(full_module_name))
output.append('.. automodule:: %s\n' % full_module_name)
#Read the file, and do output for classes
class_reg = re.compile('^class (\w+)')
func_reg = re.compile('^def ((?:[a-zA-Z0-9]+_)*[a-zA-Z0-9]+)')
#We don't need a blank line between autofunction directives, but we do
#need one between autofunctions and headings etc. for classes. This
    #keeps track of whether we're switching from autofunctions to classes, so we
#can add that blank line.
finding_functions = False
for line in iter(f):
#Search for classes
match = class_reg.match(line)
if match is not None:
if finding_functions:
output.append('')
finding_functions = False
class_name = match.group(1)
output.append(class_name)
output.append('-' * len(class_name))
output.append('''.. autoclass:: %s
:members:
:show-inheritance:
''' % class_name)
#Search for top level functions
else:
match = func_reg.match(line)
if match is not None:
func_name = match.group(1)
output.append('.. autofunction:: ' + func_name)
finding_functions = True
f.close()
except IOError as e:
print "Error opening the input file : ", os.path.join(input_directory, file_name)
print e.args[1]
else:
#Write the output
try:
output_file_name = os.path.join(output_directory, module_name) + '.rst'
f = open(output_file_name, 'w')
f.write('\n'.join(output))
except IOError as e:
print "Error opening the output file : ", output_file_name
print e.args[1]
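# Illustrative call (added comment; the paths and names below are hypothetical):
#   generateFile('/src/yum', 'config.py', '/docs/source',
#                package_heirarchy=['yum'], module_name='config')
# would write /docs/source/config.rst containing an automodule directive for
# the full module name 'yum.config'.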
def generateIndex(module_list, output_directory):
"""Create an index.rst file for sphinx in the given directory.
:param module_list: a list of the names of the modules to list in
the index file
:param output_directory: the directory to create the index file in
"""
#Sort the module_list
module_list.sort()
try:
#open the file
f = open(os.path.join(output_directory, 'index.rst'), 'w')
#Do the output
f.write(""".. Yum documentation master file, created by
sphinx-quickstart on Mon Jun 27 14:01:20 2011.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Yum's documentation!
===============================
Contents:
.. toctree::
:maxdepth: 2
""")
f.write('\n '.join(module_list))
f.write("""
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
""")
except IOError as e:
print "Error opening the output file."
print e.args[1]
def generateAll(source_directory, output_directory):
    #Walk the source tree and generate an rst file for every python module,
    #mirroring the directory layout under output_directory
# Keep a set of file names that are packages. This is
# useful so that later we will be able to figure out full
# module names.
packages = set()
# Keep a list of tuples containing python module names and
# relative paths, so that we can build the index file later
modules = []
# Walk the directory tree
for dirpath, dirnames, filenames in os.walk(source_directory, topdown=True):
# print dirpath
# print dirnames
# print filenames
# print
        # Add the current directory to packages if __init__.py exists
if '__init__.py' in filenames:
packages.add(dirpath)
        # Find the hierarchy of packages that we are currently in
package_heirarchy = []
#Recurse up to the root
dirpath_i = dirpath
while dirpath_i != '/':
if dirpath_i in packages:
dirpath_i, tail = os.path.split(dirpath_i)
package_heirarchy.insert(0, tail)
else:
break
# Find the relative output directory, mirroring the input
# directory structure
relative_output_directory = ''
if not os.path.samefile(dirpath, source_directory):
relative_output_directory = os.path.relpath(dirpath, source_directory)
        # Don't recurse into hidden directories or into the docs directory.
        # Iterate over a copy, since removing entries from dirnames while
        # iterating over it directly can skip adjacent directories.
        for directory in list(dirnames):
            if directory == "docs" or directory.startswith("."):
                dirnames.remove(directory)
# Generate the rst for a file if it is a python source code file
for file_name in filenames:
# Skip file names that contain dashes, since they're not
# valid module names, so we won't be able to import them
# to generate the documentation anyway
if '-' in file_name:
continue
if file_name.endswith('.py'):
module_name = file_name.partition('.')[0]
modules.append(os.path.join(relative_output_directory,
module_name))
generateFile(dirpath, file_name,
os.path.join(output_directory, relative_output_directory),
package_heirarchy, module_name)
# Create the index.rst file
generateIndex(modules, output_directory)
if __name__ == "__main__":
generateAll(os.getcwd(), os.getcwd())
| gpl-2.0 |
cholokei/android_kernel_samsung_milletwifikx | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
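# A small illustration of how these snapshots evolve (the PID below is
# invented; it is not part of the script):
#
# rq = RunqueueSnapshot()      # tasks == (0,), only idle, load() == 0
# rq = rq.wake_up(1234)        # tasks == (0, 1234), event = RunqueueEventWakeup
# rq = rq.migrate_out(1234)    # tasks == (0,), event = RunqueueMigrateOut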
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
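# Sketch of how SchedEventProxy below drives this list (the timestamp and PID
# are invented for illustration only):
#
# timeslices = TimeSliceList()
# ts = timeslices.get_time_slice(1000)    # open a slice starting at t=1000
# ts.wake_up(timeslices, 42, 0, 0)        # record the wakeup, append the slice
# timeslices.find_time_slice(1000)        # -> 0, the index of that slice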
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
Livefyre/mongo-connector | mongo_connector/doc_managers/solr_doc_manager.py | 7 | 11191 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Receives documents from the oplog worker threads and indexes them
into the backend.
This file is a document manager for the Solr search engine, but the intent
is that this file can be used as an example to add on different backends.
To extend this to other systems, simply implement the exact same class and
replace the method definitions with API calls for the desired backend.
"""
import re
import json
from pysolr import Solr, SolrError
from mongo_connector import errors
from mongo_connector.constants import (DEFAULT_COMMIT_INTERVAL,
DEFAULT_MAX_BULK)
from mongo_connector.util import retry_until_ok
from mongo_connector.doc_managers import DocManagerBase, exception_wrapper
from mongo_connector.doc_managers.formatters import DocumentFlattener
# pysolr only has 1 exception: SolrError
wrap_exceptions = exception_wrapper({
SolrError: errors.OperationFailed})
ADMIN_URL = 'admin/luke?show=schema&wt=json'
decoder = json.JSONDecoder()
class DocManager(DocManagerBase):
"""The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
    The reason for storing id/doc pairs as opposed to docs alone is so that multiple
updates to the same doc reflect the most up to date version as opposed to
multiple, slightly different versions of a doc.
"""
def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL,
unique_key='_id', chunk_size=DEFAULT_MAX_BULK, **kwargs):
"""Verify Solr URL and establish a connection.
"""
self.solr = Solr(url)
self.unique_key = unique_key
# pysolr does things in milliseconds
if auto_commit_interval is not None:
self.auto_commit_interval = auto_commit_interval * 1000
else:
self.auto_commit_interval = None
self.chunk_size = chunk_size
self.field_list = []
self._build_fields()
self._formatter = DocumentFlattener()
def _parse_fields(self, result, field_name):
""" If Schema access, parse fields and build respective lists
"""
field_list = []
for key, value in result.get('schema', {}).get(field_name, {}).items():
if key not in field_list:
field_list.append(key)
return field_list
@wrap_exceptions
def _build_fields(self):
""" Builds a list of valid fields
"""
declared_fields = self.solr._send_request('get', ADMIN_URL)
result = decoder.decode(declared_fields)
self.field_list = self._parse_fields(result, 'fields')
# Build regular expressions to match dynamic fields.
# dynamic field names may have exactly one wildcard, either at
# the beginning or the end of the name
self._dynamic_field_regexes = []
for wc_pattern in self._parse_fields(result, 'dynamicFields'):
if wc_pattern[0] == "*":
self._dynamic_field_regexes.append(
re.compile(".*%s\Z" % wc_pattern[1:]))
elif wc_pattern[-1] == "*":
self._dynamic_field_regexes.append(
re.compile("\A%s.*" % wc_pattern[:-1]))
def _clean_doc(self, doc):
"""Reformats the given document before insertion into Solr.
This method reformats the document in the following ways:
- removes extraneous fields that aren't defined in schema.xml
- unwinds arrays in order to find and later flatten sub-documents
- flattens the document so that there are no sub-documents, and every
value is associated with its dot-separated path of keys
An example:
{"a": 2,
"b": {
"c": {
"d": 5
}
},
"e": [6, 7, 8]
}
becomes:
{"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
"""
# Translate the _id field to whatever unique key we're using.
# _id may not exist in the doc, if we retrieved it from Solr
# as part of update.
if '_id' in doc:
doc[self.unique_key] = doc.pop("_id")
# SOLR cannot index fields within sub-documents, so flatten documents
# with the dot-separated path to each value as the respective key
flat_doc = self._formatter.format_document(doc)
# Only include fields that are explicitly provided in the
# schema or match one of the dynamic field patterns, if
# we were able to retrieve the schema
if len(self.field_list) + len(self._dynamic_field_regexes) > 0:
def include_field(field):
return field in self.field_list or any(
regex.match(field) for regex in self._dynamic_field_regexes
)
return dict((k, v) for k, v in flat_doc.items() if include_field(k))
return flat_doc
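    # Making the docstring example above concrete (assuming the unique_key is
    # "id" and the flattened keys are declared in schema.xml or match a
    # dynamic field pattern, so they survive the filtering step):
    #
    #     self._clean_doc({"_id": 1, "b": {"c": {"d": 5}}, "e": [6, 7]})
    #     # -> {"id": 1, "b.c.d": 5, "e.0": 6, "e.1": 7}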
def stop(self):
""" Stops the instance
"""
pass
def apply_update(self, doc, update_spec):
"""Override DocManagerBase.apply_update to have flat documents."""
# Replace a whole document
if not '$set' in update_spec and not '$unset' in update_spec:
# update spec contains the new document
update_spec['_ts'] = doc['_ts']
update_spec['ns'] = doc['ns']
update_spec['_id'] = doc['_id']
return update_spec
for to_set in update_spec.get("$set", []):
value = update_spec['$set'][to_set]
# Find dotted-path to the value, remove that key from doc, then
# put value at key:
keys_to_pop = []
for key in doc:
if key.startswith(to_set):
if key == to_set or key[len(to_set)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
doc[to_set] = value
for to_unset in update_spec.get("$unset", []):
# MongoDB < 2.5.2 reports $unset for fields that don't exist within
# the document being updated.
keys_to_pop = []
for key in doc:
if key.startswith(to_unset):
if key == to_unset or key[len(to_unset)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
return doc
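    # For example (values invented): applying {"$set": {"b.c": 9}} to a
    # flattened doc containing "b.c.d" and "b.cd" pops only "b.c.d" (an exact
    # match or a dotted extension of "b.c") and then sets doc["b.c"] = 9,
    # leaving the unrelated "b.cd" key untouched.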
@wrap_exceptions
def update(self, doc, update_spec):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
# Commit outstanding changes so that the document to be updated is the
# same version to which the changes apply.
self.commit()
query = "%s:%s" % (self.unique_key, str(doc['_id']))
results = self.solr.search(query)
if not len(results):
# Document may not be retrievable yet
self.commit()
results = self.solr.search(query)
# Results is an iterable containing only 1 result
for doc in results:
updated = self.apply_update(doc, update_spec)
# A _version_ of 0 will always apply the update
updated['_version_'] = 0
self.upsert(updated)
return updated
@wrap_exceptions
def upsert(self, doc):
"""Update or insert a document into Solr
This method should call whatever add/insert/update method exists for
the backend engine and add the document in there. The input will
always be one mongo document, represented as a Python dictionary.
"""
if self.auto_commit_interval is not None:
self.solr.add([self._clean_doc(doc)],
commit=(self.auto_commit_interval == 0),
commitWithin=str(self.auto_commit_interval))
else:
self.solr.add([self._clean_doc(doc)], commit=False)
@wrap_exceptions
def bulk_upsert(self, docs):
"""Update or insert multiple documents into Solr
docs may be any iterable
"""
if self.auto_commit_interval is not None:
add_kwargs = {
"commit": (self.auto_commit_interval == 0),
"commitWithin": str(self.auto_commit_interval)
}
else:
add_kwargs = {"commit": False}
cleaned = (self._clean_doc(d) for d in docs)
if self.chunk_size > 0:
batch = list(next(cleaned) for i in range(self.chunk_size))
while batch:
self.solr.add(batch, **add_kwargs)
batch = list(next(cleaned)
for i in range(self.chunk_size))
else:
self.solr.add(cleaned, **add_kwargs)
@wrap_exceptions
def remove(self, doc):
"""Removes documents from Solr
The input is a python dictionary that represents a mongo document.
"""
self.solr.delete(id=str(doc["_id"]),
commit=(self.auto_commit_interval == 0))
@wrap_exceptions
def _remove(self):
"""Removes everything
"""
self.solr.delete(q='*:*', commit=(self.auto_commit_interval == 0))
@wrap_exceptions
def _stream_search(self, query):
"""Helper method for iterating over Solr search results."""
for doc in self.solr.search(query, rows=100000000):
if self.unique_key != "_id":
doc["_id"] = doc.pop(self.unique_key)
yield doc
@wrap_exceptions
def search(self, start_ts, end_ts):
"""Called to query Solr for documents in a time range."""
query = '_ts: [%s TO %s]' % (start_ts, end_ts)
return self._stream_search(query)
@wrap_exceptions
def _search(self, query):
"""For test purposes only. Performs search on Solr with given query
Does not have to be implemented.
"""
return self._stream_search(query)
def commit(self):
"""This function is used to force a commit.
"""
retry_until_ok(self.solr.commit)
@wrap_exceptions
def get_last_doc(self):
"""Returns the last document stored in the Solr engine.
"""
#search everything, sort by descending timestamp, return 1 row
try:
result = self.solr.search('*:*', sort='_ts desc', rows=1)
except ValueError:
return None
for r in result:
r['_id'] = r.pop(self.unique_key)
return r
| apache-2.0 |
culots/kernel_lge_madai | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
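# A hypothetical invocation for a new FC fabric module named "tcm_foo",
# following the directory convention main() sets up below (the name is
# illustrative only):
#
# tcm_mod_build_base_includes("FC", tcm_dir + "drivers/target/tcm_foo", "tcm_foo")
#
# This writes drivers/target/tcm_foo/tcm_foo_base.h and leaves the module level
# fabric_mod_port/fabric_mod_init_port globals set to "lport"/"nport" for the
# generators below.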
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
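# After a scan, the module level fabric_ops list holds the raw function pointer
# member lines of struct target_core_fabric_ops, e.g. a line of the rough shape
# "int (*write_pending)(struct se_cmd *);" (the exact text depends on the kernel
# tree being scanned). tcm_mod_dump_fabric_ops() below pattern-matches on those
# names to emit matching stub implementations.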
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
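# Illustrative sketch (not part of the original script): for a hypothetical
# fabric_mod_name of "tcm_nab5000", the Kconfig text assembled above would
# come out roughly as:
#
#   config TCM_NAB5000
#       tristate "TCM_NAB5000 fabric module"
#       depends on TARGET_CORE && CONFIGFS_FS
#       default n
#       ---help---
#       Say Y here to enable the TCM_NAB5000 fabric module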
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd()
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
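# Illustrative usage sketch (not part of the original script); the module
# name and protocol below are hypothetical examples:
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# which generates drivers/target/tcm_nab5000/ with fabric, configfs,
# Makefile and Kconfig skeletons, then optionally wires the new module into
# drivers/target/Makefile and drivers/target/Kconfig.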
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| gpl-2.0 |
ehirt/odoo | addons/email_template/__init__.py | 381 | 1144 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import email_template
import wizard
import res_partner
import ir_actions
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mrtnrdl/.macdots | scripts/bin/platform-tools/systrace/catapult/dependency_manager/dependency_manager/cloud_storage_info_unittest.py | 4 | 10473 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import stat
import unittest
import mock
from pyfakefs import fake_filesystem_unittest
from py_utils import cloud_storage
from dependency_manager import archive_info
from dependency_manager import cloud_storage_info
from dependency_manager import exceptions
class CloudStorageInfoTest(unittest.TestCase):
def testInitCloudStorageInfoErrors(self):
# Cloud storage information must be specified as a complete set (all fields or none).
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, None, None, None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', None, None, None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, 'cs_hash', None, None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, None, 'download_path', None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, None, None, 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, 'cs_hash', 'download_path', 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', None, 'download_path', 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', 'cs_hash', None, 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', 'cs_hash', 'download_path', None)
def testInitWithVersion(self):
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, None, None,
'cs_remote_path', version_in_cs='version_in_cs')
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, 'cs_hash',
'download_path', 'cs_remote_path', version_in_cs='version_in_cs')
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path',
version_in_cs='version_in_cs')
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertEqual('version_in_cs', cs_info._version_in_cs)
def testInitWithArchiveInfoErrors(self):
zip_info = archive_info.ArchiveInfo(
'download_path', 'unzip_location', 'path_within_archive')
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, None, None, None,
archive_info=zip_info)
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, None, None,
'cs_remote_path', archive_info=zip_info)
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, 'cs_bucket', 'cs_hash',
None, 'cs_remote_path', archive_info=zip_info)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', 'cs_hash',
'cs_remote_path', None, version_in_cs='version',
archive_info=zip_info)
def testInitWithArchiveInfo(self):
zip_info = archive_info.ArchiveInfo(
'download_path', 'unzip_location', 'path_within_archive')
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path',
archive_info=zip_info)
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertEqual(zip_info, cs_info._archive_info)
self.assertFalse(cs_info._version_in_cs)
def testInitWithVersionAndArchiveInfo(self):
zip_info = archive_info.ArchiveInfo(
'download_path', 'unzip_location', 'path_within_archive')
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', 'download_path',
'cs_remote_path', version_in_cs='version_in_cs',
archive_info=zip_info)
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertEqual(zip_info, cs_info._archive_info)
self.assertEqual('version_in_cs', cs_info._version_in_cs)
def testInitMinimumCloudStorageInfo(self):
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket',
'cs_hash', 'download_path',
'cs_remote_path')
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertFalse(cs_info._version_in_cs)
self.assertFalse(cs_info._archive_info)
class TestGetRemotePath(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
self.config_path = '/test/dep_config.json'
self.fs.CreateFile(self.config_path, contents='{}')
self.download_path = '/foo/download_path'
self.fs.CreateFile(
self.download_path, contents='1010110', st_mode=stat.S_IWOTH)
self.cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', self.download_path, 'cs_remote_path',
version_in_cs='1.2.3.4',)
def tearDown(self):
self.tearDownPyfakefs()
@mock.patch(
'py_utils.cloud_storage.GetIfHashChanged')
def testGetRemotePathNoArchive(self, cs_get_mock):
def _GetIfHashChangedMock(cs_path, download_path, bucket, file_hash):
del cs_path, bucket, file_hash
if not os.path.exists(download_path):
self.fs.CreateFile(download_path, contents='1010001010101010110101')
cs_get_mock.side_effect = _GetIfHashChangedMock
# All of the needed information is given, and the downloaded path exists
# after calling cloud storage.
self.assertEqual(
os.path.abspath(self.download_path),
self.cs_info.GetRemotePath())
self.assertTrue(os.stat(self.download_path).st_mode & stat.S_IXUSR)
# All of the needed information is given, but the downloaded path doesn't
# exist after calling cloud storage.
self.fs.RemoveObject(self.download_path)
cs_get_mock.side_effect = [True]
self.assertRaises(
exceptions.FileNotFoundError, self.cs_info.GetRemotePath)
@mock.patch(
'dependency_manager.dependency_manager_util.UnzipArchive')
@mock.patch(
'dependency_manager.cloud_storage_info.cloud_storage.GetIfHashChanged') # pylint: disable=line-too-long
def testGetRemotePathWithArchive(self, cs_get_mock, unzip_mock):
def _GetIfHashChangedMock(cs_path, download_path, bucket, file_hash):
del cs_path, bucket, file_hash
if not os.path.exists(download_path):
self.fs.CreateFile(download_path, contents='1010001010101010110101')
cs_get_mock.side_effect = _GetIfHashChangedMock
unzip_path = os.path.join(
os.path.dirname(self.download_path), 'unzip_dir')
path_within_archive = os.path.join('path', 'within', 'archive')
dep_path = os.path.join(unzip_path, path_within_archive)
def _UnzipFileMock(archive_file, unzip_location, tmp_location=None):
del archive_file, tmp_location
self.fs.CreateFile(dep_path)
self.fs.CreateFile(os.path.join(unzip_location, 'extra', 'path'))
self.fs.CreateFile(os.path.join(unzip_location, 'another_extra_path'))
unzip_mock.side_effect = _UnzipFileMock
# Create a stale directory that's expected to get deleted
stale_unzip_path_glob = os.path.join(
os.path.dirname(self.download_path), 'unzip_dir_*')
stale_path = os.path.join(
os.path.dirname(self.download_path), 'unzip_dir_stale')
self.fs.CreateDirectory(stale_path)
self.fs.CreateFile(os.path.join(stale_path, 'some_file'))
self.assertFalse(os.path.exists(dep_path))
zip_info = archive_info.ArchiveInfo(
self.download_path, unzip_path, path_within_archive,
stale_unzip_path_glob)
self.cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', self.download_path, 'cs_remote_path',
version_in_cs='1.2.3.4', archive_info=zip_info)
self.assertFalse(unzip_mock.called)
self.assertEqual(
os.path.abspath(dep_path),
self.cs_info.GetRemotePath())
self.assertTrue(os.path.exists(dep_path))
self.assertTrue(stat.S_IMODE(os.stat(os.path.abspath(dep_path)).st_mode) &
(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR))
unzip_mock.assert_called_once_with(self.download_path, unzip_path)
# Stale directory should have been deleted
self.assertFalse(os.path.exists(stale_path))
# Should not need to unzip a second time, but should return the same path.
unzip_mock.reset_mock()
self.assertTrue(os.path.exists(dep_path))
self.assertEqual(
os.path.abspath(dep_path),
self.cs_info.GetRemotePath())
self.assertTrue(stat.S_IMODE(os.stat(os.path.abspath(dep_path)).st_mode) &
(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR))
self.assertFalse(unzip_mock.called)
@mock.patch(
'py_utils.cloud_storage.GetIfHashChanged')
def testGetRemotePathCloudStorageErrors(self, cs_get_mock):
cs_get_mock.side_effect = cloud_storage.CloudStorageError
self.assertRaises(cloud_storage.CloudStorageError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.ServerError
self.assertRaises(cloud_storage.ServerError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.NotFoundError
self.assertRaises(cloud_storage.NotFoundError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.PermissionError
self.assertRaises(cloud_storage.PermissionError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.CredentialsError
self.assertRaises(cloud_storage.CredentialsError,
self.cs_info.GetRemotePath)
| unlicense |
truongdq/chainer | tests/cupy_tests/test_ndarray_get.py | 5 | 1384 | import unittest
import cupy
from cupy import cuda
from cupy import testing
import numpy
from numpy import testing as np_testing
@testing.gpu
class TestArrayGet(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.stream = cuda.Stream()
def check_get(self, f, stream):
a_gpu = f(cupy)
a_cpu = f(numpy)
np_testing.assert_array_equal(a_gpu.get(stream), a_cpu)
@testing.for_all_dtypes()
def test_contiguous_array(self, dtype):
contiguous_array = lambda xp: testing.shaped_arange(
(3,), xp=xp, dtype=dtype)
self.check_get(contiguous_array, None)
@testing.for_all_dtypes()
def test_non_contiguous_array(self, dtype):
non_contiguous_array = lambda xp: testing.shaped_arange(
(3,), xp=xp, dtype=dtype)[0::2]
self.check_get(non_contiguous_array, None)
@testing.for_all_dtypes()
def test_contiguous_array_stream(self, dtype):
contiguous_array = lambda xp: testing.shaped_arange(
(3,), xp=xp, dtype=dtype)
self.check_get(contiguous_array, self.stream.ptr)
@testing.for_all_dtypes()
def test_non_contiguous_array_stream(self, dtype):
non_contiguous_array = lambda xp: testing.shaped_arange(
(3,), xp=xp, dtype=dtype)[0::2]
self.check_get(non_contiguous_array, self.stream.ptr)
| mit |
muntasirsyed/intellij-community | python/lib/Lib/distutils/command/install.py | 92 | 23567 | """distutils.command.install
Implements the Distutils 'install' command."""
from distutils import log
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install.py 43363 2006-03-27 21:55:21Z phillip.eby $"
import sys, os, string
from types import *
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.sysconfig import get_config_vars
from distutils.errors import DistutilsPlatformError
from distutils.file_util import write_file
from distutils.util import convert_path, subst_vars, change_root
from distutils.errors import DistutilsOptionError
from glob import glob
if sys.version < "2.2":
WINDOWS_SCHEME = {
'purelib': '$base',
'platlib': '$base',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
else:
WINDOWS_SCHEME = {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
INSTALL_SCHEMES = {
'unix_prefix': {
'purelib': '$base/lib/python$py_version_short/site-packages',
'platlib': '$platbase/lib/python$py_version_short/site-packages',
'headers': '$base/include/python$py_version_short/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_home': {
'purelib': '$base/lib/python',
'platlib': '$base/lib/python',
'headers': '$base/include/python/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'nt': WINDOWS_SCHEME,
'mac': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
'os2': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
'java': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
}
}
# The keys to an installation scheme; if any new types of files are to be
# installed, be sure to add an entry to every installation scheme above,
# and to SCHEME_KEYS here.
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
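# Illustrative example (not part of the original module): with the
# 'unix_prefix' scheme and a hypothetical --prefix=/usr/local on a
# hypothetical Python 2.5 installation, the $-variables above expand
# roughly to:
#
#   purelib -> /usr/local/lib/python2.5/site-packages
#   platlib -> /usr/local/lib/python2.5/site-packages
#   headers -> /usr/local/include/python2.5/<dist_name>
#   scripts -> /usr/local/bin
#   data    -> /usr/local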
class install (Command):
description = "install everything from build directory"
user_options = [
# Select installation scheme and set base director(y|ies)
('prefix=', None,
"installation prefix"),
('exec-prefix=', None,
"(Unix only) prefix for platform-specific files"),
('home=', None,
"(Unix only) home directory to install under"),
# Or, just set the base director(y|ies)
('install-base=', None,
"base installation directory (instead of --prefix or --home)"),
('install-platbase=', None,
"base installation directory for platform-specific files " +
"(instead of --exec-prefix or --home)"),
('root=', None,
"install everything relative to this alternate root directory"),
# Or, explicitly set the installation scheme
('install-purelib=', None,
"installation directory for pure Python module distributions"),
('install-platlib=', None,
"installation directory for non-pure module distributions"),
('install-lib=', None,
"installation directory for all module distributions " +
"(overrides --install-purelib and --install-platlib)"),
('install-headers=', None,
"installation directory for C/C++ headers"),
('install-scripts=', None,
"installation directory for Python scripts"),
('install-data=', None,
"installation directory for data files"),
# Byte-compilation options -- see install_lib.py for details, as
# these are duplicated from there (but only install_lib does
# anything with them).
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
# Miscellaneous control options
('force', 'f',
"force installation (overwrite any existing files)"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
# Where to install documentation (eventually!)
#('doc-format=', None, "format of documentation to generate"),
#('install-man=', None, "directory for Unix man pages"),
#('install-html=', None, "directory for HTML documentation"),
#('install-info=', None, "directory for GNU info files"),
('record=', None,
"filename in which to record list of installed files"),
]
boolean_options = ['compile', 'force', 'skip-build']
negative_opt = {'no-compile' : 'compile'}
def initialize_options (self):
# High-level options: these select both an installation base
# and scheme.
self.prefix = None
self.exec_prefix = None
self.home = None
# These select only the installation base; it's up to the user to
# specify the installation scheme (currently, that means supplying
# the --install-{platlib,purelib,scripts,data} options).
self.install_base = None
self.install_platbase = None
self.root = None
# These options are the actual installation directories; if not
# supplied by the user, they are filled in using the installation
# scheme implied by prefix/exec-prefix/home and the contents of
# that installation scheme.
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.compile = None
self.optimize = None
# These two are for putting non-packagized distributions into their
# own directory and creating a .pth file if it makes sense.
# 'extra_path' comes from the setup file; 'install_path_file' can
# be turned off if it makes no sense to install a .pth file. (But
# better to install it uselessly than to guess wrong and not
# install it when it's necessary and would be used!) Currently,
# 'install_path_file' is always true unless some outsider meddles
# with it.
self.extra_path = None
self.install_path_file = 1
# 'force' forces installation, even if target files are not
# out-of-date. 'skip_build' skips running the "build" command,
# handy if you know it's not necessary. 'warn_dir' (which is *not*
# a user option, it's just there so the bdist_* commands can turn
# it off) determines whether we warn about installing to a
# directory not in sys.path.
self.force = 0
self.skip_build = 0
self.warn_dir = 1
# These are only here as a conduit from the 'build' command to the
# 'install_*' commands that do the real work. ('build_base' isn't
# actually used anywhere, but it might be useful in future.) They
# are not user options, because if the user told the install
# command where the build directory is, that wouldn't affect the
# build command.
self.build_base = None
self.build_lib = None
# Not defined yet because we don't know anything about
# documentation yet.
#self.install_man = None
#self.install_html = None
#self.install_info = None
self.record = None
# -- Option finalizing methods -------------------------------------
# (This is rather more involved than for most commands,
# because this is where the policy for installing third-
# party Python modules on various platforms given a wide
# array of user input is decided. Yes, it's quite complex!)
def finalize_options (self):
# This method (and its pliant slaves, like 'finalize_unix()',
# 'finalize_other()', and 'select_scheme()') is where the default
# installation directories for modules, extension modules, and
# anything else we care to install from a Python module
# distribution. Thus, this code makes a pretty important policy
# statement about how third-party stuff is added to a Python
# installation! Note that the actual work of installation is done
# by the relatively simple 'install_*' commands; they just take
# their orders from the installation directory options determined
# here.
# Check for errors/inconsistencies in the options; first, stuff
# that's wrong on any platform.
if ((self.prefix or self.exec_prefix or self.home) and
(self.install_base or self.install_platbase)):
raise DistutilsOptionError, \
("must supply either prefix/exec-prefix/home or " +
"install-base/install-platbase -- not both")
if self.home and (self.prefix or self.exec_prefix):
raise DistutilsOptionError, \
"must supply either home or prefix/exec-prefix -- not both"
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
if self.exec_prefix:
self.warn("exec-prefix option ignored on this platform")
self.exec_prefix = None
# Now the interesting logic -- so interesting that we farm it out
# to other methods. The goal of these methods is to set the final
# values for the install_{lib,scripts,data,...} options, using as
# input a heady brew of prefix, exec_prefix, home, install_base,
# install_platbase, user-supplied versions of
# install_{purelib,platlib,lib,scripts,data,...}, and the
# INSTALL_SCHEME dictionary above. Phew!
self.dump_dirs("pre-finalize_{unix,other}")
if os.name == 'posix':
self.finalize_unix()
else:
self.finalize_other()
self.dump_dirs("post-finalize_{unix,other}()")
# Expand configuration variables, tilde, etc. in self.install_base
# and self.install_platbase -- that way, we can use $base or
# $platbase in the other installation directories and not worry
# about needing recursive variable expansion (shudder).
py_version = (string.split(sys.version))[0]
(prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
}
self.expand_basedirs()
self.dump_dirs("post-expand_basedirs()")
# Now define config vars for the base directories so we can expand
# everything else.
self.config_vars['base'] = self.install_base
self.config_vars['platbase'] = self.install_platbase
if DEBUG:
from pprint import pprint
print "config vars:"
pprint(self.config_vars)
# Expand "~" and configuration variables in the installation
# directories.
self.expand_dirs()
self.dump_dirs("post-expand_dirs()")
# Pick the actual directory to install all modules to: either
# install_purelib or install_platlib, depending on whether this
# module distribution is pure or not. Of course, if the user
# already specified install_lib, use their selection.
if self.install_lib is None:
if self.distribution.ext_modules: # has extensions: non-pure
self.install_lib = self.install_platlib
else:
self.install_lib = self.install_purelib
# Convert directories from Unix /-separated syntax to the local
# convention.
self.convert_paths('lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
# Well, we're not actually fully completely finalized yet: we still
# have to deal with 'extra_path', which is the hack for allowing
# non-packagized module distributions (hello, Numerical Python!) to
# get their own directories.
self.handle_extra_path()
self.install_libbase = self.install_lib # needed for .pth file
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
# If a new root directory was supplied, make all the installation
# dirs relative to it.
if self.root is not None:
self.change_roots('libbase', 'lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
self.dump_dirs("after prepending root")
# Find out the build directories, ie. where to install from.
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'))
# Punt on doc directories for now -- after all, we're punting on
# documentation completely!
# finalize_options ()
def dump_dirs (self, msg):
if DEBUG:
from distutils.fancy_getopt import longopt_xlate
print msg + ":"
for opt in self.user_options:
opt_name = opt[0]
if opt_name[-1] == "=":
opt_name = opt_name[0:-1]
if self.negative_opt.has_key(opt_name):
opt_name = string.translate(self.negative_opt[opt_name],
longopt_xlate)
val = not getattr(self, opt_name)
else:
opt_name = string.translate(opt_name, longopt_xlate)
val = getattr(self, opt_name)
print " %s: %s" % (opt_name, val)
def finalize_unix (self):
if self.install_base is not None or self.install_platbase is not None:
if ((self.install_lib is None and
self.install_purelib is None and
self.install_platlib is None) or
self.install_headers is None or
self.install_scripts is None or
self.install_data is None):
raise DistutilsOptionError, \
("install-base or install-platbase supplied, but "
"installation scheme is incomplete")
return
if self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
if self.exec_prefix is not None:
raise DistutilsOptionError, \
"must not supply exec-prefix without prefix"
self.prefix = os.path.normpath(sys.prefix)
self.exec_prefix = os.path.normpath(sys.exec_prefix)
else:
if self.exec_prefix is None:
self.exec_prefix = self.prefix
self.install_base = self.prefix
self.install_platbase = self.exec_prefix
self.select_scheme("unix_prefix")
# finalize_unix ()
def finalize_other (self): # Windows and Mac OS for now
if self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
self.prefix = os.path.normpath(sys.prefix)
self.install_base = self.install_platbase = self.prefix
try:
self.select_scheme(os.name)
except KeyError:
raise DistutilsPlatformError, \
"I don't know how to install stuff on '%s'" % os.name
# finalize_other ()
def select_scheme (self, name):
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def _expand_attrs (self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs (self):
self._expand_attrs(['install_base',
'install_platbase',
'root'])
def expand_dirs (self):
self._expand_attrs(['install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',])
def convert_paths (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, convert_path(getattr(self, attr)))
def handle_extra_path (self):
if self.extra_path is None:
self.extra_path = self.distribution.extra_path
if self.extra_path is not None:
if type(self.extra_path) is StringType:
self.extra_path = string.split(self.extra_path, ',')
if len(self.extra_path) == 1:
path_file = extra_dirs = self.extra_path[0]
elif len(self.extra_path) == 2:
(path_file, extra_dirs) = self.extra_path
else:
raise DistutilsOptionError, \
("'extra_path' option must be a list, tuple, or "
"comma-separated string with 1 or 2 elements")
# convert to local form in case Unix notation used (as it
# should be in setup scripts)
extra_dirs = convert_path(extra_dirs)
else:
path_file = None
extra_dirs = ''
# XXX should we warn if path_file and not extra_dirs? (in which
# case the path file would be harmless but pointless)
self.path_file = path_file
self.extra_dirs = extra_dirs
# handle_extra_path ()
def change_roots (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, change_root(self.root, getattr(self, attr)))
# -- Command execution methods -------------------------------------
def run (self):
# Obviously have to build before we can install
if not self.skip_build:
self.run_command('build')
# Run all sub-commands (at least those that need to be run)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.path_file:
self.create_path_file()
# write list of installed files, if requested.
if self.record:
outputs = self.get_outputs()
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in xrange(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
self.execute(write_file,
(self.record, outputs),
"writing list of installed files to '%s'" %
self.record)
sys_path = map(os.path.normpath, sys.path)
sys_path = map(os.path.normcase, sys_path)
install_lib = os.path.normcase(os.path.normpath(self.install_lib))
if (self.warn_dir and
not (self.path_file and self.install_path_file) and
install_lib not in sys_path):
log.debug(("modules installed to '%s', which is not in "
"Python's module search path (sys.path) -- "
"you'll have to change the search path yourself"),
self.install_lib)
# run ()
def create_path_file (self):
filename = os.path.join(self.install_libbase,
self.path_file + ".pth")
if self.install_path_file:
self.execute(write_file,
(filename, [self.extra_dirs]),
"creating %s" % filename)
else:
self.warn("path file '%s' not created" % filename)
# -- Reporting methods ---------------------------------------------
def get_outputs (self):
# Assemble the outputs of all the sub-commands.
outputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
# Add the contents of cmd.get_outputs(), ensuring
# that outputs doesn't contain duplicate entries
for filename in cmd.get_outputs():
if filename not in outputs:
outputs.append(filename)
if self.path_file and self.install_path_file:
outputs.append(os.path.join(self.install_libbase,
self.path_file + ".pth"))
return outputs
def get_inputs (self):
# XXX gee, this looks familiar ;-(
inputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
inputs.extend(cmd.get_inputs())
return inputs
# -- Predicates for sub-command list -------------------------------
def has_lib (self):
"""Return true if the current distribution has any Python
modules to install."""
return (self.distribution.has_pure_modules() or
self.distribution.has_ext_modules())
def has_headers (self):
return self.distribution.has_headers()
def has_scripts (self):
return self.distribution.has_scripts()
def has_data (self):
return self.distribution.has_data_files()
# 'sub_commands': a list of commands this command might have to run to
# get its work done. See cmd.py for more info.
sub_commands = [('install_lib', has_lib),
('install_headers', has_headers),
('install_scripts', has_scripts),
('install_data', has_data),
('install_egg_info', lambda self:True),
]
# class install
| apache-2.0 |
BehavioralInsightsTeam/edx-platform | lms/djangoapps/static_template_view/urls.py | 15 | 2320 | """
URLs for static_template_view app
"""
from django.conf import settings
from django.conf.urls import url
from static_template_view import views
urlpatterns = [
# Semi-static views (these need to be rendered and have the login bar, but don't change)
url(r'^404$', views.render, {'template': '404.html'}, name="404"),
# display error page templates, for testing purposes
url(r'^404$', views.render_404, name='static_template_view.views.render_404'),
url(r'^500$', views.render_500, name='static_template_view.views.render_500'),
url(r'^blog$', views.render, {'template': 'blog.html'}, name="blog"),
url(r'^contact$', views.render, {'template': 'contact.html'}, name="contact"),
url(r'^donate$', views.render, {'template': 'donate.html'}, name="donate"),
url(r'^faq$', views.render, {'template': 'faq.html'}, name="faq"),
url(r'^help$', views.render, {'template': 'help.html'}, name="help_edx"),
url(r'^jobs$', views.render, {'template': 'jobs.html'}, name="jobs"),
url(r'^news$', views.render, {'template': 'news.html'}, name="news"),
url(r'^press$', views.render, {'template': 'press.html'}, name="press"),
url(r'^media-kit$', views.render, {'template': 'media-kit.html'}, name="media-kit"),
url(r'^copyright$', views.render, {'template': 'copyright.html'}, name="copyright"),
# Press releases
url(r'^press/([_a-zA-Z0-9-]+)$', views.render_press_release, name='press_release'),
]
# Only enable URLs for those marketing links actually enabled in the
# settings. Disable URLs by marking them as None.
for key, value in settings.MKTG_URL_LINK_MAP.items():
# Skip disabled URLs
if value is None:
continue
# These urls are enabled separately
if key == "ROOT" or key == "COURSES":
continue
# The MKTG_URL_LINK_MAP key specifies the template filename
template = key.lower()
if '.' not in template:
# Append STATIC_TEMPLATE_VIEW_DEFAULT_FILE_EXTENSION if
# no file extension was specified in the key
template = "%s.%s" % (template, settings.STATIC_TEMPLATE_VIEW_DEFAULT_FILE_EXTENSION)
# Make the assumption that the URL we want is the lowercased
# version of the map key
urlpatterns.append(url(r'^%s$' % key.lower(), views.render, {'template': template}, name=value))
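# Illustrative example (not part of the original module): a hypothetical
# MKTG_URL_LINK_MAP entry {'ABOUT': 'about_edx'} would produce
#   url(r'^about$', views.render, {'template': 'about.html'}, name='about_edx')
# assuming STATIC_TEMPLATE_VIEW_DEFAULT_FILE_EXTENSION is 'html'.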
| agpl-3.0 |
KshitijKarthick/tvecs | tvecs/visualization/server.py | 1 | 12380 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""CherryPy Server to provide recommendations of semantic similarity."""
import os
import json
import codecs
import cherrypy
import argparse
import configparser
from gensim.models import Word2Vec
from nltk.tokenize import word_tokenize
from jinja2 import Environment, FileSystemLoader
from cherrypy.lib.static import serve_file
from functools import reduce
from tvecs.preprocessor import yandex_api as yandex
from tvecs.vector_space_mapper.vector_space_mapper import VectorSpaceMapper
class Server(object):
"""
Server Configuration for t-vex.
.. seealso::
* :mod:`cherrypy`
"""
def __init__(self):
"""Initialization the Language and Model."""
self.model = {
"english": Server._load_model("english"),
"hindi": Server._load_model("hindi"),
}
self.cross_lang_vm = {
("english", "hindi"): self._create_vector_space_mapper("english", "hindi"),
("hindi", "english"): self._create_vector_space_mapper("hindi", "english"),
}
self.cache_file_path = os.path.join(
"tvecs", "visualization", "cached_dictionary"
)
if not os.path.exists(self.cache_file_path):
json.dump({}, codecs.open(self.cache_file_path, "w", encoding="utf-8"))
self.cached_dictionary = {}
with codecs.open(self.cache_file_path, "r", encoding="utf-8") as f:
self.cached_dictionary = json.load(f)
@cherrypy.expose
def index(self):
"""Semantic spac visualization html returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "index.html")
)
)
@cherrypy.expose
def multivariate_analysis(self):
"""Parallel Coordinates for multivariate analysis html page return."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "multivariate.html")
)
)
@cherrypy.expose
def cross_lingual(self):
"""Cross Lingual recommender html returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "cross_lingual.html")
)
)
@cherrypy.expose
def distances(self):
"""Visualization with distances html returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "distances.html")
)
)
@cherrypy.expose
def lingual_semantics(self):
"""Semantically related words in same language returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "intra_language.html")
)
)
def retrieve_meaning(self, language, word):
"""
Optional: Translate the word.
Retrieve English definition(s) of a word from the cached file or PyDictionary.
API Documentation
:param language: Language for which definition needed
:param word: Word whose definition needs to be retrieved
:type language: String
:type word: String
:return: word and definition
:rtype: :class:`String`
"""
from PyDictionary import PyDictionary
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
word = word.lower()
trword = word
if word in self.cached_dictionary:
return json.dumps(self.cached_dictionary[word])
else:
if language == "hindi":
trword = yandex.get_translation(word, "hi-en")
dictionary = PyDictionary(trword)
meanings = [trword, dictionary.meaning(trword)]
if meanings[1]:
self.cached_dictionary[word] = meanings
with codecs.open(self.cache_file_path, "w", encoding="utf-8") as f:
f.write(json.dumps(self.cached_dictionary))
return json.dumps(meanings)
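# Illustrative sketch (not part of the original module): a call such as
#   server.retrieve_meaning('english', 'king')
# is expected to return a JSON string shaped roughly like
#   '["king", {"Noun": ["a male sovereign", ...]}]'
# where the definitions come from PyDictionary and are therefore only
# indicative examples, not guaranteed values.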
@cherrypy.expose
def get_distance(self, word1, word2, language1, language2):
"""
Retrieve cosine distance between word1 and word2.
- word1 and word2 have to be in the vocabulary
of language1 and language2, respectively.
API Documentation
:param word1: A word in language1's vocabulary
:param language1: Language of word1
:param word2: A word in language2's vocabulary
:param language2: Language of word2
:type word1: String
:type language1: String
:type word2: String
:type language2: String
:return: Dictionary with keys 'word1', 'word2', and 'distance'
:rtype: :class:`Dictionary`
.. py:currentmodule:: tvecs.vector_space_mapper.vector_space_mapper
.. seealso::
* :func:`VectorSpaceMapper.obtain_cosine_similarity`
"""
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
word1 = word1.lower()
word2 = word2.lower()
vm = self.cross_lang_vm.get((language1, language2))
similarity = None
if vm is not None:
similarity = vm.obtain_cosine_similarity(word1, word2)
distance = 1 - similarity if similarity is not None else None
return json.dumps({"word1": word1, "word2": word2, "distance": distance})
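# Illustrative sketch (not part of the original module): with the English
# and Hindi models loaded, a request such as
#   GET /get_distance?word1=king&word2=<hindi word>&language1=english&language2=hindi
# returns a JSON body of the form
#   {"word1": "king", "word2": "<hindi word>", "distance": 0.42}
# where 0.42 is a made-up value; 'distance' is None when either word is out
# of vocabulary or no vector space mapper exists for the language pair.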
@cherrypy.expose
def retrieve_recommendations(self, language, word, limit=10):
"""
Retrieve number of semantically similar recommendations.
- For the specified word in the given language, retrieve up to 'limit' recommendations
API Documentation
:param language: Language for which recommendations required
:param word: Semantic similar words provided for given word
:param limit: No of words to be recommended [ Default 10 ]
:type language: String
:type word: String
:type limit: Integer
:return: List of recommendations
:rtype: :class:`List`
.. seealso::
* :class:`gensim.models.Word2Vec`
"""
word = word.lower()
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
model = self.model.get(language)
if model is not None:
data = Server._recommend(word, int(limit), fn=model.most_similar)
else:
data = json.dumps(None)
return data
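# Illustrative sketch (not part of the original module): the JSON returned
# above is a list of {"word": ..., "weight": ...} objects, e.g. a call like
#   server.retrieve_recommendations('english', 'king', limit=3)
# might return something shaped like
#   '[{"word": "queen", "weight": 0.71}, ...]'
# with hypothetical words and weights; JSON null is returned when the
# requested language has no loaded Word2Vec model.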
@cherrypy.expose
def get_cross_lingual_recommendations(self, lang1, lang2, word, topn=10):
"""
Provide cross lingual recommendations.
API Documentation
:param lang1: Language 1 for cross lingual recommendations.
:param lang2: Language 2 for cross lingual recommendations.
:param word: Word utilised for cross lingual recommendations.
:param topn: No of recommendations provided.
:type lang1: String
:type lang2: String
:type word: String
:type topn: Integer
:return: List of recommendations
:rtype: :class:`List`
.. seealso::
* :mod:`tvecs.vector_space_mapper.vector_space_mapper`
"""
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
sentence = word_tokenize(word.lower())
vm = self.cross_lang_vm.get((lang1, lang2))
data = None
if vm is not None:
result_vec = reduce(
lambda x, y: x + y, [self.model[lang1][word] for word in sentence]
)
data = Server._recommend(
result_vec, int(topn), fn=vm.get_recommendations_from_vec
)
return data
@cherrypy.expose
def _create_vector_space_mapper(self, lang1, lang2):
"""
Create Vector Space Mapper between Languages.
API Documentation
:param lang1: Language 1 used for building
:class:`tvecs.vector_space_mapper.vector_space_mapper.VectorSpaceMapper`
object
:param lang2: Language 2 used for building
:class:`tvecs.vector_space_mapper.vector_space_mapper.VectorSpaceMapper`
object
:return: VectorSpaceMapper object mapping lang1 to lang2, or None if either language model is unavailable
:rtype: :class:`tvecs.vector_space_mapper.vector_space_mapper.VectorSpaceMapper`
.. seealso::
:mod:`tvecs.vector_space_mapper.vector_space_mapper`
"""
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
vm = None
with codecs.open(
os.path.join(
"data", "bilingual_dictionary", "%s_%s_train_bd" % (lang1, lang2)
),
"r",
encoding="utf-8",
) as file:
data = file.read().split("\n")
bilingual_dict = [(line.split(" ")[0], line.split(" ")[1]) for line in data]
if (self.model.get(lang1) is not None) and (
self.model.get(lang2) is not None
):
vm = VectorSpaceMapper(
self.model[lang1], self.model[lang2], bilingual_dict
)
vm.map_vector_spaces()
return vm
@staticmethod
def _recommend(word, limit, fn):
"""Vector Space Mapper recommend functionality."""
try:
vec_list = fn(word, topn=limit)
except KeyError:
vec_list = None
if vec_list is not None:
data = json.dumps([{"word": tup[0], "weight": tup[1]} for tup in vec_list])
else:
data = json.dumps(None)
return data
@staticmethod
def _load_model(language):
"""Used to load Word2Vec Model."""
return Word2Vec.load(
os.path.join("data", "models", "t-vex-%s-model" % language)
)
if __name__ == "__main__":
"""Setting up the Server with Specified Configuration"""
parser = argparse.ArgumentParser(description="Obtain Server Configuration")
parser.add_argument(
"-c",
"--config",
dest="config",
help="Config File Path",
action="store",
type=str,
default=os.path.join("tvecs", "visualization", "server.conf"),
)
parser.add_argument(
"-p", "--port", dest="port", help="Port", action="store", type=int, default=None
)
parser.add_argument(
"-s",
"--host",
dest="host",
help="Host Name",
action="store",
type=str,
default=None,
)
args = parser.parse_args()
server_config = configparser.RawConfigParser()
env = Environment(loader=FileSystemLoader("static"))
conf = {
"/": {"tools.staticdir.root": os.path.abspath(os.getcwd())},
"/js": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "js"
),
},
"/css": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "css"
),
},
"/images": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "images"
),
},
"/resources": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "resources"
),
},
}
server_port = args.port
server_host = args.host
server_config.read(args.config)
if args.port is None:
server_port = server_config.get("Server", "port")
if args.host is None:
server_host = server_config.get("Server", "host")
thread_pool = server_config.get("Server", "thread_pool")
queue_size = server_config.get("Server", "queue_size")
cherrypy.config.update({"server.socket_host": server_host})
cherrypy.config.update({"server.thread_pool": int(thread_pool)})
cherrypy.config.update({"server.socket_queue_size": int(queue_size)})
cherrypy.config.update(
{"server.socket_port": int(os.environ.get("PORT", server_port))}
)
cherrypy.quickstart(Server(), "/", conf)
| mit |
rdipietro/tensorflow | tensorflow/python/summary/event_accumulator_test.py | 3 | 30392 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator as ea
from tensorflow.python.summary.writer.writer import SummaryToEventTransformer
from tensorflow.python.training import saver
class _EventGenerator(object):
"""Class that can add_events and then yield them back.
Satisfies the EventGenerator API required for the EventAccumulator.
Satisfies the EventWriter API required to create a SummaryWriter.
Has additional convenience methods for adding test events.
"""
def __init__(self, zero_out_timestamps=False):
self.items = []
self.zero_out_timestamps = zero_out_timestamps
def Load(self):
while self.items:
yield self.items.pop(0)
def AddScalar(self, tag, wall_time=0, step=0, value=0):
event = tf.summary.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(
value=[tf.Summary.Value(
tag=tag, simple_value=value)]))
self.AddEvent(event)
def AddHistogram(self,
tag,
wall_time=0,
step=0,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=None,
hbucket=None):
histo = tf.HistogramProto(min=hmin,
max=hmax,
num=hnum,
sum=hsum,
sum_squares=hsum_squares,
bucket_limit=hbucket_limit,
bucket=hbucket)
event = tf.summary.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(
tag=tag, histo=histo)]))
self.AddEvent(event)
def AddImage(self,
tag,
wall_time=0,
step=0,
encoded_image_string=b'imgstr',
width=150,
height=100):
image = tf.Summary.Image(encoded_image_string=encoded_image_string,
width=width,
height=height)
event = tf.summary.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(
tag=tag, image=image)]))
self.AddEvent(event)
def AddAudio(self,
tag,
wall_time=0,
step=0,
encoded_audio_string=b'sndstr',
content_type='audio/wav',
sample_rate=44100,
length_frames=22050):
audio = tf.Summary.Audio(encoded_audio_string=encoded_audio_string,
content_type=content_type,
sample_rate=sample_rate,
length_frames=length_frames)
event = tf.summary.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(
tag=tag, audio=audio)]))
self.AddEvent(event)
def AddEvent(self, event):
if self.zero_out_timestamps:
event.wall_time = 0
self.items.append(event)
def add_event(self, event): # pylint: disable=invalid-name
"""Match the EventWriter API."""
self.AddEvent(event)
class EventAccumulatorTest(tf.test.TestCase):
def assertTagsEqual(self, tags1, tags2):
# Make sure the two dictionaries have the same keys.
self.assertItemsEqual(tags1, tags2)
# Additionally, make sure each key in the dictionary maps to the same value.
for key in tags1:
if isinstance(tags1[key], list):
# We don't care about the order of the values in lists, thus asserting
# only if the items are equal.
self.assertItemsEqual(tags1[key], tags2[key])
else:
# Make sure the values are equal.
self.assertEqual(tags1[key], tags2[key])
class MockingEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(MockingEventAccumulatorTest, self).setUp()
self.stubs = googletest.StubOutForTesting()
self.empty = {ea.IMAGES: [],
ea.AUDIO: [],
ea.SCALARS: [],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: False,
ea.META_GRAPH: False,
ea.RUN_METADATA: []}
self._real_constructor = ea.EventAccumulator
self._real_generator = ea._GeneratorFromPath
def _FakeAccumulatorConstructor(generator, *args, **kwargs):
ea._GeneratorFromPath = lambda x: generator
return self._real_constructor(generator, *args, **kwargs)
ea.EventAccumulator = _FakeAccumulatorConstructor
def tearDown(self):
self.stubs.CleanUp()
ea.EventAccumulator = self._real_constructor
ea._GeneratorFromPath = self._real_generator
def testEmptyAccumulator(self):
gen = _EventGenerator()
x = ea.EventAccumulator(gen)
x.Reload()
self.assertEqual(x.Tags(), self.empty)
def testTags(self):
gen = _EventGenerator()
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
gen.AddAudio('snd1')
gen.AddAudio('snd2')
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.AUDIO: ['snd1', 'snd2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False,
ea.META_GRAPH: False,
ea.RUN_METADATA: []
})
def testReload(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertEqual(acc.Tags(), self.empty)
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
gen.AddAudio('snd1')
gen.AddAudio('snd2')
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.AUDIO: ['snd1', 'snd2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False,
ea.META_GRAPH: False,
ea.RUN_METADATA: []
})
def testScalars(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
s1 = ea.ScalarEvent(wall_time=1, step=10, value=32)
s2 = ea.ScalarEvent(wall_time=2, step=12, value=64)
gen.AddScalar('s1', wall_time=1, step=10, value=32)
gen.AddScalar('s2', wall_time=2, step=12, value=64)
acc.Reload()
self.assertEqual(acc.Scalars('s1'), [s1])
self.assertEqual(acc.Scalars('s2'), [s2])
def testHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
val1 = ea.HistogramValue(min=1,
max=2,
num=3,
sum=4,
sum_squares=5,
bucket_limit=[1, 2, 3],
bucket=[0, 3, 0])
val2 = ea.HistogramValue(min=-2,
max=3,
num=4,
sum=5,
sum_squares=6,
bucket_limit=[2, 3, 4],
bucket=[1, 3, 0])
hst1 = ea.HistogramEvent(wall_time=1, step=10, histogram_value=val1)
hst2 = ea.HistogramEvent(wall_time=2, step=12, histogram_value=val2)
gen.AddHistogram('hst1',
wall_time=1,
step=10,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2',
wall_time=2,
step=12,
hmin=-2,
hmax=3,
hnum=4,
hsum=5,
hsum_squares=6,
hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
self.assertEqual(acc.Histograms('hst1'), [hst1])
self.assertEqual(acc.Histograms('hst2'), [hst2])
def testCompressedHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen, compression_bps=(0, 2500, 5000, 7500, 10000))
gen.AddHistogram('hst1',
wall_time=1,
step=10,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2',
wall_time=2,
step=12,
hmin=-2,
hmax=3,
hnum=4,
hsum=5,
hsum_squares=6,
hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
# Create the expected values after compressing hst1
expected_vals1 = [ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, 1.0), (2500, 1.25), (5000, 1.5), (
7500, 1.75), (10000, 2.0)]]
expected_cmphst1 = ea.CompressedHistogramEvent(
wall_time=1,
step=10,
compressed_histogram_values=expected_vals1)
self.assertEqual(acc.CompressedHistograms('hst1'), [expected_cmphst1])
# Create the expected values after compressing hst2
expected_vals2 = [
ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, -2), (2500, 2), (5000, 2 + 1 / 3), (7500, 2 + 2 / 3
), (10000, 3)]
]
expected_cmphst2 = ea.CompressedHistogramEvent(
wall_time=2,
step=12,
compressed_histogram_values=expected_vals2)
self.assertEqual(acc.CompressedHistograms('hst2'), [expected_cmphst2])
def testCompressHistogram_uglyHistogram(self):
bps = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
histogram_values = ea.HistogramValue(
min=0.0,
max=1.0,
num=960.0,
sum=64.0,
sum_squares=64.0,
bucket_limit=[
0.0, 1e-12, 0.917246389039776, 1.0089710279437536,
1.7976931348623157e+308
],
bucket=[0.0, 896.0, 0.0, 64.0, 0.0])
histogram_event = ea.HistogramEvent(0, 0, histogram_values)
compressed_event = ea._CompressHistogram(histogram_event, bps)
vals = compressed_event.compressed_histogram_values
self.assertEquals(tuple(v.basis_point for v in vals), bps)
self.assertAlmostEqual(vals[0].value, 0.0)
self.assertAlmostEqual(vals[1].value, 7.157142857142856e-14)
self.assertAlmostEqual(vals[2].value, 1.7003571428571426e-13)
self.assertAlmostEqual(vals[3].value, 3.305357142857143e-13)
self.assertAlmostEqual(vals[4].value, 5.357142857142857e-13)
self.assertAlmostEqual(vals[5].value, 7.408928571428571e-13)
self.assertAlmostEqual(vals[6].value, 9.013928571428571e-13)
self.assertAlmostEqual(vals[7].value, 9.998571428571429e-13)
self.assertAlmostEqual(vals[8].value, 1.0)
def testImages(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
im1 = ea.ImageEvent(wall_time=1,
step=10,
encoded_image_string=b'big',
width=400,
height=300)
im2 = ea.ImageEvent(wall_time=2,
step=12,
encoded_image_string=b'small',
width=40,
height=30)
gen.AddImage('im1',
wall_time=1,
step=10,
encoded_image_string=b'big',
width=400,
height=300)
gen.AddImage('im2',
wall_time=2,
step=12,
encoded_image_string=b'small',
width=40,
height=30)
acc.Reload()
self.assertEqual(acc.Images('im1'), [im1])
self.assertEqual(acc.Images('im2'), [im2])
def testAudio(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
snd1 = ea.AudioEvent(wall_time=1,
step=10,
encoded_audio_string=b'big',
content_type='audio/wav',
sample_rate=44100,
length_frames=441000)
snd2 = ea.AudioEvent(wall_time=2,
step=12,
encoded_audio_string=b'small',
content_type='audio/wav',
sample_rate=44100,
length_frames=44100)
gen.AddAudio('snd1',
wall_time=1,
step=10,
encoded_audio_string=b'big',
content_type='audio/wav',
sample_rate=44100,
length_frames=441000)
gen.AddAudio('snd2',
wall_time=2,
step=12,
encoded_audio_string=b'small',
content_type='audio/wav',
sample_rate=44100,
length_frames=44100)
acc.Reload()
self.assertEqual(acc.Audio('snd1'), [snd1])
self.assertEqual(acc.Audio('snd2'), [snd2])
def testKeyError(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
with self.assertRaises(KeyError):
acc.Scalars('s1')
with self.assertRaises(KeyError):
acc.Scalars('hst1')
with self.assertRaises(KeyError):
acc.Scalars('im1')
with self.assertRaises(KeyError):
acc.Histograms('s1')
with self.assertRaises(KeyError):
acc.Histograms('im1')
with self.assertRaises(KeyError):
acc.Images('s1')
with self.assertRaises(KeyError):
acc.Images('hst1')
with self.assertRaises(KeyError):
acc.Audio('s1')
with self.assertRaises(KeyError):
acc.Audio('hst1')
def testNonValueEvents(self):
"""Tests that non-value events in the generator don't cause early exits."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=10, value=20)
gen.AddEvent(tf.summary.Event(wall_time=2, step=20, file_version='nots2'))
gen.AddScalar('s3', wall_time=3, step=100, value=1)
gen.AddHistogram('hst1')
gen.AddImage('im1')
gen.AddAudio('snd1')
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1'],
ea.AUDIO: ['snd1'],
ea.SCALARS: ['s1', 's3'],
ea.HISTOGRAMS: ['hst1'],
ea.COMPRESSED_HISTOGRAMS: ['hst1'],
ea.GRAPH: False,
ea.META_GRAPH: False,
ea.RUN_METADATA: []
})
def testExpiredDataDiscardedAfterRestartForFileVersionLessThan2(self):
"""Tests that events are discarded after a restart is detected.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items with the same tag
that are outdated.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
## Check that number of items are what they should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300 from s1
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
def testOrphanedDataNotDiscardedIfFlagUnset(self):
"""Tests that events are not discarded if purge_orphaned_data is false.
"""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen, purge_orphaned_data=False)
gen.AddEvent(
tf.summary.Event(
wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
## Check that number of items are what they should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
## Check that nothing was discarded from s1, since purge_orphaned_data is False
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300, 101,
201, 301])
def testEventsDiscardedPerTagAfterRestartForFileVersionLessThan2(self):
"""Tests that event discards after restart, only affect the misordered tag.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items that are outdated, but
only for the out of order tag. Other tags should remain unaffected.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
gen.AddScalar('s2', wall_time=1, step=101, value=20)
gen.AddScalar('s2', wall_time=1, step=201, value=20)
gen.AddScalar('s2', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
## Check that s1 discards do not affect s2
## i.e. check that only events from the out of order tag are discarded
self.assertEqual([x.step for x in acc.Scalars('s2')], [101, 201, 301])
def testOnlySummaryEventsTriggerDiscards(self):
"""Test that file version event does not trigger data purge."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=100, value=20)
ev1 = tf.summary.Event(wall_time=2, step=0, file_version='brain.Event:1')
graph_bytes = graph_pb2.GraphDef().SerializeToString()
ev2 = tf.summary.Event(wall_time=3, step=0, graph_def=graph_bytes)
gen.AddEvent(ev1)
gen.AddEvent(ev2)
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100])
def testSessionLogStartMessageDiscardsExpiredEvents(self):
"""Test that SessionLog.START message discards expired events.
This discard logic is preferred over the out-of-order step discard logic,
but this logic can only be used for event protos which have the SessionLog
enum, which was introduced to event.proto for file_version >= brain.Event:2.
"""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=0, step=1, file_version='brain.Event:2'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=400, value=20)
gen.AddScalar('s2', wall_time=1, step=202, value=20)
gen.AddScalar('s2', wall_time=1, step=203, value=20)
slog = tf.summary.SessionLog(status=tf.summary.SessionLog.START)
gen.AddEvent(tf.summary.Event(wall_time=2, step=201, session_log=slog))
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200])
self.assertEqual([x.step for x in acc.Scalars('s2')], [])
def testFirstEventTimestamp(self):
"""Test that FirstEventTimestamp() returns wall_time of the first event."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=10, step=20, file_version='brain.Event:2'))
gen.AddScalar('s1', wall_time=30, step=40, value=20)
self.assertEqual(acc.FirstEventTimestamp(), 10)
def testReloadPopulatesFirstEventTimestamp(self):
"""Test that Reload() means FirstEventTimestamp() won't load events."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=1, step=2, file_version='brain.Event:2'))
acc.Reload()
def _Die(*args, **kwargs): # pylint: disable=unused-argument
raise RuntimeError('Load() should not be called')
self.stubs.Set(gen, 'Load', _Die)
self.assertEqual(acc.FirstEventTimestamp(), 1)
def testFirstEventTimestampLoadsEvent(self):
"""Test that FirstEventTimestamp() doesn't discard the loaded event."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=1, step=2, file_version='brain.Event:2'))
self.assertEqual(acc.FirstEventTimestamp(), 1)
acc.Reload()
self.assertEqual(acc.file_version, 2.0)
def testTFSummaryScalar(self):
"""Verify processing of tf.summary.scalar, which uses TensorSummary op."""
event_sink = _EventGenerator(zero_out_timestamps=True)
writer = SummaryToEventTransformer(event_sink)
with self.test_session() as sess:
ipt = tf.placeholder(tf.float32)
tf.summary.scalar('scalar1', ipt)
tf.summary.scalar('scalar2', ipt * ipt)
merged = tf.contrib.deprecated.merge_all_summaries()
writer.add_graph(sess.graph)
for i in xrange(10):
summ = sess.run(merged, feed_dict={ipt: i})
writer.add_summary(summ, global_step=i)
accumulator = ea.EventAccumulator(event_sink)
accumulator.Reload()
seq1 = [ea.ScalarEvent(wall_time=0, step=i, value=i) for i in xrange(10)]
seq2 = [
ea.ScalarEvent(
wall_time=0, step=i, value=i * i) for i in xrange(10)
]
self.assertTagsEqual(accumulator.Tags(), {
ea.IMAGES: [],
ea.AUDIO: [],
ea.SCALARS: ['scalar1', 'scalar2'],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True,
ea.META_GRAPH: False,
ea.RUN_METADATA: []
})
self.assertEqual(accumulator.Scalars('scalar1'), seq1)
self.assertEqual(accumulator.Scalars('scalar2'), seq2)
first_value = accumulator.Scalars('scalar1')[0].value
self.assertTrue(isinstance(first_value, float))
def testTFSummaryImage(self):
"""Verify processing of tf.summary.image."""
event_sink = _EventGenerator(zero_out_timestamps=True)
writer = SummaryToEventTransformer(event_sink)
with self.test_session() as sess:
ipt = tf.ones([10, 4, 4, 3], tf.uint8)
# This is an interesting example, because the old tf.image_summary op
# would throw an error here, since reusing the same tag was forbidden.
# Deriving the tag from the tf node name instead lets the same argument
# be reused across the name scopes below.
with tf.name_scope('1'):
tf.summary.image('images', ipt, max_outputs=1)
with tf.name_scope('2'):
tf.summary.image('images', ipt, max_outputs=2)
with tf.name_scope('3'):
tf.summary.image('images', ipt, max_outputs=3)
merged = tf.contrib.deprecated.merge_all_summaries()
writer.add_graph(sess.graph)
for i in xrange(10):
summ = sess.run(merged)
writer.add_summary(summ, global_step=i)
accumulator = ea.EventAccumulator(event_sink)
accumulator.Reload()
tags = [
u'1/images/image', u'2/images/image/0', u'2/images/image/1',
u'3/images/image/0', u'3/images/image/1', u'3/images/image/2'
]
self.assertTagsEqual(accumulator.Tags(), {
ea.IMAGES: tags,
ea.AUDIO: [],
ea.SCALARS: [],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True,
ea.META_GRAPH: False,
ea.RUN_METADATA: []
})
class RealisticEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(RealisticEventAccumulatorTest, self).setUp()
def testScalarsRealistically(self):
"""Test accumulator by writing values and then reading them."""
def FakeScalarSummary(tag, value):
value = tf.Summary.Value(tag=tag, simple_value=value)
summary = tf.Summary(value=[value])
return summary
directory = os.path.join(self.get_temp_dir(), 'values_dir')
if gfile.IsDirectory(directory):
gfile.DeleteRecursively(directory)
gfile.MkDir(directory)
writer = tf.summary.FileWriter(directory, max_queue=100)
with tf.Graph().as_default() as graph:
_ = tf.constant([2.0, 1.0])
# Add a graph to the summary writer.
writer.add_graph(graph)
meta_graph_def = saver.export_meta_graph(
graph_def=graph.as_graph_def(add_shapes=True))
writer.add_meta_graph(meta_graph_def)
run_metadata = tf.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = 'test device'
writer.add_run_metadata(run_metadata, 'test run')
# Write a bunch of events using the writer.
for i in xrange(30):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i * i)
writer.add_summary(summ_id, i * 5)
writer.add_summary(summ_sq, i * 5)
writer.flush()
# Verify that we can load those events properly
acc = ea.EventAccumulator(directory)
acc.Reload()
self.assertTagsEqual(
acc.Tags(),
{
ea.IMAGES: [],
ea.AUDIO: [],
ea.SCALARS: ['id', 'sq'],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True,
ea.META_GRAPH: True,
ea.RUN_METADATA: ['test run']
})
id_events = acc.Scalars('id')
sq_events = acc.Scalars('sq')
self.assertEqual(30, len(id_events))
self.assertEqual(30, len(sq_events))
for i in xrange(30):
self.assertEqual(i * 5, id_events[i].step)
self.assertEqual(i * 5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i * i, sq_events[i].value)
# Write a few more events to test incremental reloading
for i in xrange(30, 40):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i * i)
writer.add_summary(summ_id, i * 5)
writer.add_summary(summ_sq, i * 5)
writer.flush()
# Verify we can now see all of the data
acc.Reload()
id_events = acc.Scalars('id')
sq_events = acc.Scalars('sq')
self.assertEqual(40, len(id_events))
self.assertEqual(40, len(sq_events))
for i in xrange(40):
self.assertEqual(i * 5, id_events[i].step)
self.assertEqual(i * 5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i * i, sq_events[i].value)
self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
def testGraphFromMetaGraphBecomesAvailable(self):
"""Test accumulator by writing values and then reading them."""
directory = os.path.join(self.get_temp_dir(), 'metagraph_test_values_dir')
if gfile.IsDirectory(directory):
gfile.DeleteRecursively(directory)
gfile.MkDir(directory)
writer = tf.summary.FileWriter(directory, max_queue=100)
with tf.Graph().as_default() as graph:
_ = tf.constant([2.0, 1.0])
# Add a graph to the summary writer.
meta_graph_def = saver.export_meta_graph(
graph_def=graph.as_graph_def(add_shapes=True))
writer.add_meta_graph(meta_graph_def)
writer.flush()
# Verify that we can load those events properly
acc = ea.EventAccumulator(directory)
acc.Reload()
self.assertTagsEqual(
acc.Tags(),
{
ea.IMAGES: [],
ea.AUDIO: [],
ea.SCALARS: [],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True,
ea.META_GRAPH: True,
ea.RUN_METADATA: []
})
self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
bonitadecker77/python-for-android | python3-alpha/extra_modules/gdata/notebook/data.py | 125 | 1426 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Google Notebook Data API"""
__author__ = '[email protected] (Jeff Scudder)'
import atom.core
import atom.data
import gdata.data
import gdata.opensearch.data
NB_TEMPLATE = '{http://schemas.google.com/notes/2008/}%s'
class ComesAfter(atom.core.XmlElement):
"""Preceding element."""
_qname = NB_TEMPLATE % 'comesAfter'
id = 'id'
class NoteEntry(gdata.data.GDEntry):
"""Describes a note entry in the feed of a user's notebook."""
class NotebookFeed(gdata.data.GDFeed):
"""Describes a notebook feed."""
entry = [NoteEntry]
class NotebookListEntry(gdata.data.GDEntry):
"""Describes a note list entry in the feed of a user's list of public notebooks."""
class NotebookListFeed(gdata.data.GDFeed):
"""Describes a notebook list feed."""
entry = [NotebookListEntry]
| apache-2.0 |
albertz/music-player | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsimage.py | 3 | 9049 | from PyObjCTools.TestSupport import *
import AppKit
from AppKit import *
try:
unicode
except NameError:
unicode = str
class TestNSImageHelper (NSObject):
def image_didLoadRepresentation_withStatus_(self, i, r, s): pass
def image_didLoadPartOfRepresentation_withValidRows_(self, i, r, c): pass
class TestNSImage (TestCase):
def test_compositePoint(self):
# comes straight from ReSTedit. Works on PPC, not on Intel (as of r1791)
ws = AppKit.NSWorkspace.sharedWorkspace()
txtIcon = ws.iconForFileType_("txt")
txtIcon.setSize_( (16,16) )
htmlIcon = ws.iconForFileType_("html")
htmlIcon.setSize_( (16,16) )
comboIcon = AppKit.NSImage.alloc().initWithSize_( (100,100) )
comboIcon.lockFocus()
txtIcon.compositeToPoint_fromRect_operation_((0,0), ((0,0),(16,16)), AppKit.NSCompositeCopy)
htmlIcon.compositeToPoint_fromRect_operation_((8,0), ((8,0),(8,16)), AppKit.NSCompositeCopy)
comboIcon.unlockFocus()
def testConstants(self):
self.assertEqual(NSImageLoadStatusCompleted, 0)
self.assertEqual(NSImageLoadStatusCancelled, 1)
self.assertEqual(NSImageLoadStatusInvalidData, 2)
self.assertEqual(NSImageLoadStatusUnexpectedEOF, 3)
self.assertEqual(NSImageLoadStatusReadError, 4)
self.assertEqual(NSImageCacheDefault, 0)
self.assertEqual(NSImageCacheAlways, 1)
self.assertEqual(NSImageCacheBySize, 2)
self.assertEqual(NSImageCacheNever, 3)
@min_os_level("10.5")
def testConstants10_5(self):
self.assertIsInstance( NSImageNameQuickLookTemplate, unicode)
self.assertIsInstance( NSImageNameBluetoothTemplate, unicode)
self.assertIsInstance( NSImageNameIChatTheaterTemplate, unicode)
self.assertIsInstance( NSImageNameSlideshowTemplate, unicode)
self.assertIsInstance( NSImageNameActionTemplate, unicode)
self.assertIsInstance( NSImageNameSmartBadgeTemplate, unicode)
self.assertIsInstance( NSImageNameIconViewTemplate, unicode)
self.assertIsInstance( NSImageNameListViewTemplate, unicode)
self.assertIsInstance( NSImageNameColumnViewTemplate, unicode)
self.assertIsInstance( NSImageNameFlowViewTemplate, unicode)
self.assertIsInstance( NSImageNamePathTemplate, unicode)
self.assertIsInstance( NSImageNameInvalidDataFreestandingTemplate, unicode)
self.assertIsInstance( NSImageNameLockLockedTemplate, unicode)
self.assertIsInstance( NSImageNameLockUnlockedTemplate, unicode)
self.assertIsInstance( NSImageNameGoRightTemplate, unicode)
self.assertIsInstance( NSImageNameGoLeftTemplate, unicode)
self.assertIsInstance( NSImageNameRightFacingTriangleTemplate, unicode)
self.assertIsInstance( NSImageNameLeftFacingTriangleTemplate, unicode)
self.assertIsInstance( NSImageNameAddTemplate, unicode)
self.assertIsInstance( NSImageNameRemoveTemplate, unicode)
self.assertIsInstance( NSImageNameRevealFreestandingTemplate, unicode)
self.assertIsInstance( NSImageNameFollowLinkFreestandingTemplate, unicode)
self.assertIsInstance( NSImageNameEnterFullScreenTemplate, unicode)
self.assertIsInstance( NSImageNameExitFullScreenTemplate, unicode)
self.assertIsInstance( NSImageNameStopProgressTemplate, unicode)
self.assertIsInstance( NSImageNameStopProgressFreestandingTemplate, unicode)
self.assertIsInstance( NSImageNameRefreshTemplate, unicode)
self.assertIsInstance( NSImageNameRefreshFreestandingTemplate, unicode)
self.assertIsInstance( NSImageNameBonjour, unicode)
self.assertIsInstance( NSImageNameDotMac, unicode)
self.assertIsInstance( NSImageNameComputer, unicode)
self.assertIsInstance( NSImageNameFolderBurnable, unicode)
self.assertIsInstance( NSImageNameFolderSmart, unicode)
self.assertIsInstance( NSImageNameNetwork, unicode)
self.assertIsInstance( NSImageNameMultipleDocuments, unicode)
self.assertIsInstance( NSImageNameUserAccounts, unicode)
self.assertIsInstance( NSImageNamePreferencesGeneral, unicode)
self.assertIsInstance( NSImageNameAdvanced, unicode)
self.assertIsInstance( NSImageNameInfo, unicode)
self.assertIsInstance( NSImageNameFontPanel, unicode)
self.assertIsInstance( NSImageNameColorPanel, unicode)
self.assertIsInstance( NSImageNameUser, unicode)
self.assertIsInstance( NSImageNameUserGroup, unicode)
self.assertIsInstance( NSImageNameEveryone, unicode)
def testMethods(self):
self.assertResultIsBOOL(NSImage.setName_)
self.assertArgIsBOOL(NSImage.setScalesWhenResized_, 0)
self.assertResultIsBOOL(NSImage.scalesWhenResized)
self.assertArgIsBOOL(NSImage.setDataRetained_, 0)
self.assertResultIsBOOL(NSImage.isDataRetained)
self.assertArgIsBOOL(NSImage.setCachedSeparately_, 0)
self.assertResultIsBOOL(NSImage.isCachedSeparately)
self.assertArgIsBOOL(NSImage.setCacheDepthMatchesImageDepth_, 0)
self.assertResultIsBOOL(NSImage.cacheDepthMatchesImageDepth)
self.assertArgIsBOOL(NSImage.setUsesEPSOnResolutionMismatch_, 0)
self.assertResultIsBOOL(NSImage.usesEPSOnResolutionMismatch)
self.assertArgIsBOOL(NSImage.setPrefersColorMatch_, 0)
self.assertResultIsBOOL(NSImage.prefersColorMatch)
self.assertArgIsBOOL(NSImage.setMatchesOnMultipleResolution_, 0)
self.assertResultIsBOOL(NSImage.matchesOnMultipleResolution)
self.assertResultIsBOOL(NSImage.drawRepresentation_inRect_)
self.assertResultIsBOOL(NSImage.isValid)
self.assertResultIsBOOL(NSImage.canInitWithPasteboard_)
self.assertResultIsBOOL(NSImage.isFlipped)
self.assertArgIsBOOL(NSImage.setFlipped_, 0)
self.assertResultIsBOOL(NSImage.isTemplate)
self.assertArgIsBOOL(NSImage.setTemplate_, 0)
def testProtocols(self):
self.assertArgHasType(TestNSImageHelper.image_didLoadPartOfRepresentation_withValidRows_, 2, objc._C_NSInteger)
self.assertArgHasType(TestNSImageHelper.image_didLoadRepresentation_withStatus_, 2, objc._C_NSUInteger)
@min_os_level('10.6')
def testMethods10_6(self):
self.assertArgHasType(NSImage.drawInRect_fromRect_operation_fraction_respectFlipped_hints_,
0, NSRect.__typestr__)
self.assertArgIsBOOL(NSImage.drawInRect_fromRect_operation_fraction_respectFlipped_hints_, 4)
self.assertArgIsBOOL(NSImage.lockFocusFlipped_, 0)
self.assertArgHasType(NSImage.initWithCGImage_size_, 1, NSSize.__typestr__)
self.assertArgHasType(NSImage.CGImageForProposedRect_context_hints_, 0, b'o^' + NSRect.__typestr__)
self.assertArgHasType(NSImage.bestRepresentationForRect_context_hints_, 0, NSRect.__typestr__)
self.assertResultIsBOOL(NSImage.hitTestRect_withImageDestinationRect_context_hints_flipped_)
self.assertArgHasType(NSImage.hitTestRect_withImageDestinationRect_context_hints_flipped_, 0, NSRect.__typestr__)
self.assertArgHasType(NSImage.hitTestRect_withImageDestinationRect_context_hints_flipped_, 1, NSRect.__typestr__)
@min_os_level('10.7')
def testMethods10_7(self):
self.assertResultIsBOOL(NSImage.matchesOnlyOnBestFittingAxis)
self.assertArgIsBOOL(NSImage.setMatchesOnlyOnBestFittingAxis_, 0)
@min_os_level('10.8')
def testMethods10_8(self):
self.assertArgIsBOOL(NSImage.imageWithSize_flipped_drawingHandler_, 1)
self.assertArgIsBlock(NSImage.imageWithSize_flipped_drawingHandler_, 2,
objc._C_NSBOOL + NSRect.__typestr__)
@min_os_level('10.6')
def testConstants10_6(self):
self.assertIsInstance(NSImageHintCTM, unicode)
self.assertIsInstance(NSImageHintInterpolation, unicode)
self.assertIsInstance(NSImageNameFolder, unicode)
self.assertIsInstance(NSImageNameMobileMe, unicode)
self.assertIsInstance(NSImageNameUserGuest, unicode)
self.assertIsInstance(NSImageNameMenuOnStateTemplate, unicode)
self.assertIsInstance(NSImageNameMenuMixedStateTemplate, unicode)
self.assertIsInstance(NSImageNameApplicationIcon, unicode)
self.assertIsInstance(NSImageNameTrashEmpty, unicode)
self.assertIsInstance(NSImageNameTrashFull, unicode)
self.assertIsInstance(NSImageNameHomeTemplate, unicode)
self.assertIsInstance(NSImageNameBookmarksTemplate, unicode)
self.assertIsInstance(NSImageNameCaution, unicode)
self.assertIsInstance(NSImageNameStatusAvailable, unicode)
self.assertIsInstance(NSImageNameStatusPartiallyAvailable, unicode)
self.assertIsInstance(NSImageNameStatusUnavailable, unicode)
self.assertIsInstance(NSImageNameStatusNone, unicode)
@min_os_level('10.8')
def testConstants10_8(self):
self.assertIsInstance(NSImageNameShareTemplate, unicode)
if __name__ == "__main__":
main()
| bsd-2-clause |
xuyuhan/depot_tools | third_party/logilab/common/sphinx_ext.py | 117 | 3329 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
from logilab.common.decorators import monkeypatch
from sphinx.ext import autodoc
class DocstringOnlyModuleDocumenter(autodoc.ModuleDocumenter):
objtype = 'docstring'
def format_signature(self):
pass
def add_directive_header(self, sig):
pass
def document_members(self, all_members=False):
pass
def resolve_name(self, modname, parents, path, base):
if modname is not None:
return modname, parents + [base]
return (path or '') + base, []
#autodoc.add_documenter(DocstringOnlyModuleDocumenter)
def setup(app):
app.add_autodocumenter(DocstringOnlyModuleDocumenter)
from sphinx.ext.autodoc import (ViewList, Options, AutodocReporter, nodes,
assemble_option_dict, nested_parse_with_titles)
@monkeypatch(autodoc.AutoDirective)
def run(self):
self.filename_set = set() # a set of dependent filenames
self.reporter = self.state.document.reporter
self.env = self.state.document.settings.env
self.warnings = []
self.result = ViewList()
# find out what documenter to call
objtype = self.name[4:]
doc_class = self._registry[objtype]
# process the options with the selected documenter's option_spec
self.genopt = Options(assemble_option_dict(
self.options.items(), doc_class.option_spec))
# generate the output
documenter = doc_class(self, self.arguments[0])
documenter.generate(more_content=self.content)
if not self.result:
return self.warnings
# record all filenames as dependencies -- this will at least
# partially make automatic invalidation possible
for fn in self.filename_set:
self.env.note_dependency(fn)
# use a custom reporter that correctly assigns lines to source
# filename/description and lineno
old_reporter = self.state.memo.reporter
self.state.memo.reporter = AutodocReporter(self.result,
self.state.memo.reporter)
if self.name in ('automodule', 'autodocstring'):
node = nodes.section()
# necessary so that the child nodes get the right source/line set
node.document = self.state.document
nested_parse_with_titles(self.state, self.result, node)
else:
node = nodes.paragraph()
node.document = self.state.document
self.state.nested_parse(self.result, 0, node)
self.state.memo.reporter = old_reporter
return self.warnings + node.children
| bsd-3-clause |
mer-hybris/android_kernel_lge_hammerhead | scripts/gcc-wrapper.py | 1276 | 3382 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
])
# Capture the name of the object file, so we can find and remove it if a warning is hit.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
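# Illustrative sketch (added for clarity, not part of the original script):
# a gcc diagnostic such as the hypothetical line
#
#   drivers/gpu/foo.c:123:7: warning: unused variable 'x' [-Wunused-variable]
#
# matches warning_re with group(2) == 'foo.c:123'.  Because that key is not in
# allowed_warnings, interpret_warning() deletes the partially built object file
# and exits with status 1; a warning from 'return_address.c:62' would be
# tolerated because it is whitelisted above.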
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| gpl-2.0 |
hazelcast/hazelcast-python-client | tests/unit/discovery/hazelcast_cloud_discovery_test.py | 1 | 5900 | import ssl
import os
import threading
from hazelcast.six.moves import BaseHTTPServer
from hazelcast import six
from unittest import TestCase
from hazelcast.core import Address
from hazelcast.errors import HazelcastCertificationError
from hazelcast.discovery import HazelcastCloudDiscovery
from hazelcast.client import HazelcastClient
from tests.util import get_abs_path
TOKEN = "123abc456"
PRIVATE_LINK_TOKEN = "abc123def"
CLOUD_URL = HazelcastCloudDiscovery._CLOUD_URL_PATH
RESPONSE = """[
{"private-address":"10.47.0.8","public-address":"54.213.63.142:32298"},
{"private-address":"10.47.0.9","public-address":"54.245.77.185:32298"},
{"private-address":"10.47.0.10","public-address":"54.186.232.37:32298"}
]"""
PRIVATE_LINK_RESPONSE = """[
{"private-address":"100.96.5.1:5701","public-address":"10.113.44.139:31115"},
{"private-address":"100.96.4.2:5701","public-address":"10.113.44.130:31115"}
]"""
HOST = "localhost"
ADDRESSES = {
Address("10.47.0.8", 32298): Address("54.213.63.142", 32298),
Address("10.47.0.9", 32298): Address("54.245.77.185", 32298),
Address("10.47.0.10", 32298): Address("54.186.232.37", 32298),
}
PRIVATE_LINK_ADDRESSES = {
Address("100.96.5.1", 5701): Address("10.113.44.139", 31115),
Address("100.96.4.2", 5701): Address("10.113.44.130", 31115),
}
class CloudHTTPHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
idx = self.path.find("=")
if idx > 0:
if self.path[: idx + 1] == CLOUD_URL:
# Found a cluster with the given token
token = self.path[idx + 1 :]
if token == TOKEN:
self._set_response(200, RESPONSE)
elif token == PRIVATE_LINK_TOKEN:
self._set_response(200, PRIVATE_LINK_RESPONSE)
# Can not find a cluster with the given token
else:
self._set_response(
404,
'{"message":"Cluster with token: ' + self.path[idx + 1 :] + ' not found."}',
)
else:
# Wrong URL
self._set_response(404, "default backend - 404")
def _set_response(self, status, message):
self.send_response(status)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(six.b(message))
class Server(object):
cur_dir = os.path.dirname(__file__)
def __init__(self):
self.server = BaseHTTPServer.HTTPServer((HOST, 0), CloudHTTPHandler)
self.server.socket = ssl.wrap_socket(
self.server.socket,
get_abs_path(self.cur_dir, "key.pem"),
get_abs_path(self.cur_dir, "cert.pem"),
server_side=True,
)
self.port = self.server.socket.getsockname()[1]
def start_server(self):
self.server.serve_forever()
def close_server(self):
self.server.shutdown()
class TestClient(HazelcastClient):
def _start(self):
# Let the client to initialize the cloud address provider and translator, don't actually start it.
pass
class HazelcastCloudDiscoveryTest(TestCase):
cur_dir = os.path.dirname(__file__)
@classmethod
def setUpClass(cls):
cls.ctx = ssl.create_default_context(cafile=get_abs_path(cls.cur_dir, "cert.pem"))
cls.server = Server()
cls.server_thread = threading.Thread(target=cls.server.start_server)
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
cls.server.close_server()
def test_found_response(self):
discovery = create_discovery(HOST, self.server.port, CLOUD_URL, TOKEN)
discovery._ctx = self.ctx
addresses = discovery.discover_nodes()
six.assertCountEqual(self, ADDRESSES, addresses)
def test_private_link_response(self):
discovery = create_discovery(HOST, self.server.port, CLOUD_URL, PRIVATE_LINK_TOKEN)
discovery._ctx = self.ctx
addresses = discovery.discover_nodes()
six.assertCountEqual(self, PRIVATE_LINK_ADDRESSES, addresses)
def test_not_found_response(self):
discovery = create_discovery(HOST, self.server.port, CLOUD_URL, "INVALID_TOKEN")
discovery._ctx = self.ctx
with self.assertRaises(IOError):
discovery.discover_nodes()
def test_invalid_url(self):
discovery = create_discovery(HOST, self.server.port, "/INVALID_URL", "")
discovery._ctx = self.ctx
with self.assertRaises(IOError):
discovery.discover_nodes()
def test_invalid_certificates(self):
discovery = create_discovery(HOST, self.server.port, CLOUD_URL, TOKEN)
with self.assertRaises(HazelcastCertificationError):
discovery.discover_nodes()
def test_client_with_cloud_discovery(self):
old = HazelcastCloudDiscovery._CLOUD_URL_BASE
try:
HazelcastCloudDiscovery._CLOUD_URL_BASE = "%s:%s" % (HOST, self.server.port)
client = TestClient(cloud_discovery_token=TOKEN)
client._address_provider.cloud_discovery._ctx = self.ctx
private_addresses, secondaries = client._address_provider.load_addresses()
six.assertCountEqual(self, list(ADDRESSES.keys()), private_addresses)
six.assertCountEqual(self, secondaries, [])
for private_address in private_addresses:
translated_address = client._address_provider.translate(private_address)
self.assertEqual(ADDRESSES[private_address], translated_address)
finally:
HazelcastCloudDiscovery._CLOUD_URL_BASE = old
def create_discovery(host, port, url, token, timeout=5.0):
discovery = HazelcastCloudDiscovery(token, timeout)
discovery._CLOUD_URL_BASE = "%s:%s" % (host, port)
discovery._CLOUD_URL_PATH = url
return discovery
| apache-2.0 |
renzoolivares/android_kernel_htc_monarudo | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
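# Illustrative sketch (added; not part of the original script).  Test files are
# parsed below as colon-separated "command: opcode: threadid: data" lines, for
# example (hypothetical lines; real .tst files live next to this script in the
# kernel tree):
#
#   C: schedfifo: 0: 80
#   C: locknowait: 0: 0
#   W: locked: 0: 0
#   T: prioeq: 0: 80
#
# which would set thread 0 to SCHED_FIFO priority 80, have it take lock 0
# without blocking, wait until the tester reports the lock as held, and then
# check that the thread's priority is still 80.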
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
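# Illustrative note (added for clarity): for the "M" (mutex state) checks the
# status value is treated as a decimal number whose digits encode one lock
# each, so analyse(val, top, arg) extracts digit number `arg`.  For example, a
# hypothetical status value of 430 with arg "1" yields (430 / 10) % 10 == 3,
# which compared "eq" against test_opcodes["blockedwake"][2] == 3 is a match.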
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
40223234/40223234 | static/Brython3.1.1-20150328-091302/Lib/posixpath.py | 722 | 14212 | """Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime","islink","exists","lexists","isdir","isfile",
"ismount", "expanduser","expandvars","normpath","abspath",
"samefile","sameopenfile","samestat",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames","relpath"]
# Strings representing various path-related bits and pieces.
# These are primarily for export; internally, they are hardcoded.
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None
devnull = '/dev/null'
def _get_sep(path):
if isinstance(path, bytes):
return b'/'
else:
return '/'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
"""Normalize case of pathname. Has no effect under Posix"""
# TODO: on Mac OS X, this should really return s.lower().
if not isinstance(s, (bytes, str)):
raise TypeError("normcase() argument must be str or bytes, "
"not '{}'".format(s.__class__.__name__))
return s
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
"""Test whether a path is absolute"""
sep = _get_sep(s)
return s.startswith(sep)
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
"""Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components
will be discarded. An empty last part will result in a path that
ends with a separator."""
sep = _get_sep(a)
path = a
try:
for b in p:
if b.startswith(sep):
path = b
elif not path or path.endswith(sep):
path += b
else:
path += sep + b
except TypeError:
valid_types = all(isinstance(s, (str, bytes, bytearray))
for s in (a, ) + p)
if valid_types:
# Must have a mixture of text and binary data
raise TypeError("Can't mix strings and bytes in path "
"components.") from None
raise
return path
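# Doctest-style illustration (added; not part of the original module) of the
# behaviour described above, assuming POSIX separators:
#
#   >>> join('/usr', 'lib', 'python')
#   '/usr/lib/python'
#   >>> join('/usr', '/lib')     # absolute component discards what came before
#   '/lib'
#   >>> join('dir', '')          # empty last part keeps a trailing separator
#   'dir/'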
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
"""Split a pathname. Returns tuple "(head, tail)" where "tail" is
everything after the final slash. Either part may be empty."""
sep = _get_sep(p)
i = p.rfind(sep) + 1
head, tail = p[:i], p[i:]
if head and head != sep*len(head):
head = head.rstrip(sep)
return head, tail
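# Doctest-style illustration (added; not part of the original module):
#
#   >>> split('/usr/lib/python')
#   ('/usr/lib', 'python')
#   >>> split('/usr/')           # trailing slash: tail is empty
#   ('/usr', '')
#   >>> split('python')          # no slash: head is empty
#   ('', 'python')
#   >>> split('/')               # the root keeps its slash
#   ('/', '')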
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
if isinstance(p, bytes):
sep = b'/'
extsep = b'.'
else:
sep = '/'
extsep = '.'
return genericpath._splitext(p, sep, None, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
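# Doctest-style illustration (added; not part of the original module):
#
#   >>> splitext('foo.tar.gz')   # only the last dot starts the extension
#   ('foo.tar', '.gz')
#   >>> splitext('.cshrc')       # leading dots do not count as extensions
#   ('.cshrc', '')
#   >>> splitext('/dir.d/file')  # dots in the directory part are ignored
#   ('/dir.d/file', '')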
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
"""Split a pathname into drive and path. On Posix, drive is always
empty."""
return p[:0], p
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
"""Returns the final component of a pathname"""
sep = _get_sep(p)
i = p.rfind(sep) + 1
return p[i:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
"""Returns the directory component of a pathname"""
sep = _get_sep(p)
i = p.rfind(sep) + 1
head = p[:i]
if head and head != sep*len(head):
head = head.rstrip(sep)
return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
"""Test whether a path is a symbolic link"""
try:
st = os.lstat(path)
except (os.error, AttributeError):
return False
return stat.S_ISLNK(st.st_mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
"""Test whether a path exists. Returns True for broken symbolic links"""
try:
os.lstat(path)
except os.error:
return False
return True
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
"""Test whether two pathnames reference the same actual file"""
s1 = os.stat(f1)
s2 = os.stat(f2)
return samestat(s1, s2)
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
"""Test whether two open file objects reference the same file"""
s1 = os.fstat(fp1)
s2 = os.fstat(fp2)
return samestat(s1, s2)
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
"""Test whether two stat buffers reference the same file"""
return s1.st_ino == s2.st_ino and \
s1.st_dev == s2.st_dev
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
"""Test whether a path is a mount point"""
if islink(path):
# A symlink can never be a mount point
return False
try:
s1 = os.lstat(path)
if isinstance(path, bytes):
parent = join(path, b'..')
else:
parent = join(path, '..')
s2 = os.lstat(parent)
except os.error:
return False # It doesn't exist -- so not a mount point :-)
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
return True # path/.. on a different device as path
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
return True # path/.. is the same i-node as path
return False
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if isinstance(path, bytes):
tilde = b'~'
else:
tilde = '~'
if not path.startswith(tilde):
return path
sep = _get_sep(path)
i = path.find(sep, 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = os.environ['HOME']
else:
import pwd
name = path[1:i]
if isinstance(name, bytes):
name = str(name, 'ASCII')
try:
pwent = pwd.getpwnam(name)
except KeyError:
return path
userhome = pwent.pw_dir
if isinstance(path, bytes):
userhome = os.fsencode(userhome)
root = b'/'
else:
root = '/'
userhome = userhome.rstrip(root)
return (userhome + path[i:]) or root
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
_varprog = None
_varprogb = None
def expandvars(path):
"""Expand shell variables of form $var and ${var}. Unknown variables
are left unchanged."""
global _varprog, _varprogb
if isinstance(path, bytes):
if b'$' not in path:
return path
if not _varprogb:
import re
_varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
search = _varprogb.search
start = b'{'
end = b'}'
else:
if '$' not in path:
return path
if not _varprog:
import re
_varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
search = _varprog.search
start = '{'
end = '}'
i = 0
while True:
m = search(path, i)
if not m:
break
i, j = m.span(0)
name = m.group(1)
if name.startswith(start) and name.endswith(end):
name = name[1:-1]
if isinstance(name, bytes):
name = str(name, 'ASCII')
if name in os.environ:
tail = path[j:]
value = os.environ[name]
if isinstance(path, bytes):
value = value.encode('ASCII')
path = path[:i] + value
i = len(path)
path += tail
else:
i = j
return path
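# Illustration (added; not part of the original module).  With a hypothetical
# environment containing USER=alice:
#
#   >>> expandvars('$USER/data')
#   'alice/data'
#   >>> expandvars('${USER}/data')
#   'alice/data'
#   >>> expandvars('$NO_SUCH_VAR/data')   # unknown variables are left as-is
#   '$NO_SUCH_VAR/data'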
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
if isinstance(path, bytes):
sep = b'/'
empty = b''
dot = b'.'
dotdot = b'..'
else:
sep = '/'
empty = ''
dot = '.'
dotdot = '..'
if path == empty:
return dot
initial_slashes = path.startswith(sep)
# POSIX allows one or two initial slashes, but treats three or more
# as single slash.
if (initial_slashes and
path.startswith(sep*2) and not path.startswith(sep*3)):
initial_slashes = 2
comps = path.split(sep)
new_comps = []
for comp in comps:
if comp in (empty, dot):
continue
if (comp != dotdot or (not initial_slashes and not new_comps) or
(new_comps and new_comps[-1] == dotdot)):
new_comps.append(comp)
elif new_comps:
new_comps.pop()
comps = new_comps
path = sep.join(comps)
if initial_slashes:
path = sep*initial_slashes + path
return path or dot
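# Doctest-style illustration (added; not part of the original module):
#
#   >>> normpath('A//B')
#   'A/B'
#   >>> normpath('A/./B')
#   'A/B'
#   >>> normpath('A/foo/../B')
#   'A/B'
#   >>> normpath('')
#   '.'
#   >>> normpath('//host/share')   # exactly two leading slashes are preserved
#   '//host/share'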
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
if isinstance(path, bytes):
cwd = os.getcwdb()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
"""Return the canonical path of the specified filename, eliminating any
symbolic links encountered in the path."""
path, ok = _joinrealpath(filename[:0], filename, {})
return abspath(path)
# Join two paths, normalizing and eliminating any symbolic links
# encountered in the second path.
def _joinrealpath(path, rest, seen):
if isinstance(path, bytes):
sep = b'/'
curdir = b'.'
pardir = b'..'
else:
sep = '/'
curdir = '.'
pardir = '..'
if isabs(rest):
rest = rest[1:]
path = sep
while rest:
name, _, rest = rest.partition(sep)
if not name or name == curdir:
# current dir
continue
if name == pardir:
# parent dir
if path:
path, name = split(path)
if name == pardir:
path = join(path, pardir, pardir)
else:
path = pardir
continue
newpath = join(path, name)
if not islink(newpath):
path = newpath
continue
# Resolve the symbolic link
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None:
# use cached value
continue
# The symlink is not resolved, so we must have a symlink loop.
# Return already resolved part + rest of the path unchanged.
return join(newpath, rest), False
seen[newpath] = None # not resolved symlink
path, ok = _joinrealpath(path, os.readlink(newpath), seen)
if not ok:
return join(path, rest), False
seen[newpath] = path # resolved symlink
return path, True
supports_unicode_filenames = (sys.platform == 'darwin')
def relpath(path, start=None):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
if isinstance(path, bytes):
curdir = b'.'
sep = b'/'
pardir = b'..'
else:
curdir = '.'
sep = '/'
pardir = '..'
if start is None:
start = curdir
start_list = [x for x in abspath(start).split(sep) if x]
path_list = [x for x in abspath(path).split(sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
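# Doctest-style illustration (added; not part of the original module):
#
#   >>> relpath('/a/b/c', '/a')
#   'b/c'
#   >>> relpath('/a/b', '/a/c')
#   '../b'
#   >>> relpath('/a/b', '/a/b')
#   '.'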
| gpl-3.0 |
tensorflow/tensorflow | tensorflow/python/keras/initializers/initializers_v1.py | 6 | 4404 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializers for TF 1."""
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops
from tensorflow.python.util.tf_export import keras_export
_v1_zeros_initializer = init_ops.Zeros
_v1_ones_initializer = init_ops.Ones
_v1_constant_initializer = init_ops.Constant
_v1_variance_scaling_initializer = init_ops.VarianceScaling
_v1_orthogonal_initializer = init_ops.Orthogonal
_v1_identity = init_ops.Identity
_v1_glorot_uniform_initializer = init_ops.GlorotUniform
_v1_glorot_normal_initializer = init_ops.GlorotNormal
keras_export(v1=['keras.initializers.Zeros', 'keras.initializers.zeros'])(
_v1_zeros_initializer)
keras_export(v1=['keras.initializers.Ones', 'keras.initializers.ones'])(
_v1_ones_initializer)
keras_export(v1=['keras.initializers.Constant', 'keras.initializers.constant'])(
_v1_constant_initializer)
keras_export(v1=['keras.initializers.VarianceScaling'])(
_v1_variance_scaling_initializer)
keras_export(v1=['keras.initializers.Orthogonal',
'keras.initializers.orthogonal'])(_v1_orthogonal_initializer)
keras_export(v1=['keras.initializers.Identity',
'keras.initializers.identity'])(_v1_identity)
keras_export(v1=['keras.initializers.glorot_uniform'])(
_v1_glorot_uniform_initializer)
keras_export(v1=['keras.initializers.glorot_normal'])(
_v1_glorot_normal_initializer)
@keras_export(v1=['keras.initializers.RandomNormal',
'keras.initializers.random_normal',
'keras.initializers.normal'])
class RandomNormal(init_ops.RandomNormal):
def __init__(self, mean=0.0, stddev=0.05, seed=None, dtype=dtypes.float32):
super(RandomNormal, self).__init__(
mean=mean, stddev=stddev, seed=seed, dtype=dtype)
@keras_export(v1=['keras.initializers.RandomUniform',
'keras.initializers.random_uniform',
'keras.initializers.uniform'])
class RandomUniform(init_ops.RandomUniform):
def __init__(self, minval=-0.05, maxval=0.05, seed=None,
dtype=dtypes.float32):
super(RandomUniform, self).__init__(
minval=minval, maxval=maxval, seed=seed, dtype=dtype)
@keras_export(v1=['keras.initializers.TruncatedNormal',
'keras.initializers.truncated_normal'])
class TruncatedNormal(init_ops.TruncatedNormal):
def __init__(self, mean=0.0, stddev=0.05, seed=None, dtype=dtypes.float32):
super(TruncatedNormal, self).__init__(
mean=mean, stddev=stddev, seed=seed, dtype=dtype)
@keras_export(v1=['keras.initializers.lecun_normal'])
class LecunNormal(init_ops.VarianceScaling):
def __init__(self, seed=None):
super(LecunNormal, self).__init__(
scale=1., mode='fan_in', distribution='truncated_normal', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export(v1=['keras.initializers.lecun_uniform'])
class LecunUniform(init_ops.VarianceScaling):
def __init__(self, seed=None):
super(LecunUniform, self).__init__(
scale=1., mode='fan_in', distribution='uniform', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export(v1=['keras.initializers.he_normal'])
class HeNormal(init_ops.VarianceScaling):
def __init__(self, seed=None):
super(HeNormal, self).__init__(
scale=2., mode='fan_in', distribution='truncated_normal', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export(v1=['keras.initializers.he_uniform'])
class HeUniform(init_ops.VarianceScaling):
def __init__(self, seed=None):
super(HeUniform, self).__init__(
scale=2., mode='fan_in', distribution='uniform', seed=seed)
def get_config(self):
return {'seed': self.seed}
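# Illustrative usage sketch (added; not part of the original module).  These
# TF1-style initializers are normally reached through their exported names and
# passed to layers, e.g.:
#
#   init = RandomNormal(mean=0.0, stddev=0.05, seed=42)
#   layer = tf.keras.layers.Dense(10, kernel_initializer=init)
#
# The Dense layer shown above is the usual public Keras API; the exact import
# path depends on the TensorFlow version in use.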
| apache-2.0 |
mic4ael/indico | indico/core/db/sqlalchemy/searchable_titles.py | 1 | 1794 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.core.db.sqlalchemy.util.queries import escape_like, preprocess_ts_string
from indico.util.decorators import strict_classproperty
class SearchableTitleMixin(object):
"""Mixin to add a fulltext-searchable title column."""
#: Whether the title column may not be empty
title_required = True
@strict_classproperty
@classmethod
def __auto_table_args(cls):
args = [
db.Index('ix_{}_title_fts'.format(cls.__tablename__), db.func.to_tsvector('simple', cls.title),
postgresql_using='gin')
]
if cls.title_required:
args.append(db.CheckConstraint("title != ''", 'valid_title'))
return tuple(args)
@declared_attr
def title(cls):
return db.Column(
db.String,
nullable=False
)
@classmethod
def title_matches(cls, search_string, exact=False):
"""Check whether the title matches a search string.
To be used in a SQLAlchemy `filter` call.
:param search_string: A string to search for
:param exact: Whether to search for the exact string
"""
crit = db.func.to_tsvector('simple', cls.title).match(preprocess_ts_string(search_string),
postgresql_regconfig='simple')
if exact:
crit = crit & cls.title.ilike('%{}%'.format(escape_like(search_string)))
return crit
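# A minimal usage sketch (not part of Indico): the model name below is a made-up
# assumption, and it presumes Indico's declarative machinery, which consumes the
# __auto_table_args defined by this mixin. `title_matches` is used inside an
# ordinary SQLAlchemy filter call:
#
#     class NewsItem(SearchableTitleMixin, db.Model):
#         __tablename__ = 'news_items'
#         id = db.Column(db.Integer, primary_key=True)
#
#     NewsItem.query.filter(NewsItem.title_matches('call for abstracts')).all()
#     NewsItem.query.filter(NewsItem.title_matches('Call for Abstracts', exact=True)).all()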
| mit |
kubeflow/kfserving | python/kfserving/test/test_v1alpha2_tensorflow_spec.py | 1 | 1476 | # Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kfserving
from kfserving.models.v1alpha2_tensorflow_spec import V1alpha2TensorflowSpec # noqa: E501
from kfserving.rest import ApiException
class TestV1alpha2TensorflowSpec(unittest.TestCase):
"""V1alpha2TensorflowSpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha2TensorflowSpec(self):
"""Test V1alpha2TensorflowSpec"""
# FIXME: construct object with mandatory attributes with example values
# model = kfserving.models.v1alpha2_tensorflow_spec.V1alpha2TensorflowSpec() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
wavycloud/pyboto3 | pyboto3/glue.py | 1 | 692979 | '''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def batch_create_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionInputList=None):
"""
Creates one or more partitions in a batch operation.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_create_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionInputList=[
{
'Values': [
'string',
],
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
]
)
:type CatalogId: string
:param CatalogId: The ID of the catalog in which the partition is to be created. Currently, this should be the AWS account ID.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the metadata database in which the partition is to be created.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the metadata table in which the partition is to be created.\n
:type PartitionInputList: list
:param PartitionInputList: [REQUIRED]\nA list of PartitionInput structures that define the partitions to be created.\n\n(dict) --The structure used to create and update a partition.\n\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\n\n(string) --\n\n\nLastAccessTime (datetime) --The last time at which the partition was accessed.\n\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. 
An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nParameters (dict) --These key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
The errors encountered when trying to create the requested partitions.
(dict) --
Contains information about a partition error.
PartitionValues (list) --
The values that define the partition.
(string) --
ErrorDetail (dict) --
The details about the partition error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
(string) --
"""
pass
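# A minimal sketch (not part of the generated stubs above, which only document the
# API): issuing the same call through the real boto3 Glue client. The database and
# table names, S3 location, formats, and SerDe class are illustrative assumptions.
def _example_batch_create_partition():
    import boto3  # assumes boto3 is installed and AWS credentials are configured
    glue = boto3.client('glue')
    response = glue.batch_create_partition(
        DatabaseName='example_db',
        TableName='example_table',
        PartitionInputList=[{
            'Values': ['2015-01-01'],
            'StorageDescriptor': {
                'Location': 's3://example-bucket/example_table/dt=2015-01-01/',
                'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
                'OutputFormat': 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
                'SerdeInfo': {
                    'SerializationLibrary': 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',
                },
            },
        }],
    )
    # Partition-level failures are reported in 'Errors' rather than raised.
    return response.get('Errors', [])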
def batch_delete_connection(CatalogId=None, ConnectionNameList=None):
"""
Deletes a list of connection definitions from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_connection(
CatalogId='string',
ConnectionNameList=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connections reside. If none is provided, the AWS account ID is used by default.
:type ConnectionNameList: list
:param ConnectionNameList: [REQUIRED]\nA list of names of the connections to delete.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Succeeded': [
'string',
],
'Errors': {
'string': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
}
}
Response Structure
(dict) --
Succeeded (list) --
A list of names of the connection definitions that were successfully deleted.
(string) --
Errors (dict) --
A map of the names of connections that were not successfully deleted to error details.
(string) --
(dict) --
Contains details about an error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Succeeded': [
'string',
],
'Errors': {
'string': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
}
}
:returns:
(string) --
"""
pass
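# A minimal sketch (not part of the generated stubs): deleting several Data Catalog
# connections with the real boto3 Glue client. The connection names are assumptions.
def _example_batch_delete_connection():
    import boto3  # assumed to be installed and configured
    glue = boto3.client('glue')
    response = glue.batch_delete_connection(
        ConnectionNameList=['example-jdbc-conn', 'example-redshift-conn'],
    )
    # 'Succeeded' lists deleted names; 'Errors' maps failed names to ErrorDetail.
    return response.get('Succeeded', []), response.get('Errors', {})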
def batch_delete_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionsToDelete=None):
"""
Deletes one or more partitions in a batch operation.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionsToDelete=[
{
'Values': [
'string',
]
},
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition to be deleted resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table in question resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table that contains the partitions to be deleted.\n
:type PartitionsToDelete: list
:param PartitionsToDelete: [REQUIRED]\nA list of PartitionInput structures that define the partitions to be deleted.\n\n(dict) --Contains a list of values defining partitions.\n\nValues (list) -- [REQUIRED]The list of values.\n\n(string) --\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
The errors encountered when trying to delete the requested partitions.
(dict) --
Contains information about a partition error.
PartitionValues (list) --
The values that define the partition.
(string) --
ErrorDetail (dict) --
The details about the partition error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
(string) --
"""
pass
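# A minimal sketch (not part of the generated stubs): dropping a batch of partitions
# by their key values with the real boto3 Glue client. Names and values are assumptions.
def _example_batch_delete_partition():
    import boto3  # assumed to be installed and configured
    glue = boto3.client('glue')
    response = glue.batch_delete_partition(
        DatabaseName='example_db',
        TableName='example_table',
        PartitionsToDelete=[
            {'Values': ['2015-01-01']},
            {'Values': ['2015-01-02']},
        ],
    )
    return response.get('Errors', [])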
def batch_delete_table(CatalogId=None, DatabaseName=None, TablesToDelete=None):
"""
Deletes multiple tables at once.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_table(
CatalogId='string',
DatabaseName='string',
TablesToDelete=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the tables to delete reside. For Hive compatibility, this name is entirely lowercase.\n
:type TablesToDelete: list
    :param TablesToDelete: [REQUIRED]\nA list of the tables to delete.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Errors': [
{
'TableName': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
A list of errors encountered in attempting to delete the specified tables.
(dict) --
An error record for table operations.
TableName (string) --
The name of the table. For Hive compatibility, this must be entirely lowercase.
ErrorDetail (dict) --
The details about the error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Errors': [
{
'TableName': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
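# A minimal sketch (not part of the generated stubs): removing several tables from one
# catalog database with the real boto3 Glue client. The lowercase database and table
# names are assumptions, chosen to match the Hive-compatibility note above.
def _example_batch_delete_table():
    import boto3  # assumed to be installed and configured
    glue = boto3.client('glue')
    response = glue.batch_delete_table(
        DatabaseName='example_db',
        TablesToDelete=['staging_events', 'staging_logs'],
    )
    return response.get('Errors', [])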
def batch_delete_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionIds=None):
"""
Deletes a specified batch of versions of a table.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_table_version(
CatalogId='string',
DatabaseName='string',
TableName='string',
VersionIds=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type VersionIds: list
:param VersionIds: [REQUIRED]\nA list of the IDs of versions to be deleted. A VersionId is a string representation of an integer. Each version is incremented by 1.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Errors': [
{
'TableName': 'string',
'VersionId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
A list of errors encountered while trying to delete the specified table versions.
(dict) --
An error record for table-version operations.
TableName (string) --
The name of the table in question.
VersionId (string) --
The ID value of the version in question. A VersionID is a string representation of an integer. Each version is incremented by 1.
ErrorDetail (dict) --
The details about the error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Errors': [
{
'TableName': 'string',
'VersionId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
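# A minimal sketch (not part of the generated stubs): pruning old table versions with
# the real boto3 Glue client. As documented above, each VersionId is the string form
# of an integer; the concrete values here are assumptions.
def _example_batch_delete_table_version():
    import boto3  # assumed to be installed and configured
    glue = boto3.client('glue')
    response = glue.batch_delete_table_version(
        DatabaseName='example_db',
        TableName='example_table',
        VersionIds=['1', '2', '3'],
    )
    return response.get('Errors', [])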
def batch_get_crawlers(CrawlerNames=None):
"""
    Returns a list of resource metadata for a given list of crawler names. After calling the ListCrawlers operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_crawlers(
CrawlerNames=[
'string',
]
)
:type CrawlerNames: list
:param CrawlerNames: [REQUIRED]\nA list of crawler names, which might be the names returned from the ListCrawlers operation.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax{
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'CrawlersNotFound': [
'string',
]
}
Response Structure
(dict) --
Crawlers (list) --A list of crawler definitions.
(dict) --Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.
Name (string) --The name of the crawler.
Role (string) --The Amazon Resource Name (ARN) of an IAM role that\'s used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.
Targets (dict) --A collection of targets to crawl.
S3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.
(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).
Path (string) --The path to the Amazon S3 target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
JdbcTargets (list) --Specifies JDBC targets.
(dict) --Specifies a JDBC data store to crawl.
ConnectionName (string) --The name of the connection to use to connect to the JDBC target.
Path (string) --The path of the JDBC target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
DynamoDBTargets (list) --Specifies Amazon DynamoDB targets.
(dict) --Specifies an Amazon DynamoDB table to crawl.
Path (string) --The name of the DynamoDB table to crawl.
CatalogTargets (list) --Specifies AWS Glue Data Catalog targets.
(dict) --Specifies an AWS Glue Data Catalog target.
DatabaseName (string) --The name of the database to be synchronized.
Tables (list) --A list of the tables to be synchronized.
(string) --
DatabaseName (string) --The name of the database in which the crawler\'s output is stored.
Description (string) --A description of the crawler.
Classifiers (list) --A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.
(string) --
SchemaChangePolicy (dict) --The policy that specifies update and delete behaviors for the crawler.
UpdateBehavior (string) --The update behavior when the crawler finds a changed schema.
DeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.
State (string) --Indicates whether the crawler is running, or whether a run is pending.
TablePrefix (string) --The prefix added to the names of tables that are created.
Schedule (dict) --For scheduled crawlers, the schedule when the crawler runs.
ScheduleExpression (string) --A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
State (string) --The state of the schedule.
CrawlElapsedTime (integer) --If the crawler is running, contains the total time elapsed since the last crawl began.
CreationTime (datetime) --The time that the crawler was created.
LastUpdated (datetime) --The time that the crawler was last updated.
LastCrawl (dict) --The status of the last crawl, and potentially error information if an error occurred.
Status (string) --Status of the last crawl.
ErrorMessage (string) --If an error occurred, the error information about the last crawl.
LogGroup (string) --The log group for the last crawl.
LogStream (string) --The log stream for the last crawl.
MessagePrefix (string) --The prefix for a message about this crawl.
StartTime (datetime) --The time at which the crawl started.
Version (integer) --The version of the crawler.
Configuration (string) --Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
CrawlerSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used by this crawler.
CrawlersNotFound (list) --A list of names of crawlers that were not found.
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'CrawlersNotFound': [
'string',
]
}
:returns:
(string) --
"""
pass
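# A minimal sketch (not part of the generated stubs): fetching crawler metadata in bulk
# with the real boto3 Glue client and separating hits from misses. The crawler names
# are assumptions.
def _example_batch_get_crawlers():
    import boto3  # assumed to be installed and configured
    glue = boto3.client('glue')
    response = glue.batch_get_crawlers(
        CrawlerNames=['example-s3-crawler', 'example-jdbc-crawler'],
    )
    found = {c['Name']: c.get('State') for c in response.get('Crawlers', [])}
    missing = response.get('CrawlersNotFound', [])
    return found, missing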
def batch_get_dev_endpoints(DevEndpointNames=None):
"""
    Returns a list of resource metadata for a given list of development endpoint names. After calling the ListDevEndpoints operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_dev_endpoints(
DevEndpointNames=[
'string',
]
)
:type DevEndpointNames: list
:param DevEndpointNames: [REQUIRED]\nThe list of DevEndpoint names, which might be the names returned from the ListDevEndpoint operation.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax{
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'DevEndpointsNotFound': [
'string',
]
}
Response Structure
(dict) --
DevEndpoints (list) --A list of DevEndpoint definitions.
(dict) --A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.
EndpointName (string) --The name of the DevEndpoint .
RoleArn (string) --The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .
SecurityGroupIds (list) --A list of security group identifiers used in this DevEndpoint .
(string) --
SubnetId (string) --The subnet ID for this DevEndpoint .
YarnEndpointAddress (string) --The YARN endpoint address used by this DevEndpoint .
PrivateAddress (string) --A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.
ZeppelinRemoteSparkInterpreterPort (integer) --The Apache Zeppelin port for the remote Apache Spark interpreter.
PublicAddress (string) --The public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .
Status (string) --The current status of this DevEndpoint .
WorkerType (string) --The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Development endpoints that are created without specifying a Glue version default to Glue 0.9.
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated to the development endpoint.
    The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
NumberOfNodes (integer) --The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .
AvailabilityZone (string) --The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.
Note
You can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.
ExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
Note
You can only use pure Java/Scala libraries with a DevEndpoint .
FailureReason (string) --The reason for a current failure in this DevEndpoint .
LastUpdateStatus (string) --The status of the last update.
CreatedTimestamp (datetime) --The point in time at which this DevEndpoint was created.
LastModifiedTimestamp (datetime) --The point in time at which this DevEndpoint was last modified.
PublicKey (string) --The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
PublicKeys (list) --A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.
Note
If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.
(string) --
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this DevEndpoint .
Arguments (dict) --A map of arguments used to configure the DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
DevEndpointsNotFound (list) --A list of DevEndpoints not found.
(string) --
Exceptions
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'DevEndpointsNotFound': [
'string',
]
}
:returns:
(string) --
"""
pass
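# A minimal sketch (not part of the generated stubs): looking up development endpoints
# with the real boto3 Glue client. The endpoint name is an assumption.
def _example_batch_get_dev_endpoints():
    import boto3  # assumed to be installed and configured
    glue = boto3.client('glue')
    response = glue.batch_get_dev_endpoints(DevEndpointNames=['example-endpoint'])
    for endpoint in response.get('DevEndpoints', []):
        print(endpoint['EndpointName'], endpoint.get('Status'), endpoint.get('PublicAddress'))
    return response.get('DevEndpointsNotFound', [])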
def batch_get_jobs(JobNames=None):
"""
    Returns a list of resource metadata for a given list of job names. After calling the ListJobs operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_jobs(
JobNames=[
'string',
]
)
:type JobNames: list
:param JobNames: [REQUIRED]\nA list of job names, which might be the names returned from the ListJobs operation.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax{
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'JobsNotFound': [
'string',
]
}
Response Structure
(dict) --
Jobs (list) --A list of job definitions.
(dict) --Specifies a job definition.
Name (string) --The name you assign to this job definition.
Description (string) --A description of the job.
LogUri (string) --This field is reserved for future use.
Role (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job.
CreatedOn (datetime) --The time and date that this job definition was created.
LastModifiedOn (datetime) --The last point in time when this job definition was modified.
ExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
MaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.
Command (dict) --The JobCommand that executes this job.
Name (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .
ScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.
PythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.
DefaultArguments (dict) --The default arguments for this job, specified as name-value pairs.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
NonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.
(string) --
(string) --
Connections (dict) --The connections used for this job.
Connections (list) --A list of connections used by the job.
(string) --
MaxRetries (integer) --The maximum number of times to retry this job after a JobRun fails.
AllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Timeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.
    The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.
NotificationProperty (dict) --Specifies configuration properties of a job notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
JobsNotFound (list) --A list of names of jobs not found.
(string) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'JobsNotFound': [
'string',
]
}
:returns:
(string) --
(string) --
"""
pass
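# A minimal sketch (not part of the generated stubs): combining list_jobs, which returns
# only job names, with batch_get_jobs to fetch full definitions through the real boto3
# Glue client. The page size is an assumption and pagination is deliberately minimal.
def _example_batch_get_jobs():
    import boto3  # assumed to be installed and configured
    glue = boto3.client('glue')
    names = glue.list_jobs(MaxResults=25).get('JobNames', [])
    if not names:
        return []
    response = glue.batch_get_jobs(JobNames=names)
    return [(job['Name'], job.get('GlueVersion'), job.get('WorkerType'))
            for job in response.get('Jobs', [])]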
def batch_get_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionsToGet=None):
"""
Retrieves partitions in a batch request.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionsToGet=[
{
'Values': [
'string',
]
},
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the partitions reside.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the partitions\' table.\n
:type PartitionsToGet: list
:param PartitionsToGet: [REQUIRED]\nA list of partition values identifying the partitions to retrieve.\n\n(dict) --Contains a list of values defining partitions.\n\nValues (list) -- [REQUIRED]The list of values.\n\n(string) --\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'UnprocessedKeys': [
{
'Values': [
'string',
]
},
]
}
Response Structure
(dict) --
Partitions (list) --
A list of the requested partitions.
(dict) --
Represents a slice of table data.
Values (list) --
The values of the partition.
(string) --
DatabaseName (string) --
The name of the catalog database in which to create the partition.
TableName (string) --
The name of the database table in which to create the partition.
CreationTime (datetime) --
The time at which the partition was created.
LastAccessTime (datetime) --
The last time at which the partition was accessed.
StorageDescriptor (dict) --
Provides information about the physical location where the partition is stored.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
Parameters (dict) --
These key-value pairs define partition parameters.
(string) --
(string) --
LastAnalyzedTime (datetime) --
The last time at which column statistics were computed for this partition.
UnprocessedKeys (list) --
A list of the partition values in the request for which partitions were not returned.
(dict) --
Contains a list of values defining partitions.
Values (list) --
The list of values.
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'UnprocessedKeys': [
{
'Values': [
'string',
]
},
]
}
:returns:
(string) --
"""
pass
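# A minimal sketch (not part of the generated stubs): retrieving a set of partitions by
# key values with the real boto3 Glue client. Names and partition values are assumptions.
def _example_batch_get_partition():
    import boto3  # assumed to be installed and configured
    glue = boto3.client('glue')
    response = glue.batch_get_partition(
        DatabaseName='example_db',
        TableName='example_table',
        PartitionsToGet=[{'Values': ['2015-01-01']}, {'Values': ['2015-01-02']}],
    )
    locations = [p.get('StorageDescriptor', {}).get('Location')
                 for p in response.get('Partitions', [])]
    # Keys reported in 'UnprocessedKeys' can simply be resubmitted in a follow-up call.
    return locations, response.get('UnprocessedKeys', [])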
def batch_get_triggers(TriggerNames=None):
"""
    Returns a list of resource metadata for a given list of trigger names. After calling the ListTriggers operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_triggers(
TriggerNames=[
'string',
]
)
:type TriggerNames: list
:param TriggerNames: [REQUIRED]\nA list of trigger names, which may be the names returned from the ListTriggers operation.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax{
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'TriggersNotFound': [
'string',
]
}
Response Structure
(dict) --
Triggers (list) --A list of trigger definitions.
(dict) --Information about a specific trigger.
Name (string) --The name of the trigger.
WorkflowName (string) --The name of the workflow associated with the trigger.
Id (string) --Reserved for future use.
Type (string) --The type of trigger that this is.
State (string) --The current state of the trigger.
Description (string) --A description of this trigger.
    Schedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --The actions initiated by this trigger.
(dict) --Defines an action to be initiated by a trigger.
JobName (string) --The name of a job to be executed.
Arguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --The name of the crawler to be used with this action.
Predicate (dict) --The predicate of this trigger, which defines when it will fire.
Logical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --A list of the conditions that determine when the trigger will fire.
(dict) --Defines a condition under which a trigger fires.
LogicalOperator (string) --A logical operator.
JobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --The name of the crawler to which this condition applies.
CrawlState (string) --The state of the crawler to which this condition applies.
TriggersNotFound (list) --A list of names of triggers not found.
(string) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'TriggersNotFound': [
'string',
]
}
:returns:
(string) --
(string) --
"""
pass
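# A minimal sketch (not part of the generated stubs): inspecting trigger type and state
# in bulk with the real boto3 Glue client. The trigger names are assumptions.
def _example_batch_get_triggers():
    import boto3  # assumed to be installed and configured
    glue = boto3.client('glue')
    response = glue.batch_get_triggers(
        TriggerNames=['example-nightly-trigger', 'example-conditional-trigger'],
    )
    summary = [(t['Name'], t.get('Type'), t.get('State'))
               for t in response.get('Triggers', [])]
    return summary, response.get('TriggersNotFound', [])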
def batch_get_workflows(Names=None, IncludeGraph=None):
"""
    Returns a list of resource metadata for a given list of workflow names. After calling the ListWorkflows operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_workflows(
Names=[
'string',
],
IncludeGraph=True|False
)
:type Names: list
:param Names: [REQUIRED]\nA list of workflow names, which may be the names returned from the ListWorkflows operation.\n\n(string) --\n\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include a graph when returning the workflow resource metadata.
:rtype: dict
ReturnsResponse Syntax
{
'Workflows': [
{
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'MissingWorkflows': [
'string',
]
}
Response Structure
(dict) --
Workflows (list) --
A list of workflow resource metadata.
(dict) --
A workflow represents a flow in which AWS Glue components should be executed to complete a logical task.
Name (string) --
The name of the workflow representing the flow.
Description (string) --
A description of the workflow.
DefaultRunProperties (dict) --
A collection of properties to be used as part of each execution of the workflow.
(string) --
(string) --
CreatedOn (datetime) --
The date and time when the workflow was created.
LastModifiedOn (datetime) --
The date and time when the workflow was last modified.
LastRun (dict) --
The information about the last execution of the workflow.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a trigger, job, or crawler, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a trigger, job, or crawler, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
MissingWorkflows (list) --
A list of names of workflows not found.
(string) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'Workflows': [
{
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'MissingWorkflows': [
'string',
]
}
:returns:
(string) --
(string) --
"""
pass
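# Hedged usage sketch (illustrative only, not part of the generated stub).
# It assumes boto3 is installed and AWS credentials/region are configured;
# the workflow names below are hypothetical placeholders.
def _example_batch_get_workflows():
    import boto3
    glue = boto3.client('glue')
    resp = glue.batch_get_workflows(
        Names=['nightly-etl', 'hourly-refresh'],  # hypothetical workflow names
        IncludeGraph=True,
    )
    for workflow in resp.get('Workflows', []):
        last_run = workflow.get('LastRun', {})
        print(workflow['Name'], last_run.get('Status', 'never run'))
    if resp.get('MissingWorkflows'):
        print('Not found:', resp['MissingWorkflows'])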
def batch_stop_job_run(JobName=None, JobRunIds=None):
"""
Stops one or more job runs for a specified job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_stop_job_run(
JobName='string',
JobRunIds=[
'string',
]
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition for which to stop job runs.\n
:type JobRunIds: list
:param JobRunIds: [REQUIRED]\nA list of the JobRunIds that should be stopped for that job definition.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{
'SuccessfulSubmissions': [
{
'JobName': 'string',
'JobRunId': 'string'
},
],
'Errors': [
{
'JobName': 'string',
'JobRunId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
SuccessfulSubmissions (list) --
A list of the JobRuns that were successfully submitted for stopping.
(dict) --
Records a successful request to stop a specified JobRun .
JobName (string) --
The name of the job definition used in the job run that was stopped.
JobRunId (string) --
The JobRunId of the job run that was stopped.
Errors (list) --
A list of the errors that were encountered in trying to stop JobRuns , including the JobRunId for which each error was encountered and details about the error.
(dict) --
Records an error that occurred when attempting to stop a specified job run.
JobName (string) --
The name of the job definition that is used in the job run in question.
JobRunId (string) --
The JobRunId of the job run in question.
ErrorDetail (dict) --
Specifies details about the error that was encountered.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'SuccessfulSubmissions': [
{
'JobName': 'string',
'JobRunId': 'string'
},
],
'Errors': [
{
'JobName': 'string',
'JobRunId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
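# Hedged usage sketch (illustrative only): stop several runs of one job and
# report per-run results. The job name and run IDs are hypothetical; boto3
# and configured AWS credentials are assumed.
def _example_batch_stop_job_run():
    import boto3
    glue = boto3.client('glue')
    resp = glue.batch_stop_job_run(
        JobName='example-etl-job',                 # hypothetical job name
        JobRunIds=['jr_0123abcd', 'jr_4567efgh'],  # hypothetical run IDs
    )
    for ok in resp.get('SuccessfulSubmissions', []):
        print('stop requested for', ok['JobRunId'])
    for err in resp.get('Errors', []):
        detail = err.get('ErrorDetail', {})
        print('could not stop', err['JobRunId'], detail.get('ErrorMessage'))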
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
"""
pass
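# Hedged usage sketch (illustrative only): check whether a paginator exists
# before using it. 'get_jobs' is one Glue operation that supports pagination;
# boto3 and configured AWS credentials are assumed.
def _example_can_paginate():
    import boto3
    glue = boto3.client('glue')
    if glue.can_paginate('get_jobs'):
        for page in glue.get_paginator('get_jobs').paginate():
            for job in page.get('Jobs', []):
                print(job['Name'])
    else:
        print(glue.get_jobs())  # fall back to a single, unpaginated call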
def cancel_ml_task_run(TransformId=None, TaskRunId=None):
"""
Cancels (stops) a task run. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can cancel a machine learning task run at any time by calling CancelMLTaskRun with a task run\'s parent transform\'s TransformID and the task run\'s TaskRunId .
See also: AWS API Documentation
Exceptions
:example: response = client.cancel_ml_task_run(
TransformId='string',
TaskRunId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type TaskRunId: string
:param TaskRunId: [REQUIRED]\nA unique identifier for the task run.\n
:rtype: dict
Returns
Response Syntax
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'
}
Response Structure
(dict) --
TransformId (string) --
The unique identifier of the machine learning transform.
TaskRunId (string) --
The unique identifier for the task run.
Status (string) --
The status for this run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
"""
pass
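# Hedged usage sketch (illustrative only): cancel a machine learning task run
# given its parent transform ID and task run ID (both placeholders here).
def _example_cancel_ml_task_run():
    import boto3
    glue = boto3.client('glue')
    resp = glue.cancel_ml_task_run(
        TransformId='tfm-0123456789abcdef',  # placeholder transform ID
        TaskRunId='task-0123456789abcdef',   # placeholder task run ID
    )
    print('task run status:', resp['Status'])  # e.g. STOPPING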
def create_classifier(GrokClassifier=None, XMLClassifier=None, JsonClassifier=None, CsvClassifier=None):
"""
Creates a classifier in the user\'s account. This can be a GrokClassifier , an XMLClassifier , a JsonClassifier , or a CsvClassifier , depending on which field of the request is present.
See also: AWS API Documentation
Exceptions
:example: response = client.create_classifier(
GrokClassifier={
'Classification': 'string',
'Name': 'string',
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
XMLClassifier={
'Classification': 'string',
'Name': 'string',
'RowTag': 'string'
},
JsonClassifier={
'Name': 'string',
'JsonPath': 'string'
},
CsvClassifier={
'Name': 'string',
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
)
:type GrokClassifier: dict
:param GrokClassifier: A GrokClassifier object specifying the classifier to create.\n\nClassification (string) -- [REQUIRED]An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on.\n\nName (string) -- [REQUIRED]The name of the new classifier.\n\nGrokPattern (string) -- [REQUIRED]The grok pattern used by this classifier.\n\nCustomPatterns (string) --Optional custom grok patterns used by this classifier.\n\n\n
:type XMLClassifier: dict
:param XMLClassifier: An XMLClassifier object specifying the classifier to create.\n\nClassification (string) -- [REQUIRED]An identifier of the data format that the classifier matches.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nRowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This can\'t identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a='A' item_b='B'></row> is okay, but <row item_a='A' item_b='B' /> is not).\n\n\n
:type JsonClassifier: dict
:param JsonClassifier: A JsonClassifier object specifying the classifier to create.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nJsonPath (string) -- [REQUIRED]A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .\n\n\n
:type CsvClassifier: dict
:param CsvClassifier: A CsvClassifier object specifying the classifier to create.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nDelimiter (string) --A custom symbol to denote what separates each column entry in the row.\n\nQuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. Must be different from the column delimiter.\n\nContainsHeader (string) --Indicates whether the CSV file contains a header.\n\nHeader (list) --A list of strings representing column names.\n\n(string) --\n\n\nDisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true.\n\nAllowSingleColumn (boolean) --Enables the processing of files that contain only one column.\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
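# Hedged usage sketch (illustrative only): create a CSV classifier. Exactly
# one classifier argument is passed per call; the classifier name and header
# columns below are hypothetical.
def _example_create_classifier():
    import boto3
    glue = boto3.client('glue')
    glue.create_classifier(
        CsvClassifier={
            'Name': 'example-csv-classifier',        # hypothetical name
            'Delimiter': ',',
            'QuoteSymbol': '"',
            'ContainsHeader': 'PRESENT',
            'Header': ['id', 'name', 'created_at'],  # hypothetical columns
            'DisableValueTrimming': False,
            'AllowSingleColumn': False,
        }
    )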
def create_connection(CatalogId=None, ConnectionInput=None):
"""
Creates a connection definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_connection(
CatalogId='string',
ConnectionInput={
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the connection. If none is provided, the AWS account ID is used by default.
:type ConnectionInput: dict
:param ConnectionInput: [REQUIRED]\nA ConnectionInput object defining the connection to create.\n\nName (string) -- [REQUIRED]The name of the connection.\n\nDescription (string) --The description of the connection.\n\nConnectionType (string) -- [REQUIRED]The type of the connection. Currently, these types are supported:\n\nJDBC - Designates a connection to a database through Java Database Connectivity (JDBC).\nKAFKA - Designates a connection to an Apache Kafka streaming platform.\nMONGODB - Designates a connection to a MongoDB document database.\n\nSFTP is not supported.\n\nMatchCriteria (list) --A list of criteria that can be used in selecting this connection.\n\n(string) --\n\n\nConnectionProperties (dict) -- [REQUIRED]These key-value pairs define parameters for the connection.\n\n(string) --\n(string) --\n\n\n\n\nPhysicalConnectionRequirements (dict) --A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to successfully make this connection.\n\nSubnetId (string) --The subnet ID used by the connection.\n\nSecurityGroupIdList (list) --The security group ID list used by the connection.\n\n(string) --\n\n\nAvailabilityZone (string) --The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
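# Hedged usage sketch (illustrative only): create a JDBC connection. The URL,
# credentials, subnet, and security group are placeholders to be replaced
# with real values (ideally the password would come from a secret store).
def _example_create_connection():
    import boto3
    glue = boto3.client('glue')
    glue.create_connection(
        ConnectionInput={
            'Name': 'example-jdbc-connection',  # hypothetical name
            'ConnectionType': 'JDBC',
            'ConnectionProperties': {
                'JDBC_CONNECTION_URL': 'jdbc:postgresql://db.example.com:5432/analytics',
                'USERNAME': 'glue_user',   # placeholder
                'PASSWORD': 'replace-me',  # placeholder
            },
            'PhysicalConnectionRequirements': {
                'SubnetId': 'subnet-0123456789abcdef0',           # placeholder
                'SecurityGroupIdList': ['sg-0123456789abcdef0'],  # placeholder
                'AvailabilityZone': 'us-east-1a',
            },
        }
    )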
def create_crawler(Name=None, Role=None, DatabaseName=None, Description=None, Targets=None, Schedule=None, Classifiers=None, TablePrefix=None, SchemaChangePolicy=None, Configuration=None, CrawlerSecurityConfiguration=None, Tags=None):
"""
Creates a new crawler with specified targets, role, configuration, and optional schedule. At least one crawl target must be specified, in the s3Targets field, the jdbcTargets field, or the DynamoDBTargets field.
See also: AWS API Documentation
Exceptions
:example: response = client.create_crawler(
Name='string',
Role='string',
DatabaseName='string',
Description='string',
Targets={
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
Schedule='string',
Classifiers=[
'string',
],
TablePrefix='string',
SchemaChangePolicy={
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
Configuration='string',
CrawlerSecurityConfiguration='string',
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nName of the new crawler.\n
:type Role: string
:param Role: [REQUIRED]\nThe IAM role or Amazon Resource Name (ARN) of an IAM role used by the new crawler to access customer resources.\n
:type DatabaseName: string
:param DatabaseName: The AWS Glue database where results are written, such as: arn:aws:daylight:us-east-1::database/sometable/* .
:type Description: string
:param Description: A description of the new crawler.
:type Targets: dict
:param Targets: [REQUIRED]\nA list of collection of targets to crawl.\n\nS3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.\n\n(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).\n\nPath (string) --The path to the Amazon S3 target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nJdbcTargets (list) --Specifies JDBC targets.\n\n(dict) --Specifies a JDBC data store to crawl.\n\nConnectionName (string) --The name of the connection to use to connect to the JDBC target.\n\nPath (string) --The path of the JDBC target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nDynamoDBTargets (list) --Specifies Amazon DynamoDB targets.\n\n(dict) --Specifies an Amazon DynamoDB table to crawl.\n\nPath (string) --The name of the DynamoDB table to crawl.\n\n\n\n\n\nCatalogTargets (list) --Specifies AWS Glue Data Catalog targets.\n\n(dict) --Specifies an AWS Glue Data Catalog target.\n\nDatabaseName (string) -- [REQUIRED]The name of the database to be synchronized.\n\nTables (list) -- [REQUIRED]A list of the tables to be synchronized.\n\n(string) --\n\n\n\n\n\n\n\n
:type Schedule: string
:param Schedule: A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
:type Classifiers: list
:param Classifiers: A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.\n\n(string) --\n\n
:type TablePrefix: string
:param TablePrefix: The table prefix used for catalog tables that are created.
:type SchemaChangePolicy: dict
:param SchemaChangePolicy: The policy for the crawler\'s update and deletion behavior.\n\nUpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\n\nDeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\n\n\n
:type Configuration: string
:param Configuration: The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
:type CrawlerSecurityConfiguration: string
:param CrawlerSecurityConfiguration: The name of the SecurityConfiguration structure to be used by this crawler.
:type Tags: dict
:param Tags: The tags to use with this crawler request. You can use tags to limit access to the crawler. For more information, see AWS Tags in AWS Glue .\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
:return: {}
:returns:
(dict) --
"""
pass
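# Hedged usage sketch (illustrative only): create a crawler over a single S3
# prefix with a daily schedule. The crawler name, role ARN, database, and
# bucket are placeholders.
def _example_create_crawler():
    import boto3
    glue = boto3.client('glue')
    glue.create_crawler(
        Name='example-s3-crawler',                                     # placeholder
        Role='arn:aws:iam::123456789012:role/ExampleGlueCrawlerRole',  # placeholder
        DatabaseName='example_db',                                     # placeholder
        Targets={'S3Targets': [{'Path': 's3://example-bucket/raw/'}]},
        Schedule='cron(15 12 * * ? *)',  # every day at 12:15 UTC
        SchemaChangePolicy={
            'UpdateBehavior': 'UPDATE_IN_DATABASE',
            'DeleteBehavior': 'LOG',
        },
    )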
def create_database(CatalogId=None, DatabaseInput=None):
"""
Creates a new database in a Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_database(
CatalogId='string',
DatabaseInput={
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the database. If none is provided, the AWS account ID is used by default.
:type DatabaseInput: dict
:param DatabaseInput: [REQUIRED]\nThe metadata for the database.\n\nName (string) -- [REQUIRED]The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the database.\n\nLocationUri (string) --The location of the database (for example, an HDFS path).\n\nParameters (dict) --These key-value pairs define parameters and properties of the database.\nThese key-value pairs define parameters and properties of the database.\n\n(string) --\n(string) --\n\n\n\n\nCreateTableDefaultPermissions (list) --Creates a set of default permissions on the table for principals.\n\n(dict) --Permissions granted to a principal.\n\nPrincipal (dict) --The principal who is granted permissions.\n\nDataLakePrincipalIdentifier (string) --An identifier for the AWS Lake Formation principal.\n\n\n\nPermissions (list) --The permissions that are granted to the principal.\n\n(string) --\n\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
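# Hedged usage sketch (illustrative only): create a Data Catalog database.
# The database name and S3 location are placeholders.
def _example_create_database():
    import boto3
    glue = boto3.client('glue')
    glue.create_database(
        DatabaseInput={
            'Name': 'example_db',                             # placeholder
            'Description': 'Example database for raw data.',
            'LocationUri': 's3://example-bucket/warehouse/',  # placeholder
        }
    )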
def create_dev_endpoint(EndpointName=None, RoleArn=None, SecurityGroupIds=None, SubnetId=None, PublicKey=None, PublicKeys=None, NumberOfNodes=None, WorkerType=None, GlueVersion=None, NumberOfWorkers=None, ExtraPythonLibsS3Path=None, ExtraJarsS3Path=None, SecurityConfiguration=None, Tags=None, Arguments=None):
"""
Creates a new development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.create_dev_endpoint(
EndpointName='string',
RoleArn='string',
SecurityGroupIds=[
'string',
],
SubnetId='string',
PublicKey='string',
PublicKeys=[
'string',
],
NumberOfNodes=123,
WorkerType='Standard'|'G.1X'|'G.2X',
GlueVersion='string',
NumberOfWorkers=123,
ExtraPythonLibsS3Path='string',
ExtraJarsS3Path='string',
SecurityConfiguration='string',
Tags={
'string': 'string'
},
Arguments={
'string': 'string'
}
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nThe name to be assigned to the new DevEndpoint .\n
:type RoleArn: string
:param RoleArn: [REQUIRED]\nThe IAM role for the DevEndpoint .\n
:type SecurityGroupIds: list
:param SecurityGroupIds: Security group IDs for the security groups to be used by the new DevEndpoint .\n\n(string) --\n\n
:type SubnetId: string
:param SubnetId: The subnet ID for the new DevEndpoint to use.
:type PublicKey: string
:param PublicKey: The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
:type PublicKeys: list
:param PublicKeys: A list of public keys to be used by the development endpoints for authentication. The use of this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.\n\nNote\nIf you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.\n\n\n(string) --\n\n
:type NumberOfNodes: integer
:param NumberOfNodes: The number of AWS Glue Data Processing Units (DPUs) to allocate to this DevEndpoint .
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\nKnown issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.\n
:type GlueVersion: string
:param GlueVersion: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nDevelopment endpoints that are created without specifying a Glue version default to Glue 0.9.\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated to the development endpoint.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n
:type ExtraPythonLibsS3Path: string
:param ExtraPythonLibsS3Path: The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.\n\nNote\nYou can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not yet supported.\n\n
:type ExtraJarsS3Path: string
:param ExtraJarsS3Path: The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
:type SecurityConfiguration: string
:param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this DevEndpoint .
:type Tags: dict
:param Tags: The tags to use with this DevEndpoint. You may use tags to limit access to the DevEndpoint. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type Arguments: dict
:param Arguments: A map of arguments used to configure the DevEndpoint .\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'EndpointName': 'string',
'Status': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'RoleArn': 'string',
'YarnEndpointAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'NumberOfNodes': 123,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'SecurityConfiguration': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'Arguments': {
'string': 'string'
}
}
Response Structure
(dict) --
EndpointName (string) --
The name assigned to the new DevEndpoint .
Status (string) --
The current status of the new DevEndpoint .
SecurityGroupIds (list) --
The security groups assigned to the new DevEndpoint .
(string) --
SubnetId (string) --
The subnet ID assigned to the new DevEndpoint .
RoleArn (string) --
The Amazon Resource Name (ARN) of the role assigned to the new DevEndpoint .
YarnEndpointAddress (string) --
The address of the YARN endpoint used by this DevEndpoint .
ZeppelinRemoteSparkInterpreterPort (integer) --
The Apache Zeppelin port for the remote Apache Spark interpreter.
NumberOfNodes (integer) --
The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint.
WorkerType (string) --
The type of predefined worker that is allocated to the development endpoint. May be a value of Standard, G.1X, or G.2X.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated to the development endpoint.
AvailabilityZone (string) --
The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --
The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --
The paths to one or more Python libraries in an S3 bucket that will be loaded in your DevEndpoint .
ExtraJarsS3Path (string) --
Path to one or more Java .jar files in an S3 bucket that will be loaded in your DevEndpoint .
FailureReason (string) --
The reason for a current failure in this DevEndpoint .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure being used with this DevEndpoint .
CreatedTimestamp (datetime) --
The point in time at which this DevEndpoint was created.
Arguments (dict) --
The map of arguments used to configure this DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ValidationException
Glue.Client.exceptions.ResourceNumberLimitExceededException
:return: {
'EndpointName': 'string',
'Status': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'RoleArn': 'string',
'YarnEndpointAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'NumberOfNodes': 123,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'SecurityConfiguration': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'Arguments': {
'string': 'string'
}
}
:returns:
(string) --
"""
pass
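# Hedged usage sketch (illustrative only): create a development endpoint with
# G.1X workers and Python 3. The endpoint name, role ARN, and SSH public key
# are placeholders.
def _example_create_dev_endpoint():
    import boto3
    glue = boto3.client('glue')
    resp = glue.create_dev_endpoint(
        EndpointName='example-dev-endpoint',                          # placeholder
        RoleArn='arn:aws:iam::123456789012:role/ExampleGlueDevRole',  # placeholder
        PublicKeys=['ssh-rsa AAAA... user@example.com'],              # placeholder key
        GlueVersion='1.0',
        WorkerType='G.1X',
        NumberOfWorkers=3,
        Arguments={'GLUE_PYTHON_VERSION': '3'},
    )
    print('endpoint status:', resp['Status'])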
def create_job(Name=None, Description=None, LogUri=None, Role=None, ExecutionProperty=None, Command=None, DefaultArguments=None, NonOverridableArguments=None, Connections=None, MaxRetries=None, AllocatedCapacity=None, Timeout=None, MaxCapacity=None, SecurityConfiguration=None, Tags=None, NotificationProperty=None, GlueVersion=None, NumberOfWorkers=None, WorkerType=None):
"""
Creates a new job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.create_job(
Name='string',
Description='string',
LogUri='string',
Role='string',
ExecutionProperty={
'MaxConcurrentRuns': 123
},
Command={
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
DefaultArguments={
'string': 'string'
},
NonOverridableArguments={
'string': 'string'
},
Connections={
'Connections': [
'string',
]
},
MaxRetries=123,
AllocatedCapacity=123,
Timeout=123,
MaxCapacity=123.0,
SecurityConfiguration='string',
Tags={
'string': 'string'
},
NotificationProperty={
'NotifyDelayAfter': 123
},
GlueVersion='string',
NumberOfWorkers=123,
WorkerType='Standard'|'G.1X'|'G.2X'
)
:type Name: string
:param Name: [REQUIRED]\nThe name you assign to this job definition. It must be unique in your account.\n
:type Description: string
:param Description: Description of the job being defined.
:type LogUri: string
:param LogUri: This field is reserved for future use.
:type Role: string
:param Role: [REQUIRED]\nThe name or Amazon Resource Name (ARN) of the IAM role associated with this job.\n
:type ExecutionProperty: dict
:param ExecutionProperty: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\n\nMaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\n\n\n
:type Command: dict
:param Command: [REQUIRED]\nThe JobCommand that executes this job.\n\nName (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .\n\nScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.\n\nPythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.\n\n\n
:type DefaultArguments: dict
:param DefaultArguments: The default arguments for this job.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type NonOverridableArguments: dict
:param NonOverridableArguments: Non-overridable arguments for this job, specified as name-value pairs.\n\n(string) --\n(string) --\n\n\n\n
:type Connections: dict
:param Connections: The connections used for this job.\n\nConnections (list) --A list of connections used by the job.\n\n(string) --\n\n\n\n
:type MaxRetries: integer
:param MaxRetries: The maximum number of times to retry this job if it fails.
:type AllocatedCapacity: integer
:param AllocatedCapacity: This parameter is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) to allocate to this Job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n
:type Timeout: integer
:param Timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n
:type SecurityConfiguration: string
:param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this job.
:type Tags: dict
:param Tags: The tags to use with this job. You may use tags to limit access to the job. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type NotificationProperty: dict
:param NotificationProperty: Specifies configuration properties of a job notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n
:type GlueVersion: string
:param GlueVersion: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The unique name that was provided for this job definition.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
"""
pass
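# Illustrative sketch only (not part of the generated stub): a minimal
# create_job call against a real boto3 Glue client. The job name, IAM role
# and S3 script path below are placeholder assumptions.
def _example_create_job():
    import boto3
    glue = boto3.client('glue')
    response = glue.create_job(
        Name='example-etl-job',                      # placeholder name
        Role='ExampleGlueServiceRole',               # placeholder IAM role
        Command={
            'Name': 'glueetl',                       # Spark ETL job
            'ScriptLocation': 's3://example-bucket/scripts/job.py',
            'PythonVersion': '3',
        },
        GlueVersion='1.0',
        WorkerType='G.1X',
        NumberOfWorkers=2,     # MaxCapacity must not be set alongside these
        Timeout=60,
        DefaultArguments={'--TempDir': 's3://example-bucket/tmp/'},
    )
    return response['Name']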
def create_ml_transform(Name=None, Description=None, InputRecordTables=None, Parameters=None, Role=None, GlueVersion=None, MaxCapacity=None, WorkerType=None, NumberOfWorkers=None, Timeout=None, MaxRetries=None, Tags=None):
"""
Creates an AWS Glue machine learning transform. This operation creates the transform and all the necessary parameters to train it.
Call this operation as the first step in the process of using a machine learning transform (such as the FindMatches transform) for deduplicating data. You can provide an optional Description , in addition to the parameters that you want to use for your algorithm.
You must also specify certain parameters for the tasks that AWS Glue runs on your behalf as part of learning from your data and creating a high-quality machine learning transform. These parameters include Role , and optionally, AllocatedCapacity , Timeout , and MaxRetries . For more information, see Jobs .
See also: AWS API Documentation
Exceptions
:example: response = client.create_ml_transform(
Name='string',
Description='string',
InputRecordTables=[
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
Parameters={
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
Role='string',
GlueVersion='string',
MaxCapacity=123.0,
WorkerType='Standard'|'G.1X'|'G.2X',
NumberOfWorkers=123,
Timeout=123,
MaxRetries=123,
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nThe unique name that you give the transform when you create it.\n
:type Description: string
:param Description: A description of the machine learning transform that is being defined. The default is an empty string.
:type InputRecordTables: list
:param InputRecordTables: [REQUIRED]\nA list of AWS Glue table definitions used by the transform.\n\n(dict) --The database and table in the AWS Glue Data Catalog that is used for input or output data.\n\nDatabaseName (string) -- [REQUIRED]A database name in the AWS Glue Data Catalog.\n\nTableName (string) -- [REQUIRED]A table name in the AWS Glue Data Catalog.\n\nCatalogId (string) --A unique identifier for the AWS Glue Data Catalog.\n\nConnectionName (string) --The name of the connection to the AWS Glue Data Catalog.\n\n\n\n\n
:type Parameters: dict
:param Parameters: [REQUIRED]\nThe algorithmic parameters that are specific to the transform type used. Conditionally dependent on the transform type.\n\nTransformType (string) -- [REQUIRED]The type of machine learning transform.\nFor information about the types of machine learning transforms, see Creating Machine Learning Transforms .\n\nFindMatchesParameters (dict) --The parameters for the find matches algorithm.\n\nPrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.\n\nPrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.\nThe precision metric indicates how often your model is correct when it predicts a match.\nThe recall metric indicates that for an actual match, how often your model predicts the match.\n\nAccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.\nAccuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.\nCost measures how many compute resources, and thus money, are consumed to run the transform.\n\nEnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.\nNote that setting this value to true may increase the conflation execution time.\n\n\n\n\n
:type Role: string
:param Role: [REQUIRED]\nThe name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both AWS Glue service role permissions to AWS Glue resources, and Amazon S3 permissions required by the transform.\n\nThis role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue .\nThis role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.\n\n
:type GlueVersion: string
:param GlueVersion: This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nMaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .\n\nIf either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.\nIf MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\nMaxCapacity and NumberOfWorkers must both be at least 1.\n\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nMaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .\n\nIf either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.\nIf MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\nMaxCapacity and NumberOfWorkers must both be at least 1.\n\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when this task runs.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\n
:type Timeout: integer
:param Timeout: The timeout of the task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
:type MaxRetries: integer
:param MaxRetries: The maximum number of times to retry a task for this transform after a task run fails.
:type Tags: dict
:param Tags: The tags to use with this machine learning transform. You may use tags to limit access to the machine learning transform. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'TransformId': 'string'
}
Response Structure
(dict) --
TransformId (string) --
A unique identifier that is generated for the transform.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.IdempotentParameterMismatchException
:return: {
'TransformId': 'string'
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.IdempotentParameterMismatchException
"""
pass
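# Illustrative sketch only (not part of the generated stub): creating a
# FindMatches transform. The database, table, role and key-column names are
# placeholder assumptions.
def _example_create_ml_transform():
    import boto3
    glue = boto3.client('glue')
    response = glue.create_ml_transform(
        Name='example-dedup-transform',
        Role='ExampleGlueServiceRole',       # needs Glue and S3 permissions
        GlueVersion='1.0',
        InputRecordTables=[
            {'DatabaseName': 'example_db', 'TableName': 'customers'},
        ],
        Parameters={
            'TransformType': 'FIND_MATCHES',
            'FindMatchesParameters': {
                'PrimaryKeyColumnName': 'customer_id',
                'PrecisionRecallTradeoff': 0.5,   # no precision/recall bias
                'AccuracyCostTradeoff': 0.5,      # balance accuracy and cost
            },
        },
        WorkerType='G.1X',
        NumberOfWorkers=5,   # mutually exclusive with MaxCapacity
    )
    return response['TransformId']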
def create_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionInput=None):
"""
Creates a new partition.
See also: AWS API Documentation
Exceptions
:example: response = client.create_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionInput={
'Values': [
'string',
],
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
)
:type CatalogId: string
:param CatalogId: The AWS account ID of the catalog in which the partition is to be created.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the metadata database in which the partition is to be created.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the metadata table in which the partition is to be created.\n
:type PartitionInput: dict
:param PartitionInput: [REQUIRED]\nA PartitionInput structure defining the partition to be created.\n\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\n\n(string) --\n\n\nLastAccessTime (datetime) --The last time at which the partition was accessed.\n\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nParameters (dict) --These key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
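# Illustrative sketch only (not part of the generated stub): registering one
# partition of a date-partitioned CSV table. The database, table and S3
# location are placeholder assumptions.
def _example_create_partition():
    import boto3
    glue = boto3.client('glue')
    glue.create_partition(
        DatabaseName='example_db',
        TableName='events',
        PartitionInput={
            # Values must follow the order of the table's partition keys.
            'Values': ['2015-01-01'],
            'StorageDescriptor': {
                'Location': 's3://example-bucket/events/dt=2015-01-01/',
                'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
                'OutputFormat': 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
                'SerdeInfo': {
                    'SerializationLibrary': 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',
                },
            },
        },
    )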
def create_script(DagNodes=None, DagEdges=None, Language=None):
"""
Transforms a directed acyclic graph (DAG) into code.
See also: AWS API Documentation
Exceptions
:example: response = client.create_script(
DagNodes=[
{
'Id': 'string',
'NodeType': 'string',
'Args': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'LineNumber': 123
},
],
DagEdges=[
{
'Source': 'string',
'Target': 'string',
'TargetParameter': 'string'
},
],
Language='PYTHON'|'SCALA'
)
:type DagNodes: list
:param DagNodes: A list of the nodes in the DAG.\n\n(dict) --Represents a node in a directed acyclic graph (DAG)\n\nId (string) -- [REQUIRED]A node identifier that is unique within the node\'s graph.\n\nNodeType (string) -- [REQUIRED]The type of node that this is.\n\nArgs (list) -- [REQUIRED]Properties of the node, in the form of name-value pairs.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nLineNumber (integer) --The line number of the node.\n\n\n\n\n
:type DagEdges: list
:param DagEdges: A list of the edges in the DAG.\n\n(dict) --Represents a directional edge in a directed acyclic graph (DAG).\n\nSource (string) -- [REQUIRED]The ID of the node at which the edge starts.\n\nTarget (string) -- [REQUIRED]The ID of the node at which the edge ends.\n\nTargetParameter (string) --The target of the edge.\n\n\n\n\n
:type Language: string
:param Language: The programming language of the resulting code from the DAG.
:rtype: dict
ReturnsResponse Syntax
{
'PythonScript': 'string',
'ScalaCode': 'string'
}
Response Structure
(dict) --
PythonScript (string) --
The Python script generated from the DAG.
ScalaCode (string) --
The Scala code generated from the DAG.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'PythonScript': 'string',
'ScalaCode': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
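# Illustrative sketch only (not part of the generated stub): generating Python
# code from a two-node DAG. The node types and argument values below follow
# the conventions used in AWS's DAG examples and are assumptions here; in
# practice the DAG is usually obtained from get_plan or the console.
def _example_create_script():
    import boto3
    glue = boto3.client('glue')
    response = glue.create_script(
        DagNodes=[
            {
                'Id': 'datasource0',
                'NodeType': 'DataSource',
                'Args': [
                    {'Name': 'database', 'Value': '"example_db"'},
                    {'Name': 'table_name', 'Value': '"events"'},
                ],
            },
            {
                'Id': 'datasink1',
                'NodeType': 'DataSink',
                'Args': [
                    {'Name': 'connection_type', 'Value': '"s3"'},
                    {'Name': 'connection_options', 'Value': '{"path": "s3://example-bucket/out/"}'},
                    {'Name': 'format', 'Value': '"json"'},
                ],
            },
        ],
        DagEdges=[
            {'Source': 'datasource0', 'Target': 'datasink1'},
        ],
        Language='PYTHON',
    )
    return response['PythonScript']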
def create_security_configuration(Name=None, EncryptionConfiguration=None):
"""
Creates a new security configuration. A security configuration is a set of security properties that can be used by AWS Glue. You can use a security configuration to encrypt data at rest. For information about using security configurations in AWS Glue, see Encrypting Data Written by Crawlers, Jobs, and Development Endpoints .
See also: AWS API Documentation
Exceptions
:example: response = client.create_security_configuration(
Name='string',
EncryptionConfiguration={
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name for the new security configuration.\n
:type EncryptionConfiguration: dict
:param EncryptionConfiguration: [REQUIRED]\nThe encryption configuration for the new security configuration.\n\nS3Encryption (list) --The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.\n\n(dict) --Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.\n\nS3EncryptionMode (string) --The encryption mode to use for Amazon S3 data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\n\n\nCloudWatchEncryption (dict) --The encryption configuration for Amazon CloudWatch.\n\nCloudWatchEncryptionMode (string) --The encryption mode to use for CloudWatch data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\nJobBookmarksEncryption (dict) --The encryption configuration for job bookmarks.\n\nJobBookmarksEncryptionMode (string) --The encryption mode to use for job bookmarks data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Name': 'string',
'CreatedTimestamp': datetime(2015, 1, 1)
}
Response Structure
(dict) --
Name (string) --
The name assigned to the new security configuration.
CreatedTimestamp (datetime) --
The time at which the new security configuration was created.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
:return: {
'Name': 'string',
'CreatedTimestamp': datetime(2015, 1, 1)
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
"""
pass
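# Illustrative sketch only (not part of the generated stub): a security
# configuration that applies SSE-S3 to S3 output and leaves CloudWatch and
# job-bookmark encryption disabled. The configuration name is a placeholder.
def _example_create_security_configuration():
    import boto3
    glue = boto3.client('glue')
    response = glue.create_security_configuration(
        Name='example-sse-s3',
        EncryptionConfiguration={
            'S3Encryption': [
                {'S3EncryptionMode': 'SSE-S3'},   # no KMS key needed for SSE-S3
            ],
            'CloudWatchEncryption': {'CloudWatchEncryptionMode': 'DISABLED'},
            'JobBookmarksEncryption': {'JobBookmarksEncryptionMode': 'DISABLED'},
        },
    )
    return response['Name'], response['CreatedTimestamp']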
def create_table(CatalogId=None, DatabaseName=None, TableInput=None):
"""
Creates a new table definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_table(
CatalogId='string',
DatabaseName='string',
TableInput={
'Name': 'string',
'Description': 'string',
'Owner': 'string',
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the Table . If none is supplied, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe catalog database in which to create the new table. For Hive compatibility, this name is entirely lowercase.\n
:type TableInput: dict
:param TableInput: [REQUIRED]\nThe TableInput object that defines the metadata table to create in the catalog.\n\nName (string) -- [REQUIRED]The table name. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the table.\n\nOwner (string) --The table owner.\n\nLastAccessTime (datetime) --The last time that the table was accessed.\n\nLastAnalyzedTime (datetime) --The last time that column statistics were computed for this table.\n\nRetention (integer) --The retention time for this table.\n\nStorageDescriptor (dict) --A storage descriptor containing information about the physical storage of this table.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nPartitionKeys (list) --A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\n\n'PartitionKeys': []\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nViewOriginalText (string) --If the table is a view, the original text of the view; otherwise null .\n\nViewExpandedText (string) --If the table is a view, the expanded text of the view; otherwise null .\n\nTableType (string) --The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n\nParameters (dict) --These key-value pairs define properties associated with the table.\n\n(string) --\n(string) --\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
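# Illustrative sketch only (not part of the generated stub): registering an
# external, date-partitioned CSV table in the Data Catalog. The database,
# table and S3 location are placeholder assumptions.
def _example_create_table():
    import boto3
    glue = boto3.client('glue')
    glue.create_table(
        DatabaseName='example_db',
        TableInput={
            'Name': 'events',
            'TableType': 'EXTERNAL_TABLE',
            'PartitionKeys': [
                {'Name': 'dt', 'Type': 'string'},
            ],
            'StorageDescriptor': {
                'Columns': [
                    {'Name': 'event_id', 'Type': 'string'},
                    {'Name': 'payload', 'Type': 'string'},
                ],
                'Location': 's3://example-bucket/events/',
                'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
                'OutputFormat': 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
                'SerdeInfo': {
                    'SerializationLibrary': 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',
                    'Parameters': {'field.delim': ','},
                },
            },
        },
    )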
def create_trigger(Name=None, WorkflowName=None, Type=None, Schedule=None, Predicate=None, Actions=None, Description=None, StartOnCreation=None, Tags=None):
"""
Creates a new trigger.
See also: AWS API Documentation
Exceptions
:example: response = client.create_trigger(
Name='string',
WorkflowName='string',
Type='SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
Schedule='string',
Predicate={
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
},
Actions=[
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
Description='string',
StartOnCreation=True|False,
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger.\n
:type WorkflowName: string
:param WorkflowName: The name of the workflow associated with the trigger.
:type Type: string
:param Type: [REQUIRED]\nThe type of the new trigger.\n
:type Schedule: string
:param Schedule: A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\nThis field is required when the trigger type is SCHEDULED.\n
:type Predicate: dict
:param Predicate: A predicate to specify when the new trigger should fire.\nThis field is required when the trigger type is CONDITIONAL .\n\nLogical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --A list of the conditions that determine when the trigger will fire.\n\n(dict) --Defines a condition under which a trigger fires.\n\nLogicalOperator (string) --A logical operator.\n\nJobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --The name of the crawler to which this condition applies.\n\nCrawlState (string) --The state of the crawler to which this condition applies.\n\n\n\n\n\n\n
:type Actions: list
:param Actions: [REQUIRED]\nThe actions initiated by this trigger when it fires.\n\n(dict) --Defines an action to be initiated by a trigger.\n\nJobName (string) --The name of a job to be executed.\n\nArguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --Specifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --The name of the crawler to be used with this action.\n\n\n\n\n
:type Description: string
:param Description: A description of the new trigger.
:type StartOnCreation: boolean
:param StartOnCreation: Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is not supported for ON_DEMAND triggers.
:type Tags: dict
:param Tags: The tags to use with this trigger. You may use tags to limit access to the trigger. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The name of the trigger.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
"""
pass
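# Illustrative sketch only (not part of the generated stub): a scheduled
# trigger that starts one job every day at 12:15 UTC (the cron expression is
# the one shown in the docstring above). Trigger and job names are placeholders.
def _example_create_trigger():
    import boto3
    glue = boto3.client('glue')
    response = glue.create_trigger(
        Name='example-daily-trigger',
        Type='SCHEDULED',
        Schedule='cron(15 12 * * ? *)',
        Actions=[
            {'JobName': 'example-etl-job'},
        ],
        StartOnCreation=True,   # not supported for ON_DEMAND triggers
    )
    return response['Name']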
def create_user_defined_function(CatalogId=None, DatabaseName=None, FunctionInput=None):
"""
Creates a new function definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionInput={
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the function. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which to create the function.\n
:type FunctionInput: dict
:param FunctionInput: [REQUIRED]\nA FunctionInput object that defines the function to create in the Data Catalog.\n\nFunctionName (string) --The name of the function.\n\nClassName (string) --The Java class that contains the function code.\n\nOwnerName (string) --The owner of the function.\n\nOwnerType (string) --The owner type.\n\nResourceUris (list) --The resource URIs for the function.\n\n(dict) --The URIs for function resources.\n\nResourceType (string) --The type of the resource.\n\nUri (string) --The URI for accessing the resource.\n\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
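# Illustrative sketch only (not part of the generated stub): registering a
# Hive-style UDF backed by a JAR in Amazon S3. All names and the JAR path are
# placeholder assumptions.
def _example_create_user_defined_function():
    import boto3
    glue = boto3.client('glue')
    glue.create_user_defined_function(
        DatabaseName='example_db',
        FunctionInput={
            'FunctionName': 'example_udf',
            'ClassName': 'com.example.hive.udf.ExampleUDF',
            'OwnerName': 'data-platform',
            'OwnerType': 'GROUP',
            'ResourceUris': [
                {'ResourceType': 'JAR',
                 'Uri': 's3://example-bucket/jars/example-udf.jar'},
            ],
        },
    )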
def create_workflow(Name=None, Description=None, DefaultRunProperties=None, Tags=None):
"""
Creates a new workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.create_workflow(
Name='string',
Description='string',
DefaultRunProperties={
'string': 'string'
},
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name to be assigned to the workflow. It should be unique within your account.\n
:type Description: string
:param Description: A description of the workflow.
:type DefaultRunProperties: dict
:param DefaultRunProperties: A collection of properties to be used as part of each execution of the workflow.\n\n(string) --\n(string) --\n\n\n\n
:type Tags: dict
:param Tags: The tags to be used with this workflow.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The name of the workflow which was provided as part of the request.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
"""
pass
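# Illustrative sketch only (not part of the generated stub): creating a
# workflow and a conditional trigger inside it, so the load job only runs
# after the extract job succeeds. All names are placeholder assumptions.
def _example_create_workflow():
    import boto3
    glue = boto3.client('glue')
    workflow = glue.create_workflow(
        Name='example-nightly-workflow',
        Description='Runs the extract job, then the load job on success.',
        DefaultRunProperties={'environment': 'dev'},
    )
    glue.create_trigger(
        Name='example-load-after-extract',
        WorkflowName=workflow['Name'],
        Type='CONDITIONAL',
        Predicate={
            'Conditions': [
                {'LogicalOperator': 'EQUALS',
                 'JobName': 'example-extract-job',
                 'State': 'SUCCEEDED'},
            ],
        },
        Actions=[{'JobName': 'example-load-job'}],
        StartOnCreation=True,
    )
    return workflow['Name']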
def delete_classifier(Name=None):
"""
Removes a classifier from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_classifier(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the classifier to remove.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def delete_connection(CatalogId=None, ConnectionName=None):
"""
Deletes a connection from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_connection(
CatalogId='string',
ConnectionName='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.
:type ConnectionName: string
:param ConnectionName: [REQUIRED]\nThe name of the connection to delete.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_crawler(Name=None):
"""
Removes a specified crawler from the AWS Glue Data Catalog, unless the crawler state is RUNNING .
See also: AWS API Documentation
Exceptions
:example: response = client.delete_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the crawler to remove.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def delete_database(CatalogId=None, Name=None):
"""
Removes a specified database from a Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_database(
CatalogId='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the database resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the database to delete. For Hive compatibility, this must be all lowercase.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_dev_endpoint(EndpointName=None):
"""
Deletes a specified development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_dev_endpoint(
EndpointName='string'
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nThe name of the DevEndpoint .\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
"""
pass
def delete_job(JobName=None):
"""
Deletes a specified job definition. If the job definition is not found, no exception is thrown.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_job(
JobName='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to delete.\n
:rtype: dict
ReturnsResponse Syntax{
'JobName': 'string'
}
Response Structure
(dict) --
JobName (string) --The name of the job definition that was deleted.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobName': 'string'
}
"""
pass
def delete_ml_transform(TransformId=None):
"""
Deletes an AWS Glue machine learning transform. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. If you no longer need a transform, you can delete it by calling DeleteMLTransform . However, any AWS Glue jobs that still reference the deleted transform will no longer succeed.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_ml_transform(
TransformId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the transform to delete.\n
:rtype: dict
ReturnsResponse Syntax{
'TransformId': 'string'
}
Response Structure
(dict) --
TransformId (string) --The unique identifier of the transform that was deleted.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string'
}
"""
pass
def delete_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValues=None):
"""
Deletes a specified partition.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionValues=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition to be deleted resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table in question resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table that contains the partition to be deleted.\n
:type PartitionValues: list
:param PartitionValues: [REQUIRED]\nThe values that define the partition.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_resource_policy(PolicyHashCondition=None):
"""
Deletes a specified policy.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_resource_policy(
PolicyHashCondition='string'
)
:type PolicyHashCondition: string
:param PolicyHashCondition: The hash value returned when this policy was set.
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
"""
pass
def delete_security_configuration(Name=None):
"""
Deletes a specified security configuration.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_security_configuration(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the security configuration to delete.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def delete_table(CatalogId=None, DatabaseName=None, Name=None):
"""
Removes a table definition from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_table(
CatalogId='string',
DatabaseName='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type Name: string
:param Name: [REQUIRED]\nThe name of the table to be deleted. For Hive compatibility, this name is entirely lowercase.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionId=None):
"""
Deletes a specified version of a table.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_table_version(
CatalogId='string',
DatabaseName='string',
TableName='string',
VersionId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type VersionId: string
:param VersionId: [REQUIRED]\nThe ID of the table version to be deleted. A VersionID is a string representation of an integer. Each version is incremented by 1.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_trigger(Name=None):
"""
Deletes a specified trigger. If the trigger is not found, no exception is thrown.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to delete.\n
:rtype: dict
ReturnsResponse Syntax{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --The name of the trigger that was deleted.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
"""
pass
def delete_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None):
"""
Deletes an existing function definition from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionName='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the function to be deleted is located. If none is supplied, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the function is located.\n
:type FunctionName: string
:param FunctionName: [REQUIRED]\nThe name of the function definition to be deleted.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_workflow(Name=None):
"""
Deletes a workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_workflow(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow to be deleted.\n
:rtype: dict
ReturnsResponse Syntax{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --Name of the workflow specified in input.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
"""
pass
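# Illustrative sketch only (not part of the generated stub): tearing down the
# trigger, job and workflow used in the sketches above. Per the docstrings,
# delete_trigger and delete_job do not raise when the entity is missing, so
# this is safe to re-run; all names are placeholder assumptions.
def _example_cleanup():
    import boto3
    glue = boto3.client('glue')
    glue.delete_trigger(Name='example-daily-trigger')
    glue.delete_job(JobName='example-etl-job')
    glue.delete_workflow(Name='example-nightly-workflow')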
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned URL given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to\nClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model.
"""
pass
def get_catalog_import_status(CatalogId=None):
"""
Retrieves the status of a migration operation.
See also: AWS API Documentation
Exceptions
:example: response = client.get_catalog_import_status(
CatalogId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the catalog to migrate. Currently, this should be the AWS account ID.
:rtype: dict
ReturnsResponse Syntax{
'ImportStatus': {
'ImportCompleted': True|False,
'ImportTime': datetime(2015, 1, 1),
'ImportedBy': 'string'
}
}
Response Structure
(dict) --
ImportStatus (dict) --The status of the specified catalog migration.
ImportCompleted (boolean) --
True if the migration has completed, or False otherwise.
ImportTime (datetime) --The time that the migration was started.
ImportedBy (string) --The name of the person who initiated the migration.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'ImportStatus': {
'ImportCompleted': True|False,
'ImportTime': datetime(2015, 1, 1),
'ImportedBy': 'string'
}
}
"""
pass
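# Illustrative sketch only (not part of the generated stub): checking the Data
# Catalog migration status for the calling account. The account ID is a
# placeholder assumption.
def _example_get_catalog_import_status():
    import boto3
    glue = boto3.client('glue')
    status = glue.get_catalog_import_status(CatalogId='123456789012')['ImportStatus']
    if status['ImportCompleted']:
        return 'imported by {} at {}'.format(status['ImportedBy'], status['ImportTime'])
    return 'import not completed'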
def get_classifier(Name=None):
"""
Retrieve a classifier by name.
See also: AWS API Documentation
Exceptions
:example: response = client.get_classifier(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the classifier to retrieve.\n
:rtype: dict
ReturnsResponse Syntax{
'Classifier': {
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
}
}
Response Structure
(dict) --
Classifier (dict) --The requested classifier.
GrokClassifier (dict) --A classifier that uses grok .
Name (string) --The name of the classifier.
Classification (string) --An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, and so on.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
GrokPattern (string) --The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers .
CustomPatterns (string) --Optional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers .
XMLClassifier (dict) --A classifier for XML content.
Name (string) --The name of the classifier.
Classification (string) --An identifier of the data format that the classifier matches.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
RowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This can\'t identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not).
JsonClassifier (dict) --A classifier for JSON content.
Name (string) --The name of the classifier.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
JsonPath (string) --A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .
CsvClassifier (dict) --A classifier for comma-separated values (CSV).
Name (string) --The name of the classifier.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
Delimiter (string) --A custom symbol to denote what separates each column entry in the row.
QuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.
ContainsHeader (string) --Indicates whether the CSV file contains a header.
Header (list) --A list of strings representing column names.
(string) --
DisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true .
AllowSingleColumn (boolean) --Enables the processing of files that contain only one column.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Classifier': {
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
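A minimal usage sketch (illustrative only; the classifier name is a placeholder assumption):
    import boto3
    glue = boto3.client('glue')
    classifier = glue.get_classifier(Name='my-csv-classifier')['Classifier']
    # Only one of the classifier variants is present in the response.
    if 'CsvClassifier' in classifier:
        print('Delimiter:', classifier['CsvClassifier'].get('Delimiter'))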
"""
pass
def get_classifiers(MaxResults=None, NextToken=None):
"""
Lists all classifier objects in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_classifiers(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The size of the list to return (optional).
:type NextToken: string
:param NextToken: An optional continuation token.
:rtype: dict
Returns: Response Syntax
{
'Classifiers': [
{
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Classifiers (list) --
The requested list of classifier objects.
(dict) --
Classifiers are triggered during a crawl task. A classifier checks whether a given file is in a format it can handle. If it is, the classifier creates a schema in the form of a StructType object that matches that data format.
You can use the standard classifiers that AWS Glue provides, or you can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier can be a grok classifier, an XML classifier, a JSON classifier, or a custom CSV classifier, as specified in one of the fields in the Classifier object.
GrokClassifier (dict) --
A classifier that uses grok .
Name (string) --
The name of the classifier.
Classification (string) --
An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, and so on.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
GrokPattern (string) --
The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers .
CustomPatterns (string) --
Optional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers .
XMLClassifier (dict) --
A classifier for XML content.
Name (string) --
The name of the classifier.
Classification (string) --
An identifier of the data format that the classifier matches.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
RowTag (string) --
The XML tag designating the element that contains each record in an XML document being parsed. This can\'t identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not).
JsonClassifier (dict) --
A classifier for JSON content.
Name (string) --
The name of the classifier.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
JsonPath (string) --
A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .
CsvClassifier (dict) --
A classifier for comma-separated values (CSV).
Name (string) --
The name of the classifier.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
Delimiter (string) --
A custom symbol to denote what separates each column entry in the row.
QuoteSymbol (string) --
A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.
ContainsHeader (string) --
Indicates whether the CSV file contains a header.
Header (list) --
A list of strings representing column names.
(string) --
DisableValueTrimming (boolean) --
Specifies not to trim values before identifying the type of column values. The default value is true .
AllowSingleColumn (boolean) --
Enables the processing of files that contain only one column.
NextToken (string) --
A continuation token.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Classifiers': [
{
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
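A minimal pagination sketch (illustrative only), iterating with the documented NextToken parameter:
    import boto3
    glue = boto3.client('glue')
    kwargs = {'MaxResults': 50}
    while True:
        page = glue.get_classifiers(**kwargs)
        for classifier in page['Classifiers']:
            # Each entry holds one classifier variant (grok, XML, JSON, or CSV).
            print(list(classifier.keys()))
        if 'NextToken' not in page:
            break
        kwargs['NextToken'] = page['NextToken']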
"""
pass
def get_connection(CatalogId=None, Name=None, HidePassword=None):
"""
Retrieves a connection definition from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_connection(
CatalogId='string',
Name='string',
HidePassword=True|False
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the connection definition to retrieve.\n
:type HidePassword: boolean
:param HidePassword: Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but it does have permission to access the rest of the connection properties.
:rtype: dict
Returns: Response Syntax
{
'Connection': {
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
}
}
Response Structure
(dict) --
Connection (dict) --
The requested connection definition.
Name (string) --
The name of the connection definition.
Description (string) --
The description of the connection.
ConnectionType (string) --
The type of the connection. Currently, only JDBC is supported; SFTP is not supported.
MatchCriteria (list) --
A list of criteria that can be used in selecting this connection.
(string) --
ConnectionProperties (dict) --
These key-value pairs define parameters for the connection:
HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME - The name under which to log in to the database. The value string for USER_NAME is "USERNAME ".
PASSWORD - A password, if one is used, for the user name.
ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.
JDBC_ENGINE - The name of the JDBC engine to use.
JDBC_ENGINE_VERSION - The version of the JDBC engine to use.
CONFIG_FILES - (Reserved for future use.)
INSTANCE_ID - The instance ID to use.
JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer\'s root certificate. AWS Glue uses this root certificate to validate the customer\'s certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false . AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip AWS Glue\'s validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN ; in Microsoft SQL Server, this is used as the hostNameInCertificate .
CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.
KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.
(string) --
(string) --
PhysicalConnectionRequirements (dict) --
A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to make this connection successfully.
SubnetId (string) --
The subnet ID used by the connection.
SecurityGroupIdList (list) --
The security group ID list used by the connection.
(string) --
AvailabilityZone (string) --
The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.
CreationTime (datetime) --
The time that this connection definition was created.
LastUpdatedTime (datetime) --
The last time that this connection definition was updated.
LastUpdatedBy (string) --
The user, group, or role that last updated this connection definition.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Connection': {
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
}
}
:returns:
(string) --
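A minimal usage sketch (illustrative only; the connection name is a placeholder assumption):
    import boto3
    glue = boto3.client('glue')
    # HidePassword=True returns the connection metadata without the password.
    conn = glue.get_connection(Name='my-jdbc-connection', HidePassword=True)['Connection']
    print(conn['ConnectionType'], conn['ConnectionProperties'].get('JDBC_CONNECTION_URL'))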
"""
pass
def get_connections(CatalogId=None, Filter=None, HidePassword=None, NextToken=None, MaxResults=None):
"""
Retrieves a list of connection definitions from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_connections(
CatalogId='string',
Filter={
'MatchCriteria': [
'string',
],
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA'
},
HidePassword=True|False,
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connections reside. If none is provided, the AWS account ID is used by default.
:type Filter: dict
:param Filter: A filter that controls which connections are returned.\n\nMatchCriteria (list) --A criteria string that must match the criteria recorded in the connection definition for that connection definition to be returned.\n\n(string) --\n\n\nConnectionType (string) --The type of connections to return. Currently, only JDBC is supported; SFTP is not supported.\n\n\n
:type HidePassword: boolean
:param HidePassword: Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but it does have permission to access the rest of the connection properties.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of connections to return in one response.
:rtype: dict
Returns: Response Syntax
{
'ConnectionList': [
{
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
ConnectionList (list) --
A list of requested connection definitions.
(dict) --
Defines a connection to a data source.
Name (string) --
The name of the connection definition.
Description (string) --
The description of the connection.
ConnectionType (string) --
The type of the connection. Currently, only JDBC is supported; SFTP is not supported.
MatchCriteria (list) --
A list of criteria that can be used in selecting this connection.
(string) --
ConnectionProperties (dict) --
These key-value pairs define parameters for the connection:
HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME - The name under which to log in to the database. The value string for USER_NAME is "USERNAME ".
PASSWORD - A password, if one is used, for the user name.
ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.
JDBC_ENGINE - The name of the JDBC engine to use.
JDBC_ENGINE_VERSION - The version of the JDBC engine to use.
CONFIG_FILES - (Reserved for future use.)
INSTANCE_ID - The instance ID to use.
JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer\'s root certificate. AWS Glue uses this root certificate to validate the customer\'s certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false . AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip AWS Glue\'s validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN ; in Microsoft SQL Server, this is used as the hostNameInCertificate .
CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.
KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.
(string) --
(string) --
PhysicalConnectionRequirements (dict) --
A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to make this connection successfully.
SubnetId (string) --
The subnet ID used by the connection.
SecurityGroupIdList (list) --
The security group ID list used by the connection.
(string) --
AvailabilityZone (string) --
The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.
CreationTime (datetime) --
The time that this connection definition was created.
LastUpdatedTime (datetime) --
The last time that this connection definition was updated.
LastUpdatedBy (string) --
The user, group, or role that last updated this connection definition.
NextToken (string) --
A continuation token, if the list of connections returned does not include the last of the filtered connections.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'ConnectionList': [
{
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
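A minimal usage sketch (illustrative only) that filters the results down to JDBC connections:
    import boto3
    glue = boto3.client('glue')
    resp = glue.get_connections(
        Filter={'ConnectionType': 'JDBC'},
        HidePassword=True,
        MaxResults=100,
    )
    for conn in resp['ConnectionList']:
        print(conn['Name'], conn['ConnectionType'])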
"""
pass
def get_crawler(Name=None):
"""
Retrieves metadata for a specified crawler.
See also: AWS API Documentation
Exceptions
:example: response = client.get_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the crawler to retrieve metadata for.\n
:rtype: dict
Returns: Response Syntax
{
'Crawler': {
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
}
}
Response Structure
(dict) --
Crawler (dict) --The metadata for the specified crawler.
Name (string) --The name of the crawler.
Role (string) --The Amazon Resource Name (ARN) of an IAM role that\'s used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.
Targets (dict) --A collection of targets to crawl.
S3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.
(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).
Path (string) --The path to the Amazon S3 target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
JdbcTargets (list) --Specifies JDBC targets.
(dict) --Specifies a JDBC data store to crawl.
ConnectionName (string) --The name of the connection to use to connect to the JDBC target.
Path (string) --The path of the JDBC target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
DynamoDBTargets (list) --Specifies Amazon DynamoDB targets.
(dict) --Specifies an Amazon DynamoDB table to crawl.
Path (string) --The name of the DynamoDB table to crawl.
CatalogTargets (list) --Specifies AWS Glue Data Catalog targets.
(dict) --Specifies an AWS Glue Data Catalog target.
DatabaseName (string) --The name of the database to be synchronized.
Tables (list) --A list of the tables to be synchronized.
(string) --
DatabaseName (string) --The name of the database in which the crawler\'s output is stored.
Description (string) --A description of the crawler.
Classifiers (list) --A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.
(string) --
SchemaChangePolicy (dict) --The policy that specifies update and delete behaviors for the crawler.
UpdateBehavior (string) --The update behavior when the crawler finds a changed schema.
DeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.
State (string) --Indicates whether the crawler is running, or whether a run is pending.
TablePrefix (string) --The prefix added to the names of tables that are created.
Schedule (dict) --For scheduled crawlers, the schedule when the crawler runs.
ScheduleExpression (string) --A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
State (string) --The state of the schedule.
CrawlElapsedTime (integer) --If the crawler is running, contains the total time elapsed since the last crawl began.
CreationTime (datetime) --The time that the crawler was created.
LastUpdated (datetime) --The time that the crawler was last updated.
LastCrawl (dict) --The status of the last crawl, and potentially error information if an error occurred.
Status (string) --Status of the last crawl.
ErrorMessage (string) --If an error occurred, the error information about the last crawl.
LogGroup (string) --The log group for the last crawl.
LogStream (string) --The log stream for the last crawl.
MessagePrefix (string) --The prefix for a message about this crawl.
StartTime (datetime) --The time at which the crawl started.
Version (integer) --The version of the crawler.
Configuration (string) --Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
CrawlerSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used by this crawler.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Crawler': {
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
}
}
:returns:
(string) --
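A minimal usage sketch (illustrative only; the crawler name is a placeholder assumption):
    import boto3
    glue = boto3.client('glue')
    crawler = glue.get_crawler(Name='my-crawler')['Crawler']
    print('State:', crawler['State'])
    # LastCrawl is only present once the crawler has run at least once.
    last_crawl = crawler.get('LastCrawl')
    if last_crawl:
        print('Last crawl status:', last_crawl['Status'])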
"""
pass
def get_crawler_metrics(CrawlerNameList=None, MaxResults=None, NextToken=None):
"""
Retrieves metrics about specified crawlers.
See also: AWS API Documentation
Exceptions
:example: response = client.get_crawler_metrics(
CrawlerNameList=[
'string',
],
MaxResults=123,
NextToken='string'
)
:type CrawlerNameList: list
:param CrawlerNameList: A list of the names of crawlers about which to retrieve metrics.\n\n(string) --\n\n
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:rtype: dict
Returns: Response Syntax
{
'CrawlerMetricsList': [
{
'CrawlerName': 'string',
'TimeLeftSeconds': 123.0,
'StillEstimating': True|False,
'LastRuntimeSeconds': 123.0,
'MedianRuntimeSeconds': 123.0,
'TablesCreated': 123,
'TablesUpdated': 123,
'TablesDeleted': 123
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
CrawlerMetricsList (list) --
A list of metrics for the specified crawler.
(dict) --
Metrics for a specified crawler.
CrawlerName (string) --
The name of the crawler.
TimeLeftSeconds (float) --
The estimated time left to complete a running crawl.
StillEstimating (boolean) --
True if the crawler is still estimating how long it will take to complete this run.
LastRuntimeSeconds (float) --
The duration of the crawler\'s most recent run, in seconds.
MedianRuntimeSeconds (float) --
The median duration of this crawler\'s runs, in seconds.
TablesCreated (integer) --
The number of tables created by this crawler.
TablesUpdated (integer) --
The number of tables updated by this crawler.
TablesDeleted (integer) --
The number of tables deleted by this crawler.
NextToken (string) --
A continuation token, if the returned list does not contain the last metric available.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'CrawlerMetricsList': [
{
'CrawlerName': 'string',
'TimeLeftSeconds': 123.0,
'StillEstimating': True|False,
'LastRuntimeSeconds': 123.0,
'MedianRuntimeSeconds': 123.0,
'TablesCreated': 123,
'TablesUpdated': 123,
'TablesDeleted': 123
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.OperationTimeoutException
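A minimal usage sketch (illustrative only; the crawler name is a placeholder assumption):
    import boto3
    glue = boto3.client('glue')
    resp = glue.get_crawler_metrics(CrawlerNameList=['my-crawler'])
    for metrics in resp['CrawlerMetricsList']:
        print(metrics['CrawlerName'], 'tables created:', metrics['TablesCreated'])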
"""
pass
def get_crawlers(MaxResults=None, NextToken=None):
"""
Retrieves metadata for all crawlers defined in the customer account.
See also: AWS API Documentation
Exceptions
:example: response = client.get_crawlers(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The number of crawlers to return on each call.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:rtype: dict
Returns: Response Syntax
{
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Crawlers (list) --
A list of crawler metadata.
(dict) --
Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.
Name (string) --
The name of the crawler.
Role (string) --
The Amazon Resource Name (ARN) of an IAM role that\'s used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.
Targets (dict) --
A collection of targets to crawl.
S3Targets (list) --
Specifies Amazon Simple Storage Service (Amazon S3) targets.
(dict) --
Specifies a data store in Amazon Simple Storage Service (Amazon S3).
Path (string) --
The path to the Amazon S3 target.
Exclusions (list) --
A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
JdbcTargets (list) --
Specifies JDBC targets.
(dict) --
Specifies a JDBC data store to crawl.
ConnectionName (string) --
The name of the connection to use to connect to the JDBC target.
Path (string) --
The path of the JDBC target.
Exclusions (list) --
A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
DynamoDBTargets (list) --
Specifies Amazon DynamoDB targets.
(dict) --
Specifies an Amazon DynamoDB table to crawl.
Path (string) --
The name of the DynamoDB table to crawl.
CatalogTargets (list) --
Specifies AWS Glue Data Catalog targets.
(dict) --
Specifies an AWS Glue Data Catalog target.
DatabaseName (string) --
The name of the database to be synchronized.
Tables (list) --
A list of the tables to be synchronized.
(string) --
DatabaseName (string) --
The name of the database in which the crawler\'s output is stored.
Description (string) --
A description of the crawler.
Classifiers (list) --
A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.
(string) --
SchemaChangePolicy (dict) --
The policy that specifies update and delete behaviors for the crawler.
UpdateBehavior (string) --
The update behavior when the crawler finds a changed schema.
DeleteBehavior (string) --
The deletion behavior when the crawler finds a deleted object.
State (string) --
Indicates whether the crawler is running, or whether a run is pending.
TablePrefix (string) --
The prefix added to the names of tables that are created.
Schedule (dict) --
For scheduled crawlers, the schedule when the crawler runs.
ScheduleExpression (string) --
A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
State (string) --
The state of the schedule.
CrawlElapsedTime (integer) --
If the crawler is running, contains the total time elapsed since the last crawl began.
CreationTime (datetime) --
The time that the crawler was created.
LastUpdated (datetime) --
The time that the crawler was last updated.
LastCrawl (dict) --
The status of the last crawl, and potentially error information if an error occurred.
Status (string) --
Status of the last crawl.
ErrorMessage (string) --
If an error occurred, the error information about the last crawl.
LogGroup (string) --
The log group for the last crawl.
LogStream (string) --
The log stream for the last crawl.
MessagePrefix (string) --
The prefix for a message about this crawl.
StartTime (datetime) --
The time at which the crawl started.
Version (integer) --
The version of the crawler.
Configuration (string) --
Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
CrawlerSecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used by this crawler.
NextToken (string) --
A continuation token, if the returned list has not reached the end of those defined in this customer account.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
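A minimal usage sketch (illustrative only); for accounts with many crawlers, feed NextToken back into the next call as documented above:
    import boto3
    glue = boto3.client('glue')
    page = glue.get_crawlers(MaxResults=50)
    for crawler in page['Crawlers']:
        print(crawler['Name'], crawler['State'])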
"""
pass
def get_data_catalog_encryption_settings(CatalogId=None):
"""
Retrieves the security configuration for a specified catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_data_catalog_encryption_settings(
CatalogId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog to retrieve the security configuration for. If none is provided, the AWS account ID is used by default.
:rtype: dict
Returns: Response Syntax
{
'DataCatalogEncryptionSettings': {
'EncryptionAtRest': {
'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',
'SseAwsKmsKeyId': 'string'
},
'ConnectionPasswordEncryption': {
'ReturnConnectionPasswordEncrypted': True|False,
'AwsKmsKeyId': 'string'
}
}
}
Response Structure
(dict) --
DataCatalogEncryptionSettings (dict) --The requested security configuration.
EncryptionAtRest (dict) --Specifies the encryption-at-rest configuration for the Data Catalog.
CatalogEncryptionMode (string) --The encryption-at-rest mode for encrypting Data Catalog data.
SseAwsKmsKeyId (string) --The ID of the AWS KMS key to use for encryption at rest.
ConnectionPasswordEncryption (dict) --When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.
ReturnConnectionPasswordEncrypted (boolean) --When the ReturnConnectionPasswordEncrypted flag is set to "true", passwords remain encrypted in the responses of GetConnection and GetConnections . This encryption takes effect independently from catalog encryption.
AwsKmsKeyId (string) --An AWS KMS key that is used to encrypt the connection password.
If connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog.
You can set the decrypt permission to enable or restrict access on the password key according to your security requirements.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'DataCatalogEncryptionSettings': {
'EncryptionAtRest': {
'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',
'SseAwsKmsKeyId': 'string'
},
'ConnectionPasswordEncryption': {
'ReturnConnectionPasswordEncrypted': True|False,
'AwsKmsKeyId': 'string'
}
}
}
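A minimal usage sketch (illustrative only; omitting CatalogId targets the caller's own account, as documented above):
    import boto3
    glue = boto3.client('glue')
    settings = glue.get_data_catalog_encryption_settings()['DataCatalogEncryptionSettings']
    print('Encryption at rest:', settings['EncryptionAtRest']['CatalogEncryptionMode'])
    print('Password encryption enabled:', settings['ConnectionPasswordEncryption']['ReturnConnectionPasswordEncrypted'])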
"""
pass
def get_database(CatalogId=None, Name=None):
"""
Retrieves the definition of a specified database.
See also: AWS API Documentation
Exceptions
:example: response = client.get_database(
CatalogId='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the database resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the database to retrieve. For Hive compatibility, this should be all lowercase.\n
:rtype: dict
Returns: Response Syntax
{
'Database': {
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
}
Response Structure
(dict) --
Database (dict) --
The definition of the specified database in the Data Catalog.
Name (string) --
The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.
Description (string) --
A description of the database.
LocationUri (string) --
The location of the database (for example, an HDFS path).
Parameters (dict) --
These key-value pairs define parameters and properties of the database.
(string) --
(string) --
CreateTime (datetime) --
The time at which the metadata database was created in the catalog.
CreateTableDefaultPermissions (list) --
Creates a set of default permissions on the table for principals.
(dict) --
Permissions granted to a principal.
Principal (dict) --
The principal who is granted permissions.
DataLakePrincipalIdentifier (string) --
An identifier for the AWS Lake Formation principal.
Permissions (list) --
The permissions that are granted to the principal.
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Database': {
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
}
:returns:
(string) --
(string) --
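A minimal usage sketch (illustrative only; the database name is a placeholder assumption):
    import boto3
    glue = boto3.client('glue')
    db = glue.get_database(Name='my_database')['Database']
    # LocationUri is optional, so read it defensively.
    print(db['Name'], db.get('LocationUri'))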
"""
pass
def get_databases(CatalogId=None, NextToken=None, MaxResults=None):
"""
Retrieves all databases defined in a given Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_databases(
CatalogId='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog from which to retrieve Databases . If none is provided, the AWS account ID is used by default.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of databases to return in one response.
:rtype: dict
Returns: Response Syntax
{
'DatabaseList': [
{
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
DatabaseList (list) --
A list of Database objects from the specified catalog.
(dict) --
The Database object represents a logical grouping of tables that might reside in a Hive metastore or an RDBMS.
Name (string) --
The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.
Description (string) --
A description of the database.
LocationUri (string) --
The location of the database (for example, an HDFS path).
Parameters (dict) --
These key-value pairs define parameters and properties of the database.
(string) --
(string) --
CreateTime (datetime) --
The time at which the metadata database was created in the catalog.
CreateTableDefaultPermissions (list) --
Creates a set of default permissions on the table for principals.
(dict) --
Permissions granted to a principal.
Principal (dict) --
The principal who is granted permissions.
DataLakePrincipalIdentifier (string) --
An identifier for the AWS Lake Formation principal.
Permissions (list) --
The permissions that are granted to the principal.
(string) --
NextToken (string) --
A continuation token for paginating the returned list of databases, returned if the current segment of the list is not the last.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'DatabaseList': [
{
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
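A hedged pagination sketch assuming the boto3 paginator for this operation is available (recent boto3 releases list a 'get_databases' paginator for AWS Glue; treat the paginator name as an assumption otherwise):
    import boto3
    glue = boto3.client('glue')
    paginator = glue.get_paginator('get_databases')
    for page in paginator.paginate():
        for db in page['DatabaseList']:
            print(db['Name'])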
"""
pass
def get_dataflow_graph(PythonScript=None):
"""
Transforms a Python script into a directed acyclic graph (DAG).
See also: AWS API Documentation
Exceptions
:example: response = client.get_dataflow_graph(
PythonScript='string'
)
:type PythonScript: string
:param PythonScript: The Python script to transform.
:rtype: dict
Returns: Response Syntax
{
'DagNodes': [
{
'Id': 'string',
'NodeType': 'string',
'Args': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'LineNumber': 123
},
],
'DagEdges': [
{
'Source': 'string',
'Target': 'string',
'TargetParameter': 'string'
},
]
}
Response Structure
(dict) --
DagNodes (list) --A list of the nodes in the resulting DAG.
(dict) --Represents a node in a directed acyclic graph (DAG)
Id (string) --A node identifier that is unique within the node\'s graph.
NodeType (string) --The type of node that this is.
Args (list) --Properties of the node, in the form of name-value pairs.
(dict) --An argument or property of a node.
Name (string) --The name of the argument or property.
Value (string) --The value of the argument or property.
Param (boolean) --True if the value is used as a parameter.
LineNumber (integer) --The line number of the node.
DagEdges (list) --A list of the edges in the resulting DAG.
(dict) --Represents a directional edge in a directed acyclic graph (DAG).
Source (string) --The ID of the node at which the edge starts.
Target (string) --The ID of the node at which the edge ends.
TargetParameter (string) --The target of the edge.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'DagNodes': [
{
'Id': 'string',
'NodeType': 'string',
'Args': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'LineNumber': 123
},
],
'DagEdges': [
{
'Source': 'string',
'Target': 'string',
'TargetParameter': 'string'
},
]
}
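A minimal usage sketch (illustrative only; the script path is a placeholder assumption, and the script should be an AWS Glue ETL script for the DAG to be meaningful):
    import boto3
    glue = boto3.client('glue')
    with open('my_etl_script.py') as script_file:
        graph = glue.get_dataflow_graph(PythonScript=script_file.read())
    for node in graph['DagNodes']:
        print(node['Id'], node['NodeType'])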
"""
pass
def get_dev_endpoint(EndpointName=None):
"""
Retrieves information about a specified development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.get_dev_endpoint(
EndpointName='string'
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nName of the DevEndpoint to retrieve information for.\n
:rtype: dict
Returns: Response Syntax
{
'DevEndpoint': {
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
}
}
Response Structure
(dict) --
DevEndpoint (dict) --A DevEndpoint definition.
EndpointName (string) --The name of the DevEndpoint .
RoleArn (string) --The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .
SecurityGroupIds (list) --A list of security group identifiers used in this DevEndpoint .
(string) --
SubnetId (string) --The subnet ID for this DevEndpoint .
YarnEndpointAddress (string) --The YARN endpoint address used by this DevEndpoint .
PrivateAddress (string) --A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.
ZeppelinRemoteSparkInterpreterPort (integer) --The Apache Zeppelin port for the remote Apache Spark interpreter.
PublicAddress (string) --The public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .
Status (string) --The current status of this DevEndpoint .
WorkerType (string) --The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Development endpoints that are created without specifying a Glue version default to Glue 0.9.
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated to the development endpoint.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X.
NumberOfNodes (integer) --The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .
AvailabilityZone (string) --The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.
Note
You can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.
ExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
Note
You can only use pure Java/Scala libraries with a DevEndpoint .
FailureReason (string) --The reason for a current failure in this DevEndpoint .
LastUpdateStatus (string) --The status of the last update.
CreatedTimestamp (datetime) --The point in time at which this DevEndpoint was created.
LastModifiedTimestamp (datetime) --The point in time at which this DevEndpoint was last modified.
PublicKey (string) --The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
PublicKeys (list) --A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.
Note
If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.
(string) --
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this DevEndpoint .
Arguments (dict) --A map of arguments used to configure the DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'DevEndpoint': {
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
}
}
:returns:
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
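A minimal usage sketch (illustrative only; the endpoint name is a placeholder assumption):
    import boto3
    glue = boto3.client('glue')
    endpoint = glue.get_dev_endpoint(EndpointName='my-dev-endpoint')['DevEndpoint']
    # PublicAddress is only set for non-VPC endpoints; fall back to the private address.
    print(endpoint['Status'], endpoint.get('PublicAddress') or endpoint.get('PrivateAddress'))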
"""
pass
def get_dev_endpoints(MaxResults=None, NextToken=None):
"""
Retrieves all the development endpoints in this AWS account.
See also: AWS API Documentation
Exceptions
:example: response = client.get_dev_endpoints(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The maximum size of information to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:rtype: dict
Returns: Response Syntax
{
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
DevEndpoints (list) --
A list of DevEndpoint definitions.
(dict) --
A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.
EndpointName (string) --
The name of the DevEndpoint .
RoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .
SecurityGroupIds (list) --
A list of security group identifiers used in this DevEndpoint .
(string) --
SubnetId (string) --
The subnet ID for this DevEndpoint .
YarnEndpointAddress (string) --
The YARN endpoint address used by this DevEndpoint .
PrivateAddress (string) --
A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.
ZeppelinRemoteSparkInterpreterPort (integer) --
The Apache Zeppelin port for the remote Apache Spark interpreter.
PublicAddress (string) --
The public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .
Status (string) --
The current status of this DevEndpoint .
WorkerType (string) --
The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Development endpoints that are created without specifying a Glue version default to Glue 0.9.
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated to the development endpoint.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
NumberOfNodes (integer) --
The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .
AvailabilityZone (string) --
The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --
The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --
The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.
Note
You can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.
ExtraJarsS3Path (string) --
The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
Note
You can only use pure Java/Scala libraries with a DevEndpoint .
FailureReason (string) --
The reason for a current failure in this DevEndpoint .
LastUpdateStatus (string) --
The status of the last update.
CreatedTimestamp (datetime) --
The point in time at which this DevEndpoint was created.
LastModifiedTimestamp (datetime) --
The point in time at which this DevEndpoint was last modified.
PublicKey (string) --
The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
PublicKeys (list) --
A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.
Note
If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.
(string) --
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this DevEndpoint .
Arguments (dict) --
A map of arguments used to configure the DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
NextToken (string) --
A continuation token, if not all DevEndpoint definitions have yet been returned.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
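# Illustrative sketch: paginating get_dev_endpoints with NextToken, as described in the
# docstring above. Assumes boto3 is installed and AWS credentials/region are configured.
def _example_get_dev_endpoints_all():
    import boto3
    client = boto3.client('glue')
    endpoints, token = [], None
    while True:
        kwargs = {'MaxResults': 25}
        if token:
            kwargs['NextToken'] = token
        response = client.get_dev_endpoints(**kwargs)
        endpoints.extend(response.get('DevEndpoints', []))
        token = response.get('NextToken')
        if not token:
            return endpoints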
def get_job(JobName=None):
"""
Retrieves an existing job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job(
JobName='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to retrieve.\n
:rtype: dict
ReturnsResponse Syntax
{
'Job': {
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
Response Structure
(dict) --
Job (dict) --The requested job definition.
Name (string) --The name you assign to this job definition.
Description (string) --A description of the job.
LogUri (string) --This field is reserved for future use.
Role (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job.
CreatedOn (datetime) --The time and date that this job definition was created.
LastModifiedOn (datetime) --The last point in time when this job definition was modified.
ExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
MaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.
Command (dict) --The JobCommand that executes this job.
Name (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .
ScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.
PythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.
DefaultArguments (dict) --The default arguments for this job, specified as name-value pairs.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
NonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.
(string) --
(string) --
Connections (dict) --The connections used for this job.
Connections (list) --A list of connections used by the job.
(string) --
MaxRetries (integer) --The maximum number of times to retry this job after a JobRun fails.
AllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Timeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.
NotificationProperty (dict) --Specifies configuration properties of a job notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Job': {
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
:returns:
(string) --
(string) --
"""
pass
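# Illustrative sketch: reading the Command and DefaultArguments of a job definition
# returned by get_job. The job name 'my-etl-job' is a hypothetical placeholder; assumes
# boto3 and configured AWS credentials.
def _example_get_job_command():
    import boto3
    client = boto3.client('glue')
    job = client.get_job(JobName='my-etl-job')['Job']
    command = job['Command']  # e.g. Name 'glueetl' or 'pythonshell', plus ScriptLocation
    default_args = job.get('DefaultArguments', {})
    return command, default_args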
def get_job_bookmark(JobName=None, RunId=None):
"""
Returns information on a job bookmark entry.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job_bookmark(
JobName='string',
RunId='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job in question.\n
:type RunId: string
:param RunId: The unique run identifier associated with this job run.
:rtype: dict
ReturnsResponse Syntax
{
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
Response Structure
(dict) --
JobBookmarkEntry (dict) --
A structure that defines a point that a job can resume processing.
JobName (string) --
The name of the job in question.
Version (integer) --
The version of the job.
Run (integer) --
The run ID number.
Attempt (integer) --
The attempt ID number.
PreviousRunId (string) --
The unique run identifier associated with the previous job run.
RunId (string) --
The run ID number.
JobBookmark (string) --
The bookmark itself.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ValidationException
:return: {
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ValidationException
"""
pass
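# Illustrative sketch: retrieving the bookmark entry for a job, per the response
# structure above. 'my-etl-job' is a hypothetical placeholder; assumes boto3 and
# configured AWS credentials.
def _example_get_job_bookmark():
    import boto3
    client = boto3.client('glue')
    entry = client.get_job_bookmark(JobName='my-etl-job')['JobBookmarkEntry']
    # Version and Run identify how far the bookmark has advanced.
    return entry['Version'], entry['Run'], entry.get('JobBookmark')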
def get_job_run(JobName=None, RunId=None, PredecessorsIncluded=None):
"""
Retrieves the metadata for a given job run.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job_run(
JobName='string',
RunId='string',
PredecessorsIncluded=True|False
)
:type JobName: string
:param JobName: [REQUIRED]\nName of the job definition being run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the job run.\n
:type PredecessorsIncluded: boolean
:param PredecessorsIncluded: True if a list of predecessor runs should be returned.
:rtype: dict
ReturnsResponse Syntax
{
'JobRun': {
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
Response Structure
(dict) --
JobRun (dict) --
The requested job-run metadata.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobRun': {
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
:returns:
(string) --
(string) --
"""
pass
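# Illustrative sketch: polling get_job_run until the run reaches a terminal state, using
# the JobRunState values listed in the response syntax above. The job name and run ID
# defaults are hypothetical placeholders; assumes boto3 and configured AWS credentials.
def _example_wait_for_job_run(job_name='my-etl-job', run_id='jr_0123456789abcdef'):
    import time
    import boto3
    client = boto3.client('glue')
    terminal = {'SUCCEEDED', 'FAILED', 'STOPPED', 'TIMEOUT'}
    while True:
        state = client.get_job_run(JobName=job_name, RunId=run_id)['JobRun']['JobRunState']
        if state in terminal:
            return state
        time.sleep(30)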
def get_job_runs(JobName=None, NextToken=None, MaxResults=None):
"""
Retrieves metadata for all runs of a given job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job_runs(
JobName='string',
NextToken='string',
MaxResults=123
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition for which to retrieve all job runs.\n
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum size of the response.
:rtype: dict
ReturnsResponse Syntax
{
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
JobRuns (list) --
A list of job-run metadata objects.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
NextToken (string) --
A continuation token, if not all requested job runs have been returned.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
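# Illustrative sketch: collecting failed runs across all pages of get_job_runs by
# following NextToken, as described above. 'my-etl-job' is a hypothetical placeholder;
# assumes boto3 and configured AWS credentials.
def _example_failed_job_runs(job_name='my-etl-job'):
    import boto3
    client = boto3.client('glue')
    failures, token = [], None
    while True:
        kwargs = {'JobName': job_name, 'MaxResults': 200}
        if token:
            kwargs['NextToken'] = token
        response = client.get_job_runs(**kwargs)
        failures.extend(run for run in response.get('JobRuns', [])
                        if run['JobRunState'] == 'FAILED')
        token = response.get('NextToken')
        if not token:
            return failures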
def get_jobs(NextToken=None, MaxResults=None):
"""
Retrieves all current job definitions.
See also: AWS API Documentation
Exceptions
:example: response = client.get_jobs(
NextToken='string',
MaxResults=123
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum size of the response.
:rtype: dict
ReturnsResponse Syntax
{
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Jobs (list) --
A list of job definitions.
(dict) --
Specifies a job definition.
Name (string) --
The name you assign to this job definition.
Description (string) --
A description of the job.
LogUri (string) --
This field is reserved for future use.
Role (string) --
The name or Amazon Resource Name (ARN) of the IAM role associated with this job.
CreatedOn (datetime) --
The time and date that this job definition was created.
LastModifiedOn (datetime) --
The last point in time when this job definition was modified.
ExecutionProperty (dict) --
An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
MaxConcurrentRuns (integer) --
The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.
Command (dict) --
The JobCommand that executes this job.
Name (string) --
The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .
ScriptLocation (string) --
Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.
PythonVersion (string) --
The Python version being used to execute a Python shell job. Allowed values are 2 or 3.
DefaultArguments (dict) --
The default arguments for this job, specified as name-value pairs.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
NonOverridableArguments (dict) --
Non-overridable arguments for this job, specified as name-value pairs.
(string) --
(string) --
Connections (dict) --
The connections used for this job.
Connections (list) --
A list of connections used by the job.
(string) --
MaxRetries (integer) --
The maximum number of times to retry this job after a JobRun fails.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Timeout (integer) --
The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job.
NotificationProperty (dict) --
Specifies configuration properties of a job notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
NextToken (string) --
A continuation token, if not all job definitions have yet been returned.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
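# Illustrative sketch: building a job-name -> GlueVersion map over every job definition,
# following NextToken as described above. Assumes boto3 and configured AWS credentials.
def _example_list_job_versions():
    import boto3
    client = boto3.client('glue')
    versions, token = {}, None
    while True:
        kwargs = {'NextToken': token} if token else {}
        response = client.get_jobs(**kwargs)
        for job in response.get('Jobs', []):
            versions[job['Name']] = job.get('GlueVersion')
        token = response.get('NextToken')
        if not token:
            return versions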
def get_mapping(Source=None, Sinks=None, Location=None):
"""
Creates mappings.
See also: AWS API Documentation
Exceptions
:example: response = client.get_mapping(
Source={
'DatabaseName': 'string',
'TableName': 'string'
},
Sinks=[
{
'DatabaseName': 'string',
'TableName': 'string'
},
],
Location={
'Jdbc': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'S3': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'DynamoDB': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
]
}
)
:type Source: dict
:param Source: [REQUIRED]\nSpecifies the source table.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n
:type Sinks: list
:param Sinks: A list of target tables.\n\n(dict) --Specifies a table definition in the AWS Glue Data Catalog.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n\n\n
:type Location: dict
:param Location: Parameters for the mapping.\n\nJdbc (list) --A JDBC location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nS3 (list) --An Amazon Simple Storage Service (Amazon S3) location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nDynamoDB (list) --An Amazon DynamoDB table location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Mapping': [
{
'SourceTable': 'string',
'SourcePath': 'string',
'SourceType': 'string',
'TargetTable': 'string',
'TargetPath': 'string',
'TargetType': 'string'
},
]
}
Response Structure
(dict) --
Mapping (list) --
A list of mappings to the specified targets.
(dict) --
Defines a mapping.
SourceTable (string) --
The name of the source table.
SourcePath (string) --
The source path.
SourceType (string) --
The source type.
TargetTable (string) --
The target table.
TargetPath (string) --
The target path.
TargetType (string) --
The target type.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {
'Mapping': [
{
'SourceTable': 'string',
'SourcePath': 'string',
'SourceType': 'string',
'TargetTable': 'string',
'TargetPath': 'string',
'TargetType': 'string'
},
]
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
"""
pass
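# Illustrative sketch: requesting a suggested mapping between one catalog table and one
# target table, mirroring the request syntax above. The database and table names are
# hypothetical placeholders; assumes boto3 and configured AWS credentials.
def _example_get_mapping():
    import boto3
    client = boto3.client('glue')
    response = client.get_mapping(
        Source={'DatabaseName': 'sales_db', 'TableName': 'raw_orders'},
        Sinks=[{'DatabaseName': 'sales_db', 'TableName': 'clean_orders'}],
    )
    return response['Mapping']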
def get_ml_task_run(TransformId=None, TaskRunId=None):
"""
Gets details for a specific task run on a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can check the stats of any task run by calling GetMLTaskRun with the TaskRunID and its parent transform\'s TransformID .
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_task_run(
TransformId='string',
TaskRunId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type TaskRunId: string
:param TaskRunId: [REQUIRED]\nThe unique identifier of the task run.\n
:rtype: dict
ReturnsResponse Syntax
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
}
Response Structure
(dict) --
TransformId (string) --
The unique identifier of the task run.
TaskRunId (string) --
The unique run identifier associated with this run.
Status (string) --
The status for this task run.
LogGroupName (string) --
The names of the log groups that are associated with the task run.
Properties (dict) --
The list of properties that are associated with the task run.
TaskType (string) --
The type of task run.
ImportLabelsTaskRunProperties (dict) --
The configuration properties for an importing labels task run.
InputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path from where you will import the labels.
Replace (boolean) --
Indicates whether to overwrite your existing labels.
ExportLabelsTaskRunProperties (dict) --
The configuration properties for an exporting labels task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will export the labels.
LabelingSetGenerationTaskRunProperties (dict) --
The configuration properties for a labeling set generation task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will generate the labeling set.
FindMatchesTaskRunProperties (dict) --
The configuration properties for a find matches task run.
JobId (string) --
The job ID for the Find Matches task run.
JobName (string) --
The name assigned to the job for the Find Matches task run.
JobRunId (string) --
The job run ID for the Find Matches task run.
ErrorString (string) --
The error strings that are associated with the task run.
StartedOn (datetime) --
The date and time when this task run started.
LastModifiedOn (datetime) --
The date and time when this task run was last modified.
CompletedOn (datetime) --
The date and time when this task run was completed.
ExecutionTime (integer) --
The amount of time (in seconds) that the task run consumed resources.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
"""
pass
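# Illustrative sketch: checking a single ML task run and surfacing its error string when
# the run failed, per the response structure above. The transform and task run IDs are
# hypothetical placeholders; assumes boto3 and configured AWS credentials.
def _example_check_ml_task_run(transform_id='tfm-0123456789abcdef',
                               task_run_id='tsk-0123456789abcdef'):
    import boto3
    client = boto3.client('glue')
    run = client.get_ml_task_run(TransformId=transform_id, TaskRunId=task_run_id)
    if run['Status'] == 'FAILED':
        return run.get('ErrorString')
    return run['Status']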
def get_ml_task_runs(TransformId=None, NextToken=None, MaxResults=None, Filter=None, Sort=None):
"""
Gets a list of runs for a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can get a sortable, filterable list of machine learning task runs by calling GetMLTaskRuns with their parent transform\'s TransformID and other optional parameters as documented in this section.
This operation returns a list of historic runs and must be paginated.
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_task_runs(
TransformId='string',
NextToken='string',
MaxResults=123,
Filter={
'TaskRunType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'StartedBefore': datetime(2015, 1, 1),
'StartedAfter': datetime(2015, 1, 1)
},
Sort={
'Column': 'TASK_RUN_TYPE'|'STATUS'|'STARTED',
'SortDirection': 'DESCENDING'|'ASCENDING'
}
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type NextToken: string
:param NextToken: A token for pagination of the results. The default is empty.
:type MaxResults: integer
:param MaxResults: The maximum number of results to return.
:type Filter: dict
:param Filter: The filter criteria, in the TaskRunFilterCriteria structure, for the task run.\n\nTaskRunType (string) --The type of task run.\n\nStatus (string) --The current status of the task run.\n\nStartedBefore (datetime) --Filter on task runs started before this date.\n\nStartedAfter (datetime) --Filter on task runs started after this date.\n\n\n
:type Sort: dict
:param Sort: The sorting criteria, in the TaskRunSortCriteria structure, for the task run.\n\nColumn (string) -- [REQUIRED]The column to be used to sort the list of task runs for the machine learning transform.\n\nSortDirection (string) -- [REQUIRED]The sort direction to be used to sort the list of task runs for the machine learning transform.\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'TaskRuns': [
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
TaskRuns (list) --
A list of task runs that are associated with the transform.
(dict) --
The sampling parameters that are associated with the machine learning transform.
TransformId (string) --
The unique identifier for the transform.
TaskRunId (string) --
The unique identifier for this task run.
Status (string) --
The current status of the requested task run.
LogGroupName (string) --
The names of the log group for secure logging, associated with this task run.
Properties (dict) --
Specifies configuration properties associated with this task run.
TaskType (string) --
The type of task run.
ImportLabelsTaskRunProperties (dict) --
The configuration properties for an importing labels task run.
InputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path from where you will import the labels.
Replace (boolean) --
Indicates whether to overwrite your existing labels.
ExportLabelsTaskRunProperties (dict) --
The configuration properties for an exporting labels task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will export the labels.
LabelingSetGenerationTaskRunProperties (dict) --
The configuration properties for a labeling set generation task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will generate the labeling set.
FindMatchesTaskRunProperties (dict) --
The configuration properties for a find matches task run.
JobId (string) --
The job ID for the Find Matches task run.
JobName (string) --
The name assigned to the job for the Find Matches task run.
JobRunId (string) --
The job run ID for the Find Matches task run.
ErrorString (string) --
The list of error strings associated with this task run.
StartedOn (datetime) --
The date and time that this task run started.
LastModifiedOn (datetime) --
The last point in time that the requested task run was updated.
CompletedOn (datetime) --
The last point in time that the requested task run was completed.
ExecutionTime (integer) --
The amount of time (in seconds) that the task run consumed resources.
NextToken (string) --
A pagination token, if more results are available.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TaskRuns': [
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
"""
pass
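# Illustrative sketch: listing only failed task runs for a transform, newest first, using
# the Filter and Sort structures described above. The transform ID is a hypothetical
# placeholder; assumes boto3 and configured AWS credentials.
def _example_failed_ml_task_runs(transform_id='tfm-0123456789abcdef'):
    import boto3
    client = boto3.client('glue')
    response = client.get_ml_task_runs(
        TransformId=transform_id,
        Filter={'Status': 'FAILED'},
        Sort={'Column': 'STARTED', 'SortDirection': 'DESCENDING'},
    )
    return response.get('TaskRuns', [])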
def get_ml_transform(TransformId=None):
"""
Gets an AWS Glue machine learning transform artifact and all its corresponding metadata. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. You can retrieve their metadata by calling GetMLTransform .
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_transform(
TransformId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the transform, generated at the time that the transform was created.\n
:rtype: dict
ReturnsResponse Syntax
{
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
}
Response Structure
(dict) --
TransformId (string) --The unique identifier of the transform, generated at the time that the transform was created.
Name (string) --The unique name given to the transform when it was created.
Description (string) --A description of the transform.
Status (string) --The last known status of the transform (to indicate whether it can be used or not). One of "NOT_READY", "READY", or "DELETING".
CreatedOn (datetime) --The date and time when the transform was created.
LastModifiedOn (datetime) --The date and time when the transform was last modified.
InputRecordTables (list) --A list of AWS Glue table definitions used by the transform.
(dict) --The database and table in the AWS Glue Data Catalog that is used for input or output data.
DatabaseName (string) --A database name in the AWS Glue Data Catalog.
TableName (string) --A table name in the AWS Glue Data Catalog.
CatalogId (string) --A unique identifier for the AWS Glue Data Catalog.
ConnectionName (string) --The name of the connection to the AWS Glue Data Catalog.
Parameters (dict) --The configuration parameters that are specific to the algorithm used.
TransformType (string) --The type of machine learning transform.
For information about the types of machine learning transforms, see Creating Machine Learning Transforms .
FindMatchesParameters (dict) --The parameters for the find matches algorithm.
PrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.
PrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.
The precision metric indicates how often your model is correct when it predicts a match.
The recall metric indicates, for an actual match, how often your model predicts the match.
AccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.
Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.
Cost measures how many compute resources, and thus money, are consumed to run the transform.
EnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.
Note that setting this value to true may increase the conflation execution time.
EvaluationMetrics (dict) --The latest evaluation metrics.
TransformType (string) --The type of machine learning transform.
FindMatchesMetrics (dict) --The evaluation metrics for the find matches algorithm.
AreaUnderPRCurve (float) --The area under the precision/recall curve (AUPRC) is a single number measuring the overall quality of the transform, that is independent of the choice made for precision vs. recall. Higher values indicate that you have a more attractive precision vs. recall tradeoff.
For more information, see Precision and recall in Wikipedia.
Precision (float) --The precision metric indicates how often your transform is correct when it predicts a match. Specifically, it measures how well the transform finds true positives from the total true positives possible.
For more information, see Precision and recall in Wikipedia.
Recall (float) --The recall metric indicates that for an actual match, how often your transform predicts the match. Specifically, it measures how well the transform finds true positives from the total records in the source data.
For more information, see Precision and recall in Wikipedia.
F1 (float) --The maximum F1 metric indicates the transform\'s accuracy between 0 and 1, where 1 is the best accuracy.
For more information, see F1 score in Wikipedia.
ConfusionMatrix (dict) --The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.
For more information, see Confusion matrix in Wikipedia.
NumTruePositives (integer) --The number of matches in the data that the transform correctly found, in the confusion matrix for your transform.
NumFalsePositives (integer) --The number of nonmatches in the data that the transform incorrectly classified as a match, in the confusion matrix for your transform.
NumTrueNegatives (integer) --The number of nonmatches in the data that the transform correctly rejected, in the confusion matrix for your transform.
NumFalseNegatives (integer) --The number of matches in the data that the transform didn\'t find, in the confusion matrix for your transform.
LabelCount (integer) --The number of labels available for this transform.
Schema (list) --The Map<Column, Type> object that represents the schema that this transform accepts. Has an upper bound of 100 columns.
(dict) --A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.
Name (string) --The name of the column.
DataType (string) --The type of data in the column.
Role (string) --The name or Amazon Resource Name (ARN) of the IAM role with the required permissions.
GlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
MaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
When the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.
WorkerType (string) --The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when this task runs.
Timeout (integer) --The timeout for a task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxRetries (integer) --The maximum number of times to retry a task for this transform after a task run fails.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
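Usage sketch (illustrative only; the TransformId value is a placeholder, and the metrics lookup assumes the transform has already been evaluated):
import boto3
glue = boto3.client('glue')
# Fetch one transform and inspect its readiness and (if present) its quality metrics.
transform = glue.get_ml_transform(TransformId='tfm-0123456789abcdef')  # placeholder ID
metrics = transform.get('EvaluationMetrics', {}).get('FindMatchesMetrics', {})
print(transform['Status'], metrics.get('AreaUnderPRCurve'))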
"""
pass
def get_ml_transforms(NextToken=None, MaxResults=None, Filter=None, Sort=None):
"""
Gets a sortable, filterable list of existing AWS Glue machine learning transforms. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue, and you can retrieve their metadata by calling GetMLTransforms .
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_transforms(
NextToken='string',
MaxResults=123,
Filter={
'Name': 'string',
'TransformType': 'FIND_MATCHES',
'Status': 'NOT_READY'|'READY'|'DELETING',
'GlueVersion': 'string',
'CreatedBefore': datetime(2015, 1, 1),
'CreatedAfter': datetime(2015, 1, 1),
'LastModifiedBefore': datetime(2015, 1, 1),
'LastModifiedAfter': datetime(2015, 1, 1),
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
]
},
Sort={
'Column': 'NAME'|'TRANSFORM_TYPE'|'STATUS'|'CREATED'|'LAST_MODIFIED',
'SortDirection': 'DESCENDING'|'ASCENDING'
}
)
:type NextToken: string
:param NextToken: A paginated token to offset the results.
:type MaxResults: integer
:param MaxResults: The maximum number of results to return.
:type Filter: dict
:param Filter: The filter transformation criteria.\n\nName (string) --A unique transform name that is used to filter the machine learning transforms.\n\nTransformType (string) --The type of machine learning transform that is used to filter the machine learning transforms.\n\nStatus (string) --Filters the list of machine learning transforms by the last known status of the transforms (to indicate whether a transform can be used or not). One of 'NOT_READY', 'READY', or 'DELETING'.\n\nGlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.\n\nCreatedBefore (datetime) --The time and date before which the transforms were created.\n\nCreatedAfter (datetime) --The time and date after which the transforms were created.\n\nLastModifiedBefore (datetime) --Filter on transforms last modified before this date.\n\nLastModifiedAfter (datetime) --Filter on transforms last modified after this date.\n\nSchema (list) --Filters on datasets with a specific schema. The Map<Column, Type> object is an array of key-value pairs representing the schema this transform accepts, where Column is the name of a column, and Type is the type of the data such as an integer or string. Has an upper bound of 100 columns.\n\n(dict) --A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.\n\nName (string) --The name of the column.\n\nDataType (string) --The type of data in the column.\n\n\n\n\n\n\n
:type Sort: dict
:param Sort: The sorting criteria.\n\nColumn (string) -- [REQUIRED]The column to be used in the sorting criteria that are associated with the machine learning transform.\n\nSortDirection (string) -- [REQUIRED]The sort direction to be used in the sorting criteria that are associated with the machine learning transform.\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Transforms': [
{
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Transforms (list) --
A list of machine learning transforms.
(dict) --
A structure for a machine learning transform.
TransformId (string) --
The unique transform ID that is generated for the machine learning transform. The ID is guaranteed to be unique and does not change.
Name (string) --
A user-defined name for the machine learning transform. Names are not guaranteed unique and can be changed at any time.
Description (string) --
A user-defined, long-form description text for the machine learning transform. Descriptions are not guaranteed to be unique and can be changed at any time.
Status (string) --
The current status of the machine learning transform.
CreatedOn (datetime) --
A timestamp. The time and date that this machine learning transform was created.
LastModifiedOn (datetime) --
A timestamp. The last point in time when this machine learning transform was modified.
InputRecordTables (list) --
A list of AWS Glue table definitions used by the transform.
(dict) --
The database and table in the AWS Glue Data Catalog that is used for input or output data.
DatabaseName (string) --
A database name in the AWS Glue Data Catalog.
TableName (string) --
A table name in the AWS Glue Data Catalog.
CatalogId (string) --
A unique identifier for the AWS Glue Data Catalog.
ConnectionName (string) --
The name of the connection to the AWS Glue Data Catalog.
Parameters (dict) --
A TransformParameters object. You can use parameters to tune (customize) the behavior of the machine learning transform by specifying what data it learns from and your preference on various tradeoffs (such as precision vs. recall, or accuracy vs. cost).
TransformType (string) --
The type of machine learning transform.
For information about the types of machine learning transforms, see Creating Machine Learning Transforms .
FindMatchesParameters (dict) --
The parameters for the find matches algorithm.
PrimaryKeyColumnName (string) --
The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.
PrecisionRecallTradeoff (float) --
The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.
The precision metric indicates how often your model is correct when it predicts a match.
The recall metric indicates that for an actual match, how often your model predicts the match.
AccuracyCostTradeoff (float) --
The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.
Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.
Cost measures how many compute resources, and thus money, are consumed to run the transform.
EnforceProvidedLabels (boolean) --
The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.
Note that setting this value to true may increase the conflation execution time.
EvaluationMetrics (dict) --
An EvaluationMetrics object. Evaluation metrics provide an estimate of the quality of your machine learning transform.
TransformType (string) --
The type of machine learning transform.
FindMatchesMetrics (dict) --
The evaluation metrics for the find matches algorithm.
AreaUnderPRCurve (float) --
The area under the precision/recall curve (AUPRC) is a single number measuring the overall quality of the transform, that is independent of the choice made for precision vs. recall. Higher values indicate that you have a more attractive precision vs. recall tradeoff.
For more information, see Precision and recall in Wikipedia.
Precision (float) --
The precision metric indicates how often your transform is correct when it predicts a match. Specifically, it measures how well the transform finds true positives from the total true positives possible.
For more information, see Precision and recall in Wikipedia.
Recall (float) --
The recall metric indicates that for an actual match, how often your transform predicts the match. Specifically, it measures how well the transform finds true positives from the total records in the source data.
For more information, see Precision and recall in Wikipedia.
F1 (float) --
The maximum F1 metric indicates the transform\'s accuracy between 0 and 1, where 1 is the best accuracy.
For more information, see F1 score in Wikipedia.
ConfusionMatrix (dict) --
The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.
For more information, see Confusion matrix in Wikipedia.
NumTruePositives (integer) --
The number of matches in the data that the transform correctly found, in the confusion matrix for your transform.
NumFalsePositives (integer) --
The number of nonmatches in the data that the transform incorrectly classified as a match, in the confusion matrix for your transform.
NumTrueNegatives (integer) --
The number of nonmatches in the data that the transform correctly rejected, in the confusion matrix for your transform.
NumFalseNegatives (integer) --
The number of matches in the data that the transform didn\'t find, in the confusion matrix for your transform.
LabelCount (integer) --
A count identifier for the labeling files generated by AWS Glue for this transform. As you create a better transform, you can iteratively download, label, and upload the labeling file.
Schema (list) --
A map of key-value pairs representing the columns and data types that this transform can run against. Has an upper bound of 100 columns.
(dict) --
A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.
Name (string) --
The name of the column.
DataType (string) --
The type of data in the column.
Role (string) --
The name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both AWS Glue service role permissions to AWS Glue resources, and Amazon S3 permissions required by the transform.
This role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue .
This role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.
GlueVersion (string) --
This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .
If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.
If MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.
If WorkerType is set, then NumberOfWorkers is required (and vice versa).
MaxCapacity and NumberOfWorkers must both be at least 1.
When the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.
WorkerType (string) --
The type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .
If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.
If MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.
If WorkerType is set, then NumberOfWorkers is required (and vice versa).
MaxCapacity and NumberOfWorkers must both be at least 1.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a task of the transform runs.
If WorkerType is set, then NumberOfWorkers is required (and vice versa).
Timeout (integer) --
The timeout in minutes of the machine learning transform.
MaxRetries (integer) --
The maximum number of times to retry after an MLTaskRun of the machine learning transform fails.
NextToken (string) --
A pagination token, if more results are available.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'Transforms': [
{
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
},
],
'NextToken': 'string'
}
:returns:
This role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue .
This role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.
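Usage sketch (illustrative; the filter values are assumptions, and pagination is followed manually via NextToken):
import boto3
glue = boto3.client('glue')
kwargs = {'Filter': {'TransformType': 'FIND_MATCHES', 'Status': 'READY'}}
while True:
    page = glue.get_ml_transforms(**kwargs)
    for transform in page['Transforms']:
        print(transform['TransformId'], transform['Name'], transform['Status'])
    token = page.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token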
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
ReturnsA paginator object.
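Usage sketch (assumes botocore registers a paginator for the get_partitions operation, which current releases do; database and table names are placeholders):
import boto3
glue = boto3.client('glue')
paginator = glue.get_paginator('get_partitions')
for page in paginator.paginate(DatabaseName='example_db', TableName='example_table'):
    for partition in page['Partitions']:
        print(partition['Values'])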
"""
pass
def get_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValues=None):
"""
Retrieves information about a specified partition.
See also: AWS API Documentation
Exceptions
:example: response = client.get_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionValues=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition in question resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the partition resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the partition\'s table.\n
:type PartitionValues: list
:param PartitionValues: [REQUIRED]\nThe values that define the partition.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Partition': {
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
}
Response Structure
(dict) --
Partition (dict) --
The requested information, in the form of a Partition object.
Values (list) --
The values of the partition.
(string) --
DatabaseName (string) --
The name of the catalog database in which to create the partition.
TableName (string) --
The name of the database table in which to create the partition.
CreationTime (datetime) --
The time at which the partition was created.
LastAccessTime (datetime) --
The last time at which the partition was accessed.
StorageDescriptor (dict) --
Provides information about the physical location where the partition is stored.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
Parameters (dict) --
These key-value pairs define partition parameters.
(string) --
(string) --
LastAnalyzedTime (datetime) --
The last time at which column statistics were computed for this partition.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Partition': {
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
}
:returns:
(string) --
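Usage sketch (illustrative; the database, table, and partition values are placeholders):
import boto3
glue = boto3.client('glue')
resp = glue.get_partition(
    DatabaseName='example_db',
    TableName='example_table',
    PartitionValues=['2020', '01'])
# Print where this partition's data lives in Amazon S3.
print(resp['Partition']['StorageDescriptor']['Location'])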
"""
pass
def get_partitions(CatalogId=None, DatabaseName=None, TableName=None, Expression=None, NextToken=None, Segment=None, MaxResults=None):
"""
Retrieves information about the partitions in a table.
See also: AWS API Documentation
Exceptions
:example: response = client.get_partitions(
CatalogId='string',
DatabaseName='string',
TableName='string',
Expression='string',
NextToken='string',
Segment={
'SegmentNumber': 123,
'TotalSegments': 123
},
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partitions in question reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the partitions reside.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the partitions\' table.\n
:type Expression: string
:param Expression: An expression that filters the partitions to be returned.\nThe expression uses SQL syntax similar to the SQL WHERE filter clause. The SQL statement parser JSQLParser parses the expression.\n\nOperators : The following are the operators that you can use in the Expression API call:\n=\n\nChecks whether the values of the two operands are equal; if yes, then the condition becomes true.\nExample: Assume \'variable a\' holds 10 and \'variable b\' holds 20.\n(a = b) is not true.\n\n< >\nChecks whether the values of two operands are equal; if the values are not equal, then the condition becomes true.\nExample: (a < > b) is true.\n\n>\nChecks whether the value of the left operand is greater than the value of the right operand; if yes, then the condition becomes true.\nExample: (a > b) is not true.\n\n<\nChecks whether the value of the left operand is less than the value of the right operand; if yes, then the condition becomes true.\nExample: (a < b) is true.\n\n>=\nChecks whether the value of the left operand is greater than or equal to the value of the right operand; if yes, then the condition becomes true.\nExample: (a >= b) is not true.\n\n<=\nChecks whether the value of the left operand is less than or equal to the value of the right operand; if yes, then the condition becomes true.\nExample: (a <= b) is true.\n\nAND, OR, IN, BETWEEN, LIKE, NOT, IS NULL\nLogical operators.\n\nSupported Partition Key Types : The following are the supported partition keys.\n\nstring\ndate\ntimestamp\nint\nbigint\nlong\ntinyint\nsmallint\ndecimal\n\nIf an invalid type is encountered, an exception is thrown.\nThe following list shows the valid operators on each type. When you define a crawler, the partitionKey type is created as a STRING , to be compatible with the catalog partitions.\n\nSample API Call :\n
:type NextToken: string
:param NextToken: A continuation token, if this is not the first call to retrieve these partitions.
:type Segment: dict
:param Segment: The segment of the table\'s partitions to scan in this request.\n\nSegmentNumber (integer) -- [REQUIRED]The zero-based index number of the segment. For example, if the total number of segments is 4, SegmentNumber values range from 0 through 3.\n\nTotalSegments (integer) -- [REQUIRED]The total number of segments.\n\n\n
:type MaxResults: integer
:param MaxResults: The maximum number of partitions to return in a single response.
:rtype: dict
ReturnsResponse Syntax
{
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Partitions (list) --
A list of requested partitions.
(dict) --
Represents a slice of table data.
Values (list) --
The values of the partition.
(string) --
DatabaseName (string) --
The name of the catalog database in which to create the partition.
TableName (string) --
The name of the database table in which to create the partition.
CreationTime (datetime) --
The time at which the partition was created.
LastAccessTime (datetime) --
The last time at which the partition was accessed.
StorageDescriptor (dict) --
Provides information about the physical location where the partition is stored.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
Parameters (dict) --
These key-value pairs define partition parameters.
(string) --
(string) --
LastAnalyzedTime (datetime) --
The last time at which column statistics were computed for this partition.
NextToken (string) --
A continuation token, if the returned list of partitions does not include the last one.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'NextToken': 'string'
}
:returns:
(string) --
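Usage sketch (illustrative; the expression filters on a hypothetical string partition key named 'year', and all names are placeholders):
import boto3
glue = boto3.client('glue')
resp = glue.get_partitions(
    DatabaseName='example_db',
    TableName='example_table',
    Expression="year = '2020'")
for partition in resp['Partitions']:
    print(partition['Values'], partition['StorageDescriptor']['Location'])
# Follow resp.get('NextToken') in a loop if the table has more partitions than fit in one response.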
"""
pass
def get_plan(Mapping=None, Source=None, Sinks=None, Location=None, Language=None):
"""
Gets code to perform a specified mapping.
See also: AWS API Documentation
Exceptions
:example: response = client.get_plan(
Mapping=[
{
'SourceTable': 'string',
'SourcePath': 'string',
'SourceType': 'string',
'TargetTable': 'string',
'TargetPath': 'string',
'TargetType': 'string'
},
],
Source={
'DatabaseName': 'string',
'TableName': 'string'
},
Sinks=[
{
'DatabaseName': 'string',
'TableName': 'string'
},
],
Location={
'Jdbc': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'S3': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'DynamoDB': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
]
},
Language='PYTHON'|'SCALA'
)
:type Mapping: list
:param Mapping: [REQUIRED]\nThe list of mappings from a source table to target tables.\n\n(dict) --Defines a mapping.\n\nSourceTable (string) --The name of the source table.\n\nSourcePath (string) --The source path.\n\nSourceType (string) --The source type.\n\nTargetTable (string) --The target table.\n\nTargetPath (string) --The target path.\n\nTargetType (string) --The target type.\n\n\n\n\n
:type Source: dict
:param Source: [REQUIRED]\nThe source table.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n
:type Sinks: list
:param Sinks: The target tables.\n\n(dict) --Specifies a table definition in the AWS Glue Data Catalog.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n\n\n
:type Location: dict
:param Location: The parameters for the mapping.\n\nJdbc (list) --A JDBC location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nS3 (list) --An Amazon Simple Storage Service (Amazon S3) location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nDynamoDB (list) --An Amazon DynamoDB table location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\n\n
:type Language: string
:param Language: The programming language of the code to perform the mapping.
:rtype: dict
ReturnsResponse Syntax
{
'PythonScript': 'string',
'ScalaCode': 'string'
}
Response Structure
(dict) --
PythonScript (string) --
A Python script to perform the mapping.
ScalaCode (string) --
The Scala code to perform the mapping.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'PythonScript': 'string',
'ScalaCode': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
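Usage sketch (illustrative; a single pass-through column mapping between hypothetical catalog tables):
import boto3
glue = boto3.client('glue')
resp = glue.get_plan(
    Mapping=[{
        'SourceTable': 'src_table', 'SourcePath': 'id', 'SourceType': 'int',
        'TargetTable': 'dst_table', 'TargetPath': 'id', 'TargetType': 'int'}],
    Source={'DatabaseName': 'example_db', 'TableName': 'src_table'},
    Sinks=[{'DatabaseName': 'example_db', 'TableName': 'dst_table'}],
    Language='PYTHON')
print(resp['PythonScript'])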
"""
pass
def get_resource_policy():
"""
Retrieves a specified resource policy.
See also: AWS API Documentation
Exceptions
:example: response = client.get_resource_policy()
:rtype: dict
ReturnsResponse Syntax{
'PolicyInJson': 'string',
'PolicyHash': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1)
}
Response Structure
(dict) --
PolicyInJson (string) --Contains the requested policy document, in JSON format.
PolicyHash (string) --Contains the hash value associated with this policy.
CreateTime (datetime) --The date and time at which the policy was created.
UpdateTime (datetime) --The date and time at which the policy was last updated.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'PolicyInJson': 'string',
'PolicyHash': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1)
}
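Usage sketch (illustrative; handles the documented EntityNotFoundException raised when no resource policy has been set):
import boto3
glue = boto3.client('glue')
try:
    resp = glue.get_resource_policy()
    print(resp['PolicyHash'])
    print(resp['PolicyInJson'])
except glue.exceptions.EntityNotFoundException:
    print('no resource policy set for this account')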
"""
pass
def get_security_configuration(Name=None):
"""
Retrieves a specified security configuration.
See also: AWS API Documentation
Exceptions
:example: response = client.get_security_configuration(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the security configuration to retrieve.\n
:rtype: dict
ReturnsResponse Syntax{
'SecurityConfiguration': {
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
}
}
Response Structure
(dict) --
SecurityConfiguration (dict) --The requested security configuration.
Name (string) --The name of the security configuration.
CreatedTimeStamp (datetime) --The time at which this security configuration was created.
EncryptionConfiguration (dict) --The encryption configuration associated with this security configuration.
S3Encryption (list) --The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.
(dict) --Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.
S3EncryptionMode (string) --The encryption mode to use for Amazon S3 data.
KmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
CloudWatchEncryption (dict) --The encryption configuration for Amazon CloudWatch.
CloudWatchEncryptionMode (string) --The encryption mode to use for CloudWatch data.
KmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
JobBookmarksEncryption (dict) --The encryption configuration for job bookmarks.
JobBookmarksEncryptionMode (string) --The encryption mode to use for job bookmarks data.
KmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'SecurityConfiguration': {
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
}
}
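Usage sketch (illustrative; the configuration name is a placeholder, and the encryption sections may be absent depending on how the configuration was created):
import boto3
glue = boto3.client('glue')
resp = glue.get_security_configuration(Name='example-security-configuration')  # placeholder name
enc = resp['SecurityConfiguration']['EncryptionConfiguration']
for s3 in enc.get('S3Encryption', []):
    print('S3:', s3['S3EncryptionMode'])
print('CloudWatch:', enc.get('CloudWatchEncryption', {}).get('CloudWatchEncryptionMode'))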
"""
pass
def get_security_configurations(MaxResults=None, NextToken=None):
"""
Retrieves a list of all security configurations.
See also: AWS API Documentation
Exceptions
:example: response = client.get_security_configurations(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The maximum number of results to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:rtype: dict
ReturnsResponse Syntax
{
'SecurityConfigurations': [
{
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
SecurityConfigurations (list) --
A list of security configurations.
(dict) --
Specifies a security configuration.
Name (string) --
The name of the security configuration.
CreatedTimeStamp (datetime) --
The time at which this security configuration was created.
EncryptionConfiguration (dict) --
The encryption configuration associated with this security configuration.
S3Encryption (list) --
The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.
(dict) --
Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.
S3EncryptionMode (string) --
The encryption mode to use for Amazon S3 data.
KmsKeyArn (string) --
The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
CloudWatchEncryption (dict) --
The encryption configuration for Amazon CloudWatch.
CloudWatchEncryptionMode (string) --
The encryption mode to use for CloudWatch data.
KmsKeyArn (string) --
The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
JobBookmarksEncryption (dict) --
The encryption configuration for job bookmarks.
JobBookmarksEncryptionMode (string) --
The encryption mode to use for job bookmarks data.
KmsKeyArn (string) --
The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
NextToken (string) --
A continuation token, if there are more security configurations to return.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'SecurityConfigurations': [
{
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
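Usage sketch (illustrative; lists every security configuration, following NextToken manually):
import boto3
glue = boto3.client('glue')
kwargs = {}
while True:
    page = glue.get_security_configurations(**kwargs)
    for config in page['SecurityConfigurations']:
        print(config['Name'], config['CreatedTimeStamp'])
    token = page.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token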
"""
pass
def get_table(CatalogId=None, DatabaseName=None, Name=None):
"""
Retrieves the Table definition in a Data Catalog for a specified table.
See also: AWS API Documentation
Exceptions
:example: response = client.get_table(
CatalogId='string',
DatabaseName='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type Name: string
:param Name: [REQUIRED]\nThe name of the table for which to retrieve the definition. For Hive compatibility, this name is entirely lowercase.\n
:rtype: dict
ReturnsResponse Syntax
{
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
}
}
Response Structure
(dict) --
Table (dict) --
The Table object that defines the specified table.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
}
}
:returns:
(string) --
(string) --
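Usage sketch (illustrative; database and table names are placeholders):
import boto3
glue = boto3.client('glue')
resp = glue.get_table(DatabaseName='example_db', Name='example_table')
table = resp['Table']
print(table['TableType'], table['StorageDescriptor']['Location'])
for column in table['StorageDescriptor']['Columns']:
    print(column['Name'], column['Type'])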
"""
pass
def get_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionId=None):
"""
Retrieves a specified version of a table.
See also: AWS API Documentation
Exceptions
:example: response = client.get_table_version(
CatalogId='string',
DatabaseName='string',
TableName='string',
VersionId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type VersionId: string
:param VersionId: The ID value of the table version to be retrieved. A VersionID is a string representation of an integer. Each version is incremented by 1.
:rtype: dict
ReturnsResponse Syntax
{
'TableVersion': {
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
}
}
Response Structure
(dict) --
TableVersion (dict) --
The requested table version.
Table (dict) --
The table in question.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1), or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
VersionId (string) --
The ID value that identifies this table version. A VersionId is a string representation of an integer. Each version is incremented by 1.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'TableVersion': {
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
}
}
:returns:
(string) --
(string) --
"""
pass
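# --- Illustrative usage sketch (not part of the generated stubs) ---
# Retrieves a single stored version of a table with a real boto3 Glue client.
# VersionId is the string form of an integer; all names here are hypothetical.
def _example_get_table_version():
    import boto3
    glue = boto3.client('glue')
    resp = glue.get_table_version(
        DatabaseName='example_db',
        TableName='example_table',
        VersionId='2',
    )
    version = resp['TableVersion']
    return version['VersionId'], version['Table']['Name']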
def get_table_versions(CatalogId=None, DatabaseName=None, TableName=None, NextToken=None, MaxResults=None):
"""
Retrieves a list of strings that identify available versions of a specified table.
See also: AWS API Documentation
Exceptions
:example: response = client.get_table_versions(
CatalogId='string',
DatabaseName='string',
TableName='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type NextToken: string
:param NextToken: A continuation token, if this is not the first call.
:type MaxResults: integer
:param MaxResults: The maximum number of table versions to return in one response.
:rtype: dict
ReturnsResponse Syntax
{
'TableVersions': [
{
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
TableVersions (list) --
A list of strings identifying available versions of the specified table.
(dict) --
Specifies a version of a table.
Table (dict) --
The table in question.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1), or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
VersionId (string) --
The ID value that identifies this table version. A VersionId is a string representation of an integer. Each version is incremented by 1.
NextToken (string) --
A continuation token, if the list of available versions does not include the last one.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'TableVersions': [
{
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
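# --- Illustrative usage sketch (not part of the generated stubs) ---
# Walks every stored version of a table. boto3 ships a paginator for
# get_table_versions that follows NextToken automatically; the database and
# table names are hypothetical placeholders.
def _example_get_table_versions():
    import boto3
    glue = boto3.client('glue')
    paginator = glue.get_paginator('get_table_versions')
    version_ids = []
    for page in paginator.paginate(DatabaseName='example_db',
                                   TableName='example_table'):
        version_ids.extend(tv['VersionId'] for tv in page['TableVersions'])
    return version_ids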
def get_tables(CatalogId=None, DatabaseName=None, Expression=None, NextToken=None, MaxResults=None):
"""
Retrieves the definitions of some or all of the tables in a given Database .
See also: AWS API Documentation
Exceptions
:example: response = client.get_tables(
CatalogId='string',
DatabaseName='string',
Expression='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog whose tables to list. For Hive compatibility, this name is entirely lowercase.\n
:type Expression: string
:param Expression: A regular expression pattern. If present, only those tables whose names match the pattern are returned.
:type NextToken: string
:param NextToken: A continuation token, included if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of tables to return in a single response.
:rtype: dict
ReturnsResponse Syntax
{
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
TableList (list) --
A list of the requested Table objects.
(dict) --
Represents a collection of related data organized in columns and rows.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1), or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
NextToken (string) --
A continuation token, present if the current list segment is not the last.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
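# --- Illustrative usage sketch (not part of the generated stubs) ---
# Lists tables in a database, filtered by the Expression regex, using the
# boto3 paginator for get_tables so NextToken handling is automatic. The
# database name and pattern are hypothetical placeholders.
def _example_get_tables():
    import boto3
    glue = boto3.client('glue')
    paginator = glue.get_paginator('get_tables')
    names = []
    for page in paginator.paginate(DatabaseName='example_db', Expression='sales_.*'):
        names.extend(t['Name'] for t in page['TableList'])
    return names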
def get_tags(ResourceArn=None):
"""
Retrieves a list of tags associated with a resource.
See also: AWS API Documentation
Exceptions
:example: response = client.get_tags(
ResourceArn='string'
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource for which to retrieve tags.\n
:rtype: dict
ReturnsResponse Syntax{
'Tags': {
'string': 'string'
}
}
Response Structure
(dict) --
Tags (dict) --The requested tags.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {
'Tags': {
'string': 'string'
}
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
"""
pass
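# --- Illustrative usage sketch (not part of the generated stubs) ---
# Reads the tags attached to a Glue resource. The ARN below is a hypothetical
# job ARN; the same call works for any taggable Glue resource.
def _example_get_tags():
    import boto3
    glue = boto3.client('glue')
    arn = 'arn:aws:glue:us-east-1:123456789012:job/example-job'  # hypothetical
    return glue.get_tags(ResourceArn=arn)['Tags']  # plain {key: value} dict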
def get_trigger(Name=None):
"""
Retrieves the definition of a trigger.
See also: AWS API Documentation
Exceptions
:example: response = client.get_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to retrieve.\n
:rtype: dict
ReturnsResponse Syntax{
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
Response Structure
(dict) --
Trigger (dict) --The requested trigger definition.
Name (string) --The name of the trigger.
WorkflowName (string) --The name of the workflow associated with the trigger.
Id (string) --Reserved for future use.
Type (string) --The type of trigger that this is.
State (string) --The current state of the trigger.
Description (string) --A description of this trigger.
Schedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --The actions initiated by this trigger.
(dict) --Defines an action to be initiated by a trigger.
JobName (string) --The name of a job to be executed.
Arguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --The name of the crawler to be used with this action.
Predicate (dict) --The predicate of this trigger, which defines when it will fire.
Logical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --A list of the conditions that determine when the trigger will fire.
(dict) --Defines a condition under which a trigger fires.
LogicalOperator (string) --A logical operator.
JobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --The name of the crawler to which this condition applies.
CrawlState (string) --The state of the crawler to which this condition applies.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
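# --- Illustrative usage sketch (not part of the generated stubs) ---
# Fetches one trigger and summarises what it starts. Each Action targets
# either a job or a crawler; the trigger name is a hypothetical placeholder.
def _example_get_trigger():
    import boto3
    glue = boto3.client('glue')
    trigger = glue.get_trigger(Name='example-trigger')['Trigger']
    targets = [a.get('JobName') or a.get('CrawlerName') for a in trigger['Actions']]
    return trigger['Type'], trigger['State'], targets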
def get_triggers(NextToken=None, DependentJobName=None, MaxResults=None):
"""
Gets all the triggers associated with a job.
See also: AWS API Documentation
Exceptions
:example: response = client.get_triggers(
NextToken='string',
DependentJobName='string',
MaxResults=123
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type DependentJobName: string
:param DependentJobName: The name of the job to retrieve triggers for. The trigger that can start this job is returned, and if there is no such trigger, all triggers are returned.
:type MaxResults: integer
:param MaxResults: The maximum size of the response.
:rtype: dict
ReturnsResponse Syntax
{
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Triggers (list) --
A list of triggers for the specified job.
(dict) --
Information about a specific trigger.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
NextToken (string) --
A continuation token, if not all the requested triggers have yet been returned.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
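# --- Illustrative usage sketch (not part of the generated stubs) ---
# Lists the triggers that can start a given job, using the boto3 paginator for
# get_triggers. The job name is a hypothetical placeholder; omit
# DependentJobName to list every trigger instead.
def _example_get_triggers():
    import boto3
    glue = boto3.client('glue')
    paginator = glue.get_paginator('get_triggers')
    names = []
    for page in paginator.paginate(DependentJobName='example-job'):
        names.extend(t['Name'] for t in page['Triggers'])
    return names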
def get_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None):
"""
Retrieves a specified function definition from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionName='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the function to be retrieved is located. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the function is located.\n
:type FunctionName: string
:param FunctionName: [REQUIRED]\nThe name of the function.\n
:rtype: dict
ReturnsResponse Syntax
{
'UserDefinedFunction': {
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
}
Response Structure
(dict) --
UserDefinedFunction (dict) --
The requested function definition.
FunctionName (string) --
The name of the function.
ClassName (string) --
The Java class that contains the function code.
OwnerName (string) --
The owner of the function.
OwnerType (string) --
The owner type.
CreateTime (datetime) --
The time at which the function was created.
ResourceUris (list) --
The resource URIs for the function.
(dict) --
The URIs for function resources.
ResourceType (string) --
The type of the resource.
Uri (string) --
The URI for accessing the resource.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'UserDefinedFunction': {
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
"""
pass
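# --- Illustrative usage sketch (not part of the generated stubs) ---
# Fetches one user-defined function and returns the implementing class plus
# its resource URIs. Database and function names are hypothetical.
def _example_get_user_defined_function():
    import boto3
    glue = boto3.client('glue')
    resp = glue.get_user_defined_function(DatabaseName='example_db',
                                          FunctionName='example_udf')
    udf = resp['UserDefinedFunction']
    return udf['ClassName'], [u['Uri'] for u in udf.get('ResourceUris', [])]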
def get_user_defined_functions(CatalogId=None, DatabaseName=None, Pattern=None, NextToken=None, MaxResults=None):
"""
Retrieves multiple function definitions from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_user_defined_functions(
CatalogId='string',
DatabaseName='string',
Pattern='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the functions to be retrieved are located. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: The name of the catalog database where the functions are located.
:type Pattern: string
:param Pattern: [REQUIRED]\nA function-name pattern string that filters the function definitions returned.\n
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of functions to return in one response.
:rtype: dict
ReturnsResponse Syntax
{
'UserDefinedFunctions': [
{
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
UserDefinedFunctions (list) --
A list of requested function definitions.
(dict) --
Represents the equivalent of a Hive user-defined function (UDF ) definition.
FunctionName (string) --
The name of the function.
ClassName (string) --
The Java class that contains the function code.
OwnerName (string) --
The owner of the function.
OwnerType (string) --
The owner type.
CreateTime (datetime) --
The time at which the function was created.
ResourceUris (list) --
The resource URIs for the function.
(dict) --
The URIs for function resources.
ResourceType (string) --
The type of the resource.
Uri (string) --
The URI for accessing the resource.
NextToken (string) --
A continuation token, if the list of functions returned does not include the last requested function.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'UserDefinedFunctions': [
{
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
"""
pass
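# --- Illustrative usage sketch (not part of the generated stubs) ---
# Lists UDFs whose names match a pattern via the boto3 paginator for
# get_user_defined_functions. Database name and pattern are hypothetical.
def _example_get_user_defined_functions():
    import boto3
    glue = boto3.client('glue')
    paginator = glue.get_paginator('get_user_defined_functions')
    found = []
    for page in paginator.paginate(DatabaseName='example_db', Pattern='json*'):
        found.extend(f['FunctionName'] for f in page['UserDefinedFunctions'])
    return found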
def get_waiter(waiter_name=None):
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
:rtype: botocore.waiter.Waiter
"""
pass
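# --- Illustrative usage sketch (not part of the generated stubs) ---
# get_waiter only accepts waiter names the service model actually defines.
# waiter_names is a real attribute on every boto3 client; for Glue it may well
# be an empty list (no waiters defined), so check before calling get_waiter.
def _example_get_waiter():
    import boto3
    glue = boto3.client('glue')
    available = glue.waiter_names  # e.g. [] if Glue defines no waiters
    return glue.get_waiter(available[0]) if available else None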
def get_workflow(Name=None, IncludeGraph=None):
"""
Retrieves resource metadata for a workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.get_workflow(
Name='string',
IncludeGraph=True|False
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the workflow to retrieve.\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include a graph when returning the workflow resource metadata.
:rtype: dict
ReturnsResponse Syntax
{
'Workflow': {
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
Response Structure
(dict) --
Workflow (dict) --
The resource metadata for the workflow.
Name (string) --
The name of the workflow representing the flow.
Description (string) --
A description of the workflow.
DefaultRunProperties (dict) --
A collection of properties to be used as part of each execution of the workflow.
(string) --
(string) --
CreatedOn (datetime) --
The date and time when the workflow was created.
LastModifiedOn (datetime) --
The date and time when the workflow was last modified.
LastRun (dict) --
The information about the last execution of the workflow.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in the running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a trigger or a job, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a trigger, job, or crawler, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Workflow': {
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
:returns:
(string) --
(string) --
"""
pass
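# Illustrative sketch (not part of the generated stubs): fetch a workflow with its graph
# and walk the nodes and edges described in the response structure above. The workflow
# name 'my-etl-workflow' is a placeholder; only get_workflow and the documented response
# keys are assumed.
def _example_inspect_workflow_graph():
    import boto3
    glue = boto3.client('glue')
    workflow = glue.get_workflow(Name='my-etl-workflow', IncludeGraph=True)['Workflow']
    graph = workflow.get('Graph', {})
    for node in graph.get('Nodes', []):
        # Type is one of 'CRAWLER', 'JOB', or 'TRIGGER' per the response syntax.
        print(node['Type'], node['Name'], node['UniqueId'])
    for edge in graph.get('Edges', []):
        print(edge['SourceId'], '->', edge['DestinationId'])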
def get_workflow_run(Name=None, RunId=None, IncludeGraph=None):
"""
Retrieves the metadata for a given workflow run.
See also: AWS API Documentation
Exceptions
:example: response = client.get_workflow_run(
Name='string',
RunId='string',
IncludeGraph=True|False
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow being run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run.\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include the workflow graph in response or not.
:rtype: dict
ReturnsResponse Syntax
{
'Run': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
Response Structure
(dict) --
Run (dict) --
The requested workflow run metadata.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in the running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a trigger, job, or crawler, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Run': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
:returns:
(string) --
(string) --
"""
pass
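# Illustrative sketch: poll get_workflow_run until the run leaves the RUNNING/STOPPING
# states. The name and run ID arguments are placeholders supplied by the caller; the
# Status values come from the response syntax documented above.
def _example_wait_for_workflow_run(name, run_id, poll_seconds=30):
    import time
    import boto3
    glue = boto3.client('glue')
    while True:
        run = glue.get_workflow_run(Name=name, RunId=run_id)['Run']
        if run['Status'] not in ('RUNNING', 'STOPPING'):
            return run
        time.sleep(poll_seconds)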
def get_workflow_run_properties(Name=None, RunId=None):
"""
Retrieves the workflow run properties which were set during the run.
See also: AWS API Documentation
Exceptions
:example: response = client.get_workflow_run_properties(
Name='string',
RunId='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow which was run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run whose run properties should be returned.\n
:rtype: dict
ReturnsResponse Syntax
{
'RunProperties': {
'string': 'string'
}
}
Response Structure
(dict) --
RunProperties (dict) --
The workflow run properties which were set during the specified run.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'RunProperties': {
'string': 'string'
}
}
:returns:
(string) --
(string) --
"""
pass
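# Illustrative sketch: read back the run properties set during a workflow run. The
# property key 'last_processed_date' is a placeholder for whatever keys the workflow
# writes via put_workflow_run_properties.
def _example_read_run_properties(name, run_id):
    import boto3
    glue = boto3.client('glue')
    response = glue.get_workflow_run_properties(Name=name, RunId=run_id)
    return response['RunProperties'].get('last_processed_date')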
def get_workflow_runs(Name=None, IncludeGraph=None, NextToken=None, MaxResults=None):
"""
Retrieves metadata for all runs of a given workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.get_workflow_runs(
Name='string',
IncludeGraph=True|False,
NextToken='string',
MaxResults=123
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow whose metadata of runs should be returned.\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include the workflow graph in response or not.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum number of workflow runs to be included in the response.
:rtype: dict
ReturnsResponse Syntax
{
'Runs': [
{
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Runs (list) --
A list of workflow run metadata objects.
(dict) --
A workflow run is an execution of a workflow providing all the runtime information.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in the running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a trigger, job, or crawler, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
NextToken (string) --
A continuation token, if not all requested workflow runs have been returned.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Runs': [
{
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
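# Illustrative sketch: iterate over every run of a workflow by following NextToken. The
# workflow name is supplied by the caller; the MaxResults page size of 25 is arbitrary.
def _example_iter_workflow_runs(name):
    import boto3
    glue = boto3.client('glue')
    kwargs = {'Name': name, 'MaxResults': 25}
    while True:
        page = glue.get_workflow_runs(**kwargs)
        for run in page.get('Runs', []):
            yield run
        token = page.get('NextToken')
        if not token:
            return
        kwargs['NextToken'] = token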
def import_catalog_to_glue(CatalogId=None):
"""
Imports an existing Amazon Athena Data Catalog to AWS Glue
See also: AWS API Documentation
Exceptions
:example: response = client.import_catalog_to_glue(
CatalogId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the catalog to import. Currently, this should be the AWS account ID.
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
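# Illustrative sketch: import an existing Athena Data Catalog into AWS Glue. Per the
# parameter description above, CatalogId should currently be the AWS account ID; the
# value shown here is a placeholder.
def _example_import_athena_catalog(account_id='123456789012'):
    import boto3
    boto3.client('glue').import_catalog_to_glue(CatalogId=account_id)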
def list_crawlers(MaxResults=None, NextToken=None, Tags=None):
"""
Retrieves the names of all crawler resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_crawlers(
MaxResults=123,
NextToken='string',
Tags={
'string': 'string'
}
)
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'CrawlerNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
CrawlerNames (list) --
The names of all crawlers in the account, or the crawlers with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last crawler name available.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'CrawlerNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
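# Illustrative sketch: list only crawlers carrying a given tag, using the Tags filter
# described above, and follow NextToken to collect every page. The tag key and value
# are placeholders.
def _example_list_tagged_crawlers(tag_key='team', tag_value='analytics'):
    import boto3
    glue = boto3.client('glue')
    names, kwargs = [], {'Tags': {tag_key: tag_value}}
    while True:
        page = glue.list_crawlers(**kwargs)
        names.extend(page.get('CrawlerNames', []))
        token = page.get('NextToken')
        if not token:
            return names
        kwargs['NextToken'] = token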
def list_dev_endpoints(NextToken=None, MaxResults=None, Tags=None):
"""
Retrieves the names of all DevEndpoint resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_dev_endpoints(
NextToken='string',
MaxResults=123,
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'DevEndpointNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
DevEndpointNames (list) --
The names of all the DevEndpoint s in the account, or the DevEndpoint s with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last DevEndpoint name available.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'DevEndpointNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def list_jobs(NextToken=None, MaxResults=None, Tags=None):
"""
Retrieves the names of all job resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_jobs(
NextToken='string',
MaxResults=123,
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'JobNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
JobNames (list) --
The names of all jobs in the account, or the jobs with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last job name available.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def list_ml_transforms(NextToken=None, MaxResults=None, Filter=None, Sort=None, Tags=None):
"""
Retrieves a sortable, filterable list of existing AWS Glue machine learning transforms in this AWS account, or the resources with the specified tag. This operation takes the optional Tags field, which you can use as a filter of the responses so that tagged resources can be retrieved as a group. If you choose to use tag filtering, only resources with the tags are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_ml_transforms(
NextToken='string',
MaxResults=123,
Filter={
'Name': 'string',
'TransformType': 'FIND_MATCHES',
'Status': 'NOT_READY'|'READY'|'DELETING',
'GlueVersion': 'string',
'CreatedBefore': datetime(2015, 1, 1),
'CreatedAfter': datetime(2015, 1, 1),
'LastModifiedBefore': datetime(2015, 1, 1),
'LastModifiedAfter': datetime(2015, 1, 1),
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
]
},
Sort={
'Column': 'NAME'|'TRANSFORM_TYPE'|'STATUS'|'CREATED'|'LAST_MODIFIED',
'SortDirection': 'DESCENDING'|'ASCENDING'
},
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Filter: dict
:param Filter: A TransformFilterCriteria used to filter the machine learning transforms.\n\nName (string) --A unique transform name that is used to filter the machine learning transforms.\n\nTransformType (string) --The type of machine learning transform that is used to filter the machine learning transforms.\n\nStatus (string) --Filters the list of machine learning transforms by the last known status of the transforms (to indicate whether a transform can be used or not). One of 'NOT_READY', 'READY', or 'DELETING'.\n\nGlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.\n\nCreatedBefore (datetime) --The time and date before which the transforms were created.\n\nCreatedAfter (datetime) --The time and date after which the transforms were created.\n\nLastModifiedBefore (datetime) --Filter on transforms last modified before this date.\n\nLastModifiedAfter (datetime) --Filter on transforms last modified after this date.\n\nSchema (list) --Filters on datasets with a specific schema. The Map<Column, Type> object is an array of key-value pairs representing the schema this transform accepts, where Column is the name of a column, and Type is the type of the data such as an integer or string. Has an upper bound of 100 columns.\n\n(dict) --A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.\n\nName (string) --The name of the column.\n\nDataType (string) --The type of data in the column.\n\n\n\n\n\n\n
:type Sort: dict
:param Sort: A TransformSortCriteria used to sort the machine learning transforms.\n\nColumn (string) -- [REQUIRED]The column to be used in the sorting criteria that are associated with the machine learning transform.\n\nSortDirection (string) -- [REQUIRED]The sort direction to be used in the sorting criteria that are associated with the machine learning transform.\n\n\n
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'TransformIds': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
TransformIds (list) --
The identifiers of all the machine learning transforms in the account, or the machine learning transforms with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last transform identifier available.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformIds': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
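# Usage sketch: filtering and sorting machine learning transforms. The Filter
# and Sort shapes mirror the TransformFilterCriteria / TransformSortCriteria
# structures documented above; the status value and the helper name are
# illustrative assumptions, not a prescribed workflow.
def _example_list_ready_transforms(client):
    """Return the IDs of transforms in READY status, newest first (illustrative)."""
    response = client.list_ml_transforms(
        Filter={'Status': 'READY'},
        Sort={'Column': 'CREATED', 'SortDirection': 'DESCENDING'},
        MaxResults=100,
    )
    return response.get('TransformIds', [])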
def list_triggers(NextToken=None, DependentJobName=None, MaxResults=None, Tags=None):
"""
Retrieves the names of all trigger resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tag filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_triggers(
NextToken='string',
DependentJobName='string',
MaxResults=123,
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type DependentJobName: string
:param DependentJobName: The name of the job for which to retrieve triggers. The trigger that can start this job is returned. If there is no such trigger, all triggers are returned.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'TriggerNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
TriggerNames (list) --
The names of all triggers in the account, or the triggers with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last trigger name available.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'TriggerNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def list_workflows(NextToken=None, MaxResults=None):
"""
Lists names of workflows created in the account.
See also: AWS API Documentation
Exceptions
:example: response = client.list_workflows(
NextToken='string',
MaxResults=123
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:rtype: dict
ReturnsResponse Syntax
{
'Workflows': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
Workflows (list) --
List of names of workflows in the account.
(string) --
NextToken (string) --
A continuation token, if not all workflow names have been returned.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Workflows': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def put_data_catalog_encryption_settings(CatalogId=None, DataCatalogEncryptionSettings=None):
"""
Sets the security configuration for a specified catalog. After the configuration has been set, the specified encryption is applied to every catalog write thereafter.
See also: AWS API Documentation
Exceptions
:example: response = client.put_data_catalog_encryption_settings(
CatalogId='string',
DataCatalogEncryptionSettings={
'EncryptionAtRest': {
'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',
'SseAwsKmsKeyId': 'string'
},
'ConnectionPasswordEncryption': {
'ReturnConnectionPasswordEncrypted': True|False,
'AwsKmsKeyId': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog to set the security configuration for. If none is provided, the AWS account ID is used by default.
:type DataCatalogEncryptionSettings: dict
:param DataCatalogEncryptionSettings: [REQUIRED]\nThe security configuration to set.\n\nEncryptionAtRest (dict) --Specifies the encryption-at-rest configuration for the Data Catalog.\n\nCatalogEncryptionMode (string) -- [REQUIRED]The encryption-at-rest mode for encrypting Data Catalog data.\n\nSseAwsKmsKeyId (string) --The ID of the AWS KMS key to use for encryption at rest.\n\n\n\nConnectionPasswordEncryption (dict) --When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.\n\nReturnConnectionPasswordEncrypted (boolean) -- [REQUIRED]When the ReturnConnectionPasswordEncrypted flag is set to 'true', passwords remain encrypted in the responses of GetConnection and GetConnections . This encryption takes effect independently from catalog encryption.\n\nAwsKmsKeyId (string) --An AWS KMS key that is used to encrypt the connection password.\nIf connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog.\nYou can set the decrypt permission to enable or restrict access on the password key according to your security requirements.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
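# Usage sketch: enabling encryption at rest and connection-password encryption
# for the Data Catalog in one call. The KMS key IDs are placeholders; whether
# you enable one or both settings depends on your own security requirements.
def _example_enable_catalog_encryption(client, catalog_kms_key_id, password_kms_key_id):
    """Apply SSE-KMS settings to the account's Data Catalog (illustrative)."""
    client.put_data_catalog_encryption_settings(
        DataCatalogEncryptionSettings={
            'EncryptionAtRest': {
                'CatalogEncryptionMode': 'SSE-KMS',
                'SseAwsKmsKeyId': catalog_kms_key_id,
            },
            'ConnectionPasswordEncryption': {
                'ReturnConnectionPasswordEncrypted': True,
                'AwsKmsKeyId': password_kms_key_id,
            },
        }
    )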
def put_resource_policy(PolicyInJson=None, PolicyHashCondition=None, PolicyExistsCondition=None):
"""
Sets the Data Catalog resource policy for access control.
See also: AWS API Documentation
Exceptions
:example: response = client.put_resource_policy(
PolicyInJson='string',
PolicyHashCondition='string',
PolicyExistsCondition='MUST_EXIST'|'NOT_EXIST'|'NONE'
)
:type PolicyInJson: string
:param PolicyInJson: [REQUIRED]\nContains the policy document to set, in JSON format.\n
:type PolicyHashCondition: string
:param PolicyHashCondition: The hash value returned when the previous policy was set using PutResourcePolicy . Its purpose is to prevent concurrent modifications of a policy. Do not use this parameter if no previous policy has been set.
:type PolicyExistsCondition: string
:param PolicyExistsCondition: A value of MUST_EXIST is used to update a policy. A value of NOT_EXIST is used to create a new policy. If a value of NONE or a null value is used, the call will not depend on the existence of a policy.
:rtype: dict
ReturnsResponse Syntax
{
'PolicyHash': 'string'
}
Response Structure
(dict) --
PolicyHash (string) --
A hash of the policy that has just been set. This must be included in a subsequent call that overwrites or updates this policy.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
:return: {
'PolicyHash': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
"""
pass
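# Usage sketch: updating an existing Data Catalog resource policy while
# guarding against concurrent modification. The caller supplies the PolicyHash
# returned by the previous put_resource_policy call; passing it as
# PolicyHashCondition makes the update fail with ConditionCheckFailureException
# if someone else changed the policy in the meantime. The helper name and
# policy content are illustrative.
def _example_update_resource_policy(client, policy_json, previous_hash):
    """Overwrite the catalog policy only if it still matches previous_hash."""
    response = client.put_resource_policy(
        PolicyInJson=policy_json,
        PolicyHashCondition=previous_hash,
        PolicyExistsCondition='MUST_EXIST',
    )
    # Keep the new hash for the next conditional update.
    return response['PolicyHash']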
def put_workflow_run_properties(Name=None, RunId=None, RunProperties=None):
"""
Puts the specified workflow run properties for the given workflow run. If a property already exists for the specified run, its value is overridden; otherwise the property is added to the existing properties.
See also: AWS API Documentation
Exceptions
:example: response = client.put_workflow_run_properties(
Name='string',
RunId='string',
RunProperties={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow which was run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run for which the run properties should be updated.\n
:type RunProperties: dict
:param RunProperties: [REQUIRED]\nThe properties to put for the specified run.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {}
:returns:
(dict) --
"""
pass
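# Usage sketch: attaching a run property to a workflow run so that downstream
# jobs in the same workflow can read it (for example via
# get_workflow_run_properties, documented earlier in this module). The property
# name and helper name are illustrative.
def _example_tag_workflow_run(client, workflow_name, run_id, environment):
    """Record an 'environment' property on an in-progress workflow run."""
    client.put_workflow_run_properties(
        Name=workflow_name,
        RunId=run_id,
        RunProperties={'environment': environment},
    )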
def reset_job_bookmark(JobName=None, RunId=None):
"""
Resets a bookmark entry.
See also: AWS API Documentation
Exceptions
:example: response = client.reset_job_bookmark(
JobName='string',
RunId='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job in question.\n
:type RunId: string
:param RunId: The unique run identifier associated with this job run.
:rtype: dict
ReturnsResponse Syntax
{
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
Response Structure
(dict) --
JobBookmarkEntry (dict) --
The reset bookmark entry.
JobName (string) --
The name of the job in question.
Version (integer) --
The version of the job.
Run (integer) --
The run ID number.
Attempt (integer) --
The attempt ID number.
PreviousRunId (string) --
The unique run identifier associated with the previous job run.
RunId (string) --
The run ID number.
JobBookmark (string) --
The bookmark itself.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def search_tables(CatalogId=None, NextToken=None, Filters=None, SearchText=None, SortCriteria=None, MaxResults=None):
"""
Searches a set of tables based on properties in the table metadata as well as on the parent database. You can search against text or filter conditions.
You can only get tables that you have access to based on the security policies defined in Lake Formation. You need at least read-only access to the table for it to be returned. If you do not have access to all the columns in the table, these columns will not be searched against when returning the list of tables back to you. If you have access to the columns but not the data in the columns, those columns and the associated metadata for those columns will be included in the search.
See also: AWS API Documentation
Exceptions
:example: response = client.search_tables(
CatalogId='string',
NextToken='string',
Filters=[
{
'Key': 'string',
'Value': 'string',
'Comparator': 'EQUALS'|'GREATER_THAN'|'LESS_THAN'|'GREATER_THAN_EQUALS'|'LESS_THAN_EQUALS'
},
],
SearchText='string',
SortCriteria=[
{
'FieldName': 'string',
'Sort': 'ASC'|'DESC'
},
],
MaxResults=123
)
:type CatalogId: string
:param CatalogId: A unique identifier, consisting of ``account_id/datalake``.
:type NextToken: string
:param NextToken: A continuation token, included if this is a continuation call.
:type Filters: list
:param Filters: A list of key-value pairs, and a comparator used to filter the search results. Returns all entities matching the predicate.\n\n(dict) --Defines a property predicate.\n\nKey (string) --The key of the property.\n\nValue (string) --The value of the property.\n\nComparator (string) --The comparator used to compare this property to others.\n\n\n\n\n
:type SearchText: string
:param SearchText: A string used for a text search.\nSpecifying a value in quotes filters based on an exact match to the value.\n
:type SortCriteria: list
:param SortCriteria: A list of criteria for sorting the results by a field name, in an ascending or descending order.\n\n(dict) --Specifies a field to sort by and a sort order.\n\nFieldName (string) --The name of the field on which to sort.\n\nSort (string) --An ascending or descending sort.\n\n\n\n\n
:type MaxResults: integer
:param MaxResults: The maximum number of tables to return in a single response.
:rtype: dict
ReturnsResponse Syntax
{
'NextToken': 'string',
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
]
}
Response Structure
(dict) --
NextToken (string) --
A continuation token, present if the current list segment is not the last.
TableList (list) --
A list of the requested Table objects. The SearchTables response returns only the tables that you have access to.
(dict) --
Represents a collection of related data organized in columns and rows.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1), or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'NextToken': 'string',
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
]
}
:returns:
(string) --
(string) --
"""
pass
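# Usage sketch: a text search over the Data Catalog with a descending sort on
# UpdateTime, following NextToken until the result set is exhausted. The sort
# field, search text, and helper name are illustrative.
def _example_search_tables(client, text):
    """Yield Table objects whose metadata matches the given search text."""
    kwargs = {
        'SearchText': text,
        'SortCriteria': [{'FieldName': 'UpdateTime', 'Sort': 'DESC'}],
        'MaxResults': 100,
    }
    while True:
        response = client.search_tables(**kwargs)
        for table in response.get('TableList', []):
            yield table
        token = response.get('NextToken')
        if not token:
            return
        kwargs['NextToken'] = token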
def start_crawler(Name=None):
"""
Starts a crawl using the specified crawler, regardless of what is scheduled. If the crawler is already running, returns a CrawlerRunningException .
See also: AWS API Documentation
Exceptions
:example: response = client.start_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the crawler to start.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
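# Usage sketch: starting a crawler and treating "already running" as a benign
# condition, since start_crawler raises CrawlerRunningException in that case.
# The boolean return convention and helper name are illustrative.
def _example_start_crawler_if_idle(client, crawler_name):
    """Start the crawler; return False if it was already running."""
    try:
        client.start_crawler(Name=crawler_name)
        return True
    except client.exceptions.CrawlerRunningException:
        return False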
def start_crawler_schedule(CrawlerName=None):
"""
Changes the schedule state of the specified crawler to SCHEDULED , unless the crawler is already running or the schedule state is already SCHEDULED .
See also: AWS API Documentation
Exceptions
:example: response = client.start_crawler_schedule(
CrawlerName='string'
)
:type CrawlerName: string
:param CrawlerName: [REQUIRED]\nName of the crawler to schedule.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.NoScheduleException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.NoScheduleException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def start_export_labels_task_run(TransformId=None, OutputS3Path=None):
"""
Begins an asynchronous task to export all labeled data for a particular transform. This task is the only label-related API call that is not part of the typical active learning workflow. You typically use StartExportLabelsTaskRun when you want to work with all of your existing labels at the same time, such as when you want to remove or change labels that were previously submitted as truth. This API operation accepts the TransformId whose labels you want to export and an Amazon Simple Storage Service (Amazon S3) path to export the labels to. The operation returns a TaskRunId . You can check on the status of your task run by calling the GetMLTaskRun API.
See also: AWS API Documentation
Exceptions
:example: response = client.start_export_labels_task_run(
TransformId='string',
OutputS3Path='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type OutputS3Path: string
:param OutputS3Path: [REQUIRED]\nThe Amazon S3 path where you export the labels.\n
:rtype: dict
ReturnsResponse Syntax
{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --
The unique identifier for the task run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TaskRunId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
"""
pass
def start_import_labels_task_run(TransformId=None, InputS3Path=None, ReplaceAllLabels=None):
"""
Enables you to provide additional labels (examples of truth) to be used to teach the machine learning transform and improve its quality. This API operation is generally used as part of the active learning workflow that starts with the StartMLLabelingSetGenerationTaskRun call and that ultimately results in improving the quality of your machine learning transform.
After the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue machine learning will have generated a series of questions for humans to answer. (Answering these questions is often called 'labeling' in the machine learning workflows). In the case of the FindMatches transform, these questions are of the form, "What is the correct way to group these rows together into groups composed entirely of matching records?" After the labeling process is finished, users upload their answers/labels with a call to StartImportLabelsTaskRun . After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform use the new and improved labels and perform a higher-quality transformation.
By default, StartMLLabelingSetGenerationTaskRun continually learns from and combines all labels that you upload unless you set Replace to true. If you set Replace to true, StartImportLabelsTaskRun deletes and forgets all previously uploaded labels and learns only from the exact set that you upload. Replacing labels can be helpful if you realize that you previously uploaded incorrect labels, and you believe that they are having a negative effect on your transform quality.
You can check on the status of your task run by calling the GetMLTaskRun operation.
See also: AWS API Documentation
Exceptions
:example: response = client.start_import_labels_task_run(
TransformId='string',
InputS3Path='string',
ReplaceAllLabels=True|False
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type InputS3Path: string
:param InputS3Path: [REQUIRED]\nThe Amazon Simple Storage Service (Amazon S3) path from where you import the labels.\n
:type ReplaceAllLabels: boolean
:param ReplaceAllLabels: Indicates whether to overwrite your existing labels.
:rtype: dict
ReturnsResponse Syntax
{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --
The unique identifier for the task run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
:return: {
'TaskRunId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
"""
pass
def start_job_run(JobName=None, JobRunId=None, Arguments=None, AllocatedCapacity=None, Timeout=None, MaxCapacity=None, SecurityConfiguration=None, NotificationProperty=None, WorkerType=None, NumberOfWorkers=None):
"""
Starts a job run using a job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.start_job_run(
JobName='string',
JobRunId='string',
Arguments={
'string': 'string'
},
AllocatedCapacity=123,
Timeout=123,
MaxCapacity=123.0,
SecurityConfiguration='string',
NotificationProperty={
'NotifyDelayAfter': 123
},
WorkerType='Standard'|'G.1X'|'G.2X',
NumberOfWorkers=123
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to use.\n
:type JobRunId: string
:param JobRunId: The ID of a previous JobRun to retry.
:type Arguments: dict
:param Arguments: The job arguments specifically for this run. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type AllocatedCapacity: integer
:param AllocatedCapacity: This field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) to allocate to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n
:type Timeout: integer
:param Timeout: The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n
:type SecurityConfiguration: string
:param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this job run.
:type NotificationProperty: dict
:param NotificationProperty: Specifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n
:rtype: dict
ReturnsResponse Syntax
{
'JobRunId': 'string'
}
Response Structure
(dict) --
JobRunId (string) --
The ID assigned to this job run.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'JobRunId': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
"""
pass
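# Usage sketch: starting a job run with run-specific arguments and an explicit
# worker configuration. MaxCapacity must not be combined with WorkerType /
# NumberOfWorkers, so only the latter pair is set here. The argument names are
# illustrative; polling for completion would use get_job_run (documented
# earlier in this module).
def _example_start_spark_job(client, job_name, input_path, output_path):
    """Kick off a Glue Spark job run and return its JobRunId (illustrative)."""
    response = client.start_job_run(
        JobName=job_name,
        Arguments={
            '--input_path': input_path,
            '--output_path': output_path,
        },
        WorkerType='G.1X',
        NumberOfWorkers=10,
        Timeout=120,
    )
    return response['JobRunId']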
def start_ml_evaluation_task_run(TransformId=None):
"""
Starts a task to estimate the quality of the transform.
When you provide label sets as examples of truth, AWS Glue machine learning uses some of those examples to learn from them. The rest of the labels are used as a test to estimate quality.
Returns a unique identifier for the run. You can call GetMLTaskRun to get more information about the stats of the EvaluationTaskRun .
See also: AWS API Documentation
Exceptions
:example: response = client.start_ml_evaluation_task_run(
TransformId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:rtype: dict
ReturnsResponse Syntax{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --The unique identifier associated with this run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.ConcurrentRunsExceededException
Glue.Client.exceptions.MLTransformNotReadyException
:return: {
'TaskRunId': 'string'
}
"""
pass
def start_ml_labeling_set_generation_task_run(TransformId=None, OutputS3Path=None):
"""
Starts the active learning workflow for your machine learning transform to improve the transform\'s quality by generating label sets and adding labels.
When the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue will have generated a "labeling set" or a set of questions for humans to answer.
In the case of the FindMatches transform, these questions are of the form, "What is the correct way to group these rows together into groups composed entirely of matching records?"
After the labeling process is finished, you can upload your labels with a call to StartImportLabelsTaskRun . After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform will use the new and improved labels and perform a higher-quality transformation.
See also: AWS API Documentation
Exceptions
:example: response = client.start_ml_labeling_set_generation_task_run(
TransformId='string',
OutputS3Path='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type OutputS3Path: string
:param OutputS3Path: [REQUIRED]\nThe Amazon Simple Storage Service (Amazon S3) path where you generate the labeling set.\n
:rtype: dict
ReturnsResponse Syntax
{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --
The unique run identifier that is associated with this task run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'TaskRunId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.ConcurrentRunsExceededException
"""
pass
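# Usage sketch: one round of the active-learning workflow described above for
# a FindMatches transform -- generate a labeling set, have humans label it,
# import the labels, then estimate quality. In a real workflow you would wait
# for each task run to finish (via get_ml_task_run, documented earlier in this
# module) and for the human labeling step before importing; here the task-run
# IDs are simply returned. S3 paths and the helper name are illustrative.
def _example_label_and_evaluate(client, transform_id, labeling_output_s3, labels_input_s3):
    """Drive one labeling round for a machine learning transform (illustrative)."""
    generate = client.start_ml_labeling_set_generation_task_run(
        TransformId=transform_id,
        OutputS3Path=labeling_output_s3,
    )
    # ... wait for the generation task, then humans answer the questions and
    # the completed labels land under labels_input_s3 ...
    import_run = client.start_import_labels_task_run(
        TransformId=transform_id,
        InputS3Path=labels_input_s3,
        ReplaceAllLabels=False,
    )
    evaluate = client.start_ml_evaluation_task_run(TransformId=transform_id)
    return generate['TaskRunId'], import_run['TaskRunId'], evaluate['TaskRunId']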
def start_trigger(Name=None):
"""
Starts an existing trigger. See Triggering Jobs for information about how different types of trigger are started.
See also: AWS API Documentation
Exceptions
:example: response = client.start_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to start.\n
:rtype: dict
ReturnsResponse Syntax{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --The name of the trigger that was started.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'Name': 'string'
}
"""
pass
def start_workflow_run(Name=None):
"""
Starts a new run of the specified workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.start_workflow_run(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the workflow to start.\n
:rtype: dict
ReturnsResponse Syntax{
'RunId': 'string'
}
Response Structure
(dict) --
RunId (string) --An Id for the new run.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'RunId': 'string'
}
"""
pass
def stop_crawler(Name=None):
"""
If the specified crawler is running, stops the crawl.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the crawler to stop.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerNotRunningException
Glue.Client.exceptions.CrawlerStoppingException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerNotRunningException
Glue.Client.exceptions.CrawlerStoppingException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def stop_crawler_schedule(CrawlerName=None):
"""
Sets the schedule state of the specified crawler to NOT_SCHEDULED , but does not stop the crawler if it is already running.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_crawler_schedule(
CrawlerName='string'
)
:type CrawlerName: string
:param CrawlerName: [REQUIRED]\nName of the crawler whose schedule state to set.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerNotRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerNotRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def stop_trigger(Name=None):
"""
Stops a specified trigger.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to stop.\n
:rtype: dict
ReturnsResponse Syntax{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --The name of the trigger that was stopped.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
"""
pass
def stop_workflow_run(Name=None, RunId=None):
"""
Stops the execution of the specified workflow run.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_workflow_run(
Name='string',
RunId='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the workflow to stop.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run to stop.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.IllegalWorkflowStateException
:return: {}
:returns:
(dict) --
"""
pass
def tag_resource(ResourceArn=None, TagsToAdd=None):
"""
Adds tags to a resource. A tag is a label you can assign to an AWS resource. In AWS Glue, you can tag only certain resources. For information about what resources you can tag, see AWS Tags in AWS Glue .
See also: AWS API Documentation
Exceptions
:example: response = client.tag_resource(
ResourceArn='string',
TagsToAdd={
'string': 'string'
}
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe ARN of the AWS Glue resource to which to add the tags. For more information about AWS Glue resource ARNs, see the AWS Glue ARN string pattern .\n
:type TagsToAdd: dict
:param TagsToAdd: [REQUIRED]\nTags to add to this resource.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {}
:returns:
(dict) --
"""
pass
def untag_resource(ResourceArn=None, TagsToRemove=None):
"""
Removes tags from a resource.
See also: AWS API Documentation
Exceptions
:example: response = client.untag_resource(
ResourceArn='string',
TagsToRemove=[
'string',
]
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource from which to remove the tags.\n
:type TagsToRemove: list
:param TagsToRemove: [REQUIRED]\nTags to remove from this resource.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {}
:returns:
(dict) --
"""
pass
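# Usage sketch: removing and re-adding a tag on a Glue resource identified by
# its ARN. The tag key and value are placeholders; tag_resource and
# untag_resource only work for the resource types listed under "AWS Tags in
# AWS Glue".
def _example_retag_resource(client, resource_arn):
    """Replace a 'team' tag on the given resource (illustrative)."""
    client.untag_resource(ResourceArn=resource_arn, TagsToRemove=['team'])
    client.tag_resource(ResourceArn=resource_arn, TagsToAdd={'team': 'data-platform'})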
def update_classifier(GrokClassifier=None, XMLClassifier=None, JsonClassifier=None, CsvClassifier=None):
"""
Modifies an existing classifier (a GrokClassifier , an XMLClassifier , a JsonClassifier , or a CsvClassifier , depending on which field is present).
See also: AWS API Documentation
Exceptions
:example: response = client.update_classifier(
GrokClassifier={
'Name': 'string',
'Classification': 'string',
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
XMLClassifier={
'Name': 'string',
'Classification': 'string',
'RowTag': 'string'
},
JsonClassifier={
'Name': 'string',
'JsonPath': 'string'
},
CsvClassifier={
'Name': 'string',
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
)
:type GrokClassifier: dict
:param GrokClassifier: A GrokClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the GrokClassifier .\n\nClassification (string) --An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on.\n\nGrokPattern (string) --The grok pattern used by this classifier.\n\nCustomPatterns (string) --Optional custom grok patterns used by this classifier.\n\n\n
:type XMLClassifier: dict
:param XMLClassifier: An XMLClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nClassification (string) --An identifier of the data format that the classifier matches.\n\nRowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This cannot identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a='A' item_b='B'></row> is okay, but <row item_a='A' item_b='B' /> is not).\n\n\n
:type JsonClassifier: dict
:param JsonClassifier: A JsonClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nJsonPath (string) --A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .\n\n\n
:type CsvClassifier: dict
:param CsvClassifier: A CsvClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nDelimiter (string) --A custom symbol to denote what separates each column entry in the row.\n\nQuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.\n\nContainsHeader (string) --Indicates whether the CSV file contains a header.\n\nHeader (list) --A list of strings representing column names.\n\n(string) --\n\n\nDisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true.\n\nAllowSingleColumn (boolean) --Enables the processing of files that contain only one column.\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.VersionMismatchException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def update_connection(CatalogId=None, Name=None, ConnectionInput=None):
"""
Updates a connection definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_connection(
CatalogId='string',
Name='string',
ConnectionInput={
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the connection definition to update.\n
:type ConnectionInput: dict
:param ConnectionInput: [REQUIRED]\nA ConnectionInput object that redefines the connection in question.\n\nName (string) -- [REQUIRED]The name of the connection.\n\nDescription (string) --The description of the connection.\n\nConnectionType (string) -- [REQUIRED]The type of the connection. Currently, these types are supported:\n\nJDBC - Designates a connection to a database through Java Database Connectivity (JDBC).\nKAFKA - Designates a connection to an Apache Kafka streaming platform.\nMONGODB - Designates a connection to a MongoDB document database.\n\nSFTP is not supported.\n\nMatchCriteria (list) --A list of criteria that can be used in selecting this connection.\n\n(string) --\n\n\nConnectionProperties (dict) -- [REQUIRED]These key-value pairs define parameters for the connection.\n\n(string) --\n(string) --\n\n\n\n\nPhysicalConnectionRequirements (dict) --A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to successfully make this connection.\n\nSubnetId (string) --The subnet ID used by the connection.\n\nSecurityGroupIdList (list) --The security group ID list used by the connection.\n\n(string) --\n\n\nAvailabilityZone (string) --The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
def update_crawler(Name=None, Role=None, DatabaseName=None, Description=None, Targets=None, Schedule=None, Classifiers=None, TablePrefix=None, SchemaChangePolicy=None, Configuration=None, CrawlerSecurityConfiguration=None):
"""
Updates a crawler. If a crawler is running, you must stop it using StopCrawler before updating it.
See also: AWS API Documentation
Exceptions
:example: response = client.update_crawler(
Name='string',
Role='string',
DatabaseName='string',
Description='string',
Targets={
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
Schedule='string',
Classifiers=[
'string',
],
TablePrefix='string',
SchemaChangePolicy={
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
Configuration='string',
CrawlerSecurityConfiguration='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the new crawler.\n
:type Role: string
:param Role: The IAM role or Amazon Resource Name (ARN) of an IAM role that is used by the new crawler to access customer resources.
:type DatabaseName: string
:param DatabaseName: The AWS Glue database where results are stored, such as: arn:aws:daylight:us-east-1::database/sometable/* .
:type Description: string
:param Description: A description of the new crawler.
:type Targets: dict
:param Targets: A list of targets to crawl.\n\nS3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.\n\n(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).\n\nPath (string) --The path to the Amazon S3 target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nJdbcTargets (list) --Specifies JDBC targets.\n\n(dict) --Specifies a JDBC data store to crawl.\n\nConnectionName (string) --The name of the connection to use to connect to the JDBC target.\n\nPath (string) --The path of the JDBC target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nDynamoDBTargets (list) --Specifies Amazon DynamoDB targets.\n\n(dict) --Specifies an Amazon DynamoDB table to crawl.\n\nPath (string) --The name of the DynamoDB table to crawl.\n\n\n\n\n\nCatalogTargets (list) --Specifies AWS Glue Data Catalog targets.\n\n(dict) --Specifies an AWS Glue Data Catalog target.\n\nDatabaseName (string) -- [REQUIRED]The name of the database to be synchronized.\n\nTables (list) -- [REQUIRED]A list of the tables to be synchronized.\n\n(string) --\n\n\n\n\n\n\n\n
:type Schedule: string
:param Schedule: A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
:type Classifiers: list
:param Classifiers: A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.\n\n(string) --\n\n
:type TablePrefix: string
:param TablePrefix: The table prefix used for catalog tables that are created.
:type SchemaChangePolicy: dict
:param SchemaChangePolicy: The policy for the crawler\'s update and deletion behavior.\n\nUpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\n\nDeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\n\n\n
:type Configuration: string
:param Configuration: The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
:type CrawlerSecurityConfiguration: string
:param CrawlerSecurityConfiguration: The name of the SecurityConfiguration structure to be used by this crawler.
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.VersionMismatchException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
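# --- Editor's hedged usage sketch (not part of the generated stubs) ----------
# Shows one way to call update_crawler through a real boto3 Glue client,
# mirroring the request syntax documented above. The crawler name, IAM role,
# and S3 path are illustrative assumptions.
def _example_update_crawler():
    import boto3
    glue = boto3.client('glue')
    return glue.update_crawler(
        Name='nightly-sales-crawler',                     # assumed crawler name
        Role='GlueCrawlerRole',                           # assumed IAM role
        Targets={'S3Targets': [{'Path': 's3://example-bucket/sales/'}]},
        Schedule='cron(15 12 * * ? *)',                   # daily at 12:15 UTC
    )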
def update_crawler_schedule(CrawlerName=None, Schedule=None):
"""
Updates the schedule of a crawler using a cron expression.
See also: AWS API Documentation
Exceptions
:example: response = client.update_crawler_schedule(
CrawlerName='string',
Schedule='string'
)
:type CrawlerName: string
:param CrawlerName: [REQUIRED]\nThe name of the crawler whose schedule to update.\n
:type Schedule: string
:param Schedule: The updated cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.VersionMismatchException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
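# --- Editor's hedged usage sketch (not part of the generated stubs) ----------
# Minimal update_crawler_schedule call via boto3; the crawler name is an
# illustrative assumption and the cron expression follows the example given in
# the docstring above.
def _example_update_crawler_schedule():
    import boto3
    glue = boto3.client('glue')
    return glue.update_crawler_schedule(
        CrawlerName='nightly-sales-crawler',   # assumed crawler name
        Schedule='cron(15 12 * * ? *)',        # every day at 12:15 UTC
    )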
def update_database(CatalogId=None, Name=None, DatabaseInput=None):
"""
Updates an existing database definition in a Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_database(
CatalogId='string',
Name='string',
DatabaseInput={
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the metadata database resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the database to update in the catalog. For Hive compatibility, this is folded to lowercase.\n
:type DatabaseInput: dict
:param DatabaseInput: [REQUIRED]\nA DatabaseInput object specifying the new definition of the metadata database in the catalog.\n\nName (string) -- [REQUIRED]The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the database.\n\nLocationUri (string) --The location of the database (for example, an HDFS path).\n\nParameters (dict) --These key-value pairs define parameters and properties of the database.\nThese key-value pairs define parameters and properties of the database.\n\n(string) --\n(string) --\n\n\n\n\nCreateTableDefaultPermissions (list) --Creates a set of default permissions on the table for principals.\n\n(dict) --Permissions granted to a principal.\n\nPrincipal (dict) --The principal who is granted permissions.\n\nDataLakePrincipalIdentifier (string) --An identifier for the AWS Lake Formation principal.\n\n\n\nPermissions (list) --The permissions that are granted to the principal.\n\n(string) --\n\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
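# --- Editor's hedged usage sketch (not part of the generated stubs) ----------
# update_database with the required DatabaseInput; the database name,
# description, and location are illustrative assumptions.
def _example_update_database():
    import boto3
    glue = boto3.client('glue')
    return glue.update_database(
        Name='sales_db',                                  # assumed existing name
        DatabaseInput={
            'Name': 'sales_db',                           # folded to lowercase
            'Description': 'Curated sales tables',
            'LocationUri': 's3://example-bucket/sales/',  # assumed location
        },
    )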
def update_dev_endpoint(EndpointName=None, PublicKey=None, AddPublicKeys=None, DeletePublicKeys=None, CustomLibraries=None, UpdateEtlLibraries=None, DeleteArguments=None, AddArguments=None):
"""
Updates a specified development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.update_dev_endpoint(
EndpointName='string',
PublicKey='string',
AddPublicKeys=[
'string',
],
DeletePublicKeys=[
'string',
],
CustomLibraries={
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string'
},
UpdateEtlLibraries=True|False,
DeleteArguments=[
'string',
],
AddArguments={
'string': 'string'
}
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nThe name of the DevEndpoint to be updated.\n
:type PublicKey: string
:param PublicKey: The public key for the DevEndpoint to use.
:type AddPublicKeys: list
:param AddPublicKeys: The list of public keys for the DevEndpoint to use.\n\n(string) --\n\n
:type DeletePublicKeys: list
:param DeletePublicKeys: The list of public keys to be deleted from the DevEndpoint .\n\n(string) --\n\n
:type CustomLibraries: dict
:param CustomLibraries: Custom Python or Java libraries to be loaded in the DevEndpoint .\n\nExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon Simple Storage Service (Amazon S3) bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.\n\nNote\nYou can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.\n\n\nExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .\n\nNote\nYou can only use pure Java/Scala libraries with a DevEndpoint .\n\n\n\n
:type UpdateEtlLibraries: boolean
:param UpdateEtlLibraries: True if the list of custom libraries to be loaded in the development endpoint needs to be updated, or False otherwise.
:type DeleteArguments: list
:param DeleteArguments: The list of argument keys to be deleted from the map of arguments used to configure the DevEndpoint .\n\n(string) --\n\n
:type AddArguments: dict
:param AddArguments: The map of arguments to add to the map of arguments used to configure the DevEndpoint .\nValid arguments are:\n\n'--enable-glue-datacatalog': ''\n'GLUE_PYTHON_VERSION': '3'\n'GLUE_PYTHON_VERSION': '2'\n\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ValidationException
:return: {}
:returns:
(dict) --
"""
pass
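# --- Editor's hedged usage sketch (not part of the generated stubs) ----------
# update_dev_endpoint that adds an SSH public key and switches the endpoint to
# Python 3 via the documented GLUE_PYTHON_VERSION argument; the endpoint name
# and key material are illustrative assumptions.
def _example_update_dev_endpoint():
    import boto3
    glue = boto3.client('glue')
    return glue.update_dev_endpoint(
        EndpointName='etl-dev-endpoint',                      # assumed name
        AddPublicKeys=['ssh-rsa AAAA... user@example.com'],   # assumed key
        AddArguments={'GLUE_PYTHON_VERSION': '3'},            # documented argument
    )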
def update_job(JobName=None, JobUpdate=None):
"""
Updates an existing job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.update_job(
JobName='string',
JobUpdate={
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to update.\n
:type JobUpdate: dict
:param JobUpdate: [REQUIRED]\nSpecifies the values with which to update the job definition.\n\nDescription (string) --Description of the job being defined.\n\nLogUri (string) --This field is reserved for future use.\n\nRole (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required).\n\nExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\n\nMaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\n\n\n\nCommand (dict) --The JobCommand that executes this job (required).\n\nName (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .\n\nScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.\n\nPythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.\n\n\n\nDefaultArguments (dict) --The default arguments for this job.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nNonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.\n\n(string) --\n(string) --\n\n\n\n\nConnections (dict) --The connections used for this job.\n\nConnections (list) --A list of connections used by the job.\n\n(string) --\n\n\n\n\nMaxRetries (integer) --The maximum number of times to retry this job if it fails.\n\nAllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) to allocate to this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nTimeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).\n\nMaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --The type of predefined worker that is allocated when a job runs. 
Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\n\nNumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.\n\nNotificationProperty (dict) --Specifies the configuration properties of a job notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\n\n\n
:rtype: dict
Returns
Response Syntax
{
'JobName': 'string'
}
Response Structure
(dict) --
JobName (string) --
Returns the name of the updated job definition.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'JobName': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
"""
pass
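# --- Editor's hedged usage sketch (not part of the generated stubs) ----------
# update_job with a JobUpdate carrying the Role and Command that the docstring
# marks as required; the job name, role, and script location are illustrative
# assumptions. MaxCapacity is deliberately omitted because WorkerType and
# NumberOfWorkers are set.
def _example_update_job():
    import boto3
    glue = boto3.client('glue')
    return glue.update_job(
        JobName='daily-etl-job',                              # assumed job name
        JobUpdate={
            'Role': 'GlueJobRole',                            # assumed IAM role
            'Command': {
                'Name': 'glueetl',                            # Spark ETL job
                'ScriptLocation': 's3://example-bucket/scripts/daily_etl.py',
                'PythonVersion': '3',
            },
            'WorkerType': 'G.1X',
            'NumberOfWorkers': 10,
            'Timeout': 120,                                   # minutes
        },
    )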
def update_ml_transform(TransformId=None, Name=None, Description=None, Parameters=None, Role=None, GlueVersion=None, MaxCapacity=None, WorkerType=None, NumberOfWorkers=None, Timeout=None, MaxRetries=None):
"""
Updates an existing machine learning transform. Call this operation to tune the algorithm parameters to achieve better results.
After calling this operation, you can call the StartMLEvaluationTaskRun operation to assess how well your new parameters achieved your goals (such as improving the quality of your machine learning transform, or making it more cost-effective).
See also: AWS API Documentation
Exceptions
:example: response = client.update_ml_transform(
TransformId='string',
Name='string',
Description='string',
Parameters={
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
Role='string',
GlueVersion='string',
MaxCapacity=123.0,
WorkerType='Standard'|'G.1X'|'G.2X',
NumberOfWorkers=123,
Timeout=123,
MaxRetries=123
)
:type TransformId: string
:param TransformId: [REQUIRED]\nA unique identifier that was generated when the transform was created.\n
:type Name: string
:param Name: The unique name that you gave the transform when you created it.
:type Description: string
:param Description: A description of the transform. The default is an empty string.
:type Parameters: dict
:param Parameters: The configuration parameters that are specific to the transform type (algorithm) used. Conditionally dependent on the transform type.\n\nTransformType (string) -- [REQUIRED]The type of machine learning transform.\nFor information about the types of machine learning transforms, see Creating Machine Learning Transforms .\n\nFindMatchesParameters (dict) --The parameters for the find matches algorithm.\n\nPrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.\n\nPrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.\nThe precision metric indicates how often your model is correct when it predicts a match.\nThe recall metric indicates that for an actual match, how often your model predicts the match.\n\nAccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.\nAccuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.\nCost measures how many compute resources, and thus money, are consumed to run the transform.\n\nEnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.\nNote that setting this value to true may increase the conflation execution time.\n\n\n\n\n
:type Role: string
:param Role: The name or Amazon Resource Name (ARN) of the IAM role with the required permissions.
:type GlueVersion: string
:param GlueVersion: This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when this task runs.
:type Timeout: integer
:param Timeout: The timeout for a task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
:type MaxRetries: integer
:param MaxRetries: The maximum number of times to retry a task for this transform after a task run fails.
:rtype: dict
Returns
Response Syntax
{
'TransformId': 'string'
}
Response Structure
(dict) --
TransformId (string) --
The unique identifier for the transform that was updated.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
:return: {
'TransformId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
"""
pass
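# --- Editor's hedged usage sketch (not part of the generated stubs) ----------
# update_ml_transform tuning the FindMatches trade-off parameters; the
# transform ID, key column, and parameter values are illustrative assumptions.
def _example_update_ml_transform():
    import boto3
    glue = boto3.client('glue')
    return glue.update_ml_transform(
        TransformId='tfm-0123456789abcdef0',            # assumed transform ID
        Parameters={
            'TransformType': 'FIND_MATCHES',
            'FindMatchesParameters': {
                'PrimaryKeyColumnName': 'customer_id',  # assumed key column
                'PrecisionRecallTradeoff': 0.9,         # favour precision
                'AccuracyCostTradeoff': 0.5,            # balanced
            },
        },
        MaxRetries=1,
    )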
def update_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValueList=None, PartitionInput=None):
"""
Updates a partition.
See also: AWS API Documentation
Exceptions
:example: response = client.update_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionValueList=[
'string',
],
PartitionInput={
'Values': [
'string',
],
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition to be updated resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table in question resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table in which the partition to be updated is located.\n
:type PartitionValueList: list
:param PartitionValueList: [REQUIRED]\nA list of the values defining the partition.\n\n(string) --\n\n
:type PartitionInput: dict
:param PartitionInput: [REQUIRED]\nThe new partition object to update the partition to.\n\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\n\n(string) --\n\n\nLastAccessTime (datetime) --The last time at which the partition was accessed.\n\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nParameters (dict) --These key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
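# --- Editor's hedged usage sketch (not part of the generated stubs) ----------
# update_partition pointing an existing partition at a new S3 location; the
# database, table, partition value, and path are illustrative assumptions.
def _example_update_partition():
    import boto3
    glue = boto3.client('glue')
    return glue.update_partition(
        DatabaseName='sales_db',                  # assumed database
        TableName='orders',                       # assumed table
        PartitionValueList=['2020-01-01'],        # current partition values
        PartitionInput={
            'Values': ['2020-01-01'],
            'StorageDescriptor': {
                'Location': 's3://example-bucket/sales/orders/dt=2020-01-01/',
            },
        },
    )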
def update_table(CatalogId=None, DatabaseName=None, TableInput=None, SkipArchive=None):
"""
Updates a metadata table in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_table(
CatalogId='string',
DatabaseName='string',
TableInput={
'Name': 'string',
'Description': 'string',
'Owner': 'string',
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
}
},
SkipArchive=True|False
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableInput: dict
:param TableInput: [REQUIRED]\nAn updated TableInput object to define the metadata table in the catalog.\n\nName (string) -- [REQUIRED]The table name. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the table.\n\nOwner (string) --The table owner.\n\nLastAccessTime (datetime) --The last time that the table was accessed.\n\nLastAnalyzedTime (datetime) --The last time that column statistics were computed for this table.\n\nRetention (integer) --The retention time for this table.\n\nStorageDescriptor (dict) --A storage descriptor containing information about the physical storage of this table.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nPartitionKeys (list) --A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\n\n'PartitionKeys': []\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nViewOriginalText (string) --If the table is a view, the original text of the view; otherwise null .\n\nViewExpandedText (string) --If the table is a view, the expanded text of the view; otherwise null .\n\nTableType (string) --The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n\nParameters (dict) --These key-value pairs define properties associated with the table.\n\n(string) --\n(string) --\n\n\n\n\n\n
:type SkipArchive: boolean
:param SkipArchive: By default, UpdateTable always creates an archived version of the table before updating it. However, if skipArchive is set to true, UpdateTable does not create the archived version.
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
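# --- Editor's hedged usage sketch (not part of the generated stubs) ----------
# update_table with a minimal TableInput (only Name is required) plus a new
# description, skipping the archived copy. Because TableInput redefines the
# table, a real caller would usually start from get_table() output and modify
# it; the database and table names here are illustrative assumptions.
def _example_update_table():
    import boto3
    glue = boto3.client('glue')
    return glue.update_table(
        DatabaseName='sales_db',                        # assumed database
        TableInput={
            'Name': 'orders',                           # assumed table name
            'Description': 'Orders partitioned by dt',
            'PartitionKeys': [{'Name': 'dt', 'Type': 'string'}],
        },
        SkipArchive=True,
    )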
def update_trigger(Name=None, TriggerUpdate=None):
"""
Updates a trigger definition.
See also: AWS API Documentation
Exceptions
:example: response = client.update_trigger(
Name='string',
TriggerUpdate={
'Name': 'string',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to update.\n
:type TriggerUpdate: dict
:param TriggerUpdate: [REQUIRED]\nThe new values with which to update the trigger.\n\nName (string) --Reserved for future use.\n\nDescription (string) --A description of this trigger.\n\nSchedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --The actions initiated by this trigger.\n\n(dict) --Defines an action to be initiated by a trigger.\n\nJobName (string) --The name of a job to be executed.\n\nArguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --Specifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --The name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --The predicate of this trigger, which defines when it will fire.\n\nLogical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --A list of the conditions that determine when the trigger will fire.\n\n(dict) --Defines a condition under which a trigger fires.\n\nLogicalOperator (string) --A logical operator.\n\nJobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --The name of the crawler to which this condition applies.\n\nCrawlState (string) --The state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
Response Structure
(dict) --
Trigger (dict) --
The resulting trigger definition.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
:returns:
(string) --
(string) --
"""
pass
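# --- Editor's hedged usage sketch (not part of the generated stubs) ----------
# update_trigger changing a scheduled trigger's cron expression and the job it
# starts; the trigger and job names are illustrative assumptions.
def _example_update_trigger():
    import boto3
    glue = boto3.client('glue')
    return glue.update_trigger(
        Name='nightly-trigger',                       # assumed trigger name
        TriggerUpdate={
            'Schedule': 'cron(0 2 * * ? *)',          # every day at 02:00 UTC
            'Actions': [{'JobName': 'daily-etl-job', 'Timeout': 120}],
        },
    )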
def update_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None, FunctionInput=None):
"""
Updates an existing function definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionName='string',
FunctionInput={
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the function to be updated is located. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the function to be updated is located.\n
:type FunctionName: string
:param FunctionName: [REQUIRED]\nThe name of the function.\n
:type FunctionInput: dict
:param FunctionInput: [REQUIRED]\nA FunctionInput object that redefines the function in the Data Catalog.\n\nFunctionName (string) --The name of the function.\n\nClassName (string) --The Java class that contains the function code.\n\nOwnerName (string) --The owner of the function.\n\nOwnerType (string) --The owner type.\n\nResourceUris (list) --The resource URIs for the function.\n\n(dict) --The URIs for function resources.\n\nResourceType (string) --The type of the resource.\n\nUri (string) --The URI for accessing the resource.\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
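# --- Editor's hedged usage sketch (not part of the generated stubs) ----------
# update_user_defined_function re-pointing a catalog UDF at a new
# implementation class and JAR; the database, function, class, and JAR URI are
# illustrative assumptions.
def _example_update_user_defined_function():
    import boto3
    glue = boto3.client('glue')
    return glue.update_user_defined_function(
        DatabaseName='sales_db',                   # assumed database
        FunctionName='normalize_sku',              # assumed function name
        FunctionInput={
            'FunctionName': 'normalize_sku',
            'ClassName': 'com.example.udf.NormalizeSku',   # assumed class
            'OwnerName': 'data-eng',
            'OwnerType': 'GROUP',
            'ResourceUris': [
                {'ResourceType': 'JAR',
                 'Uri': 's3://example-bucket/udfs/normalize.jar'},
            ],
        },
    )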
def update_workflow(Name=None, Description=None, DefaultRunProperties=None):
"""
Updates an existing workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.update_workflow(
Name='string',
Description='string',
DefaultRunProperties={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow to be updated.\n
:type Description: string
:param Description: The description of the workflow.
:type DefaultRunProperties: dict
:param DefaultRunProperties: A collection of properties to be used as part of each execution of the workflow.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The name of the workflow which was specified in input.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
"""
pass
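# --- Editor's hedged usage sketch (not part of the generated stubs) ----------
# update_workflow refreshing the description and default run properties; the
# workflow name and property keys are illustrative assumptions.
def _example_update_workflow():
    import boto3
    glue = boto3.client('glue')
    return glue.update_workflow(
        Name='sales-pipeline',                           # assumed workflow name
        Description='Nightly ingest and conform of sales data',
        DefaultRunProperties={'environment': 'prod'},    # assumed property
    )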
| mit |
AlbertoPeon/invenio | modules/webaccess/lib/external_authentication_cern_unit_tests.py | 5 | 2815 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the user handling library."""
__revision__ = "$Id$"
import unittest
from invenio.config import CFG_CERN_SITE
from invenio.testutils import make_test_suite, run_test_suite, nottest
class ExternalAuthenticationCernTest(unittest.TestCase):
"""Test functions related to the CERN authentication."""
def setUp(self):
# pylint: disable=C0103
"""setting up helper variables for tests"""
from invenio import external_authentication_cern as cern
self.username, self.userpwd, self.useremail = \
open('demopwd.cfg', 'r').readline().strip().split(':', 2)
self.cern = cern.ExternalAuthCern()
@nottest
def test_auth_user_ok(self):
"""external authentication CERN - authorizing user through CERN system: should pass"""
self.assertEqual(self.cern.auth_user(self.username, self.userpwd), \
self.useremail)
@nottest
def test_auth_user_fail(self):
"""external authentication CERN - authorizing user through CERN system: should fail"""
self.assertEqual(self.cern.auth_user('patata', 'patata'), None)
@nottest
def test_fetch_user_groups_membership(self):
"""external authentication CERN - fetching user group membership at CERN"""
self.assertNotEqual(self.cern.fetch_user_groups_membership(self.useremail, self.userpwd), 0)
self.assertEqual(self.cern.fetch_user_groups_membership('patata', 'patata'), {})
@nottest
def test_fetch_user_preferences(self):
"""external authentication CERN - fetching user setting from CERN"""
self.assertEqual(self.cern.fetch_user_preferences(self.username, self.userpwd)['email'], self.useremail)
#self.assertRaises(KeyError, self.cern.fetch_user_preferences('patata', 'patata')['email'])
if CFG_CERN_SITE:
TEST_SUITE = make_test_suite(ExternalAuthenticationCernTest,)
else:
TEST_SUITE = make_test_suite()
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |